//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Vectorize.h"
using namespace llvm;
using namespace llvm::PatternMatch;
using namespace slpvectorizer;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
cl::opt<bool>
    llvm::RunSLPVectorization("vectorize-slp", cl::init(false), cl::Hidden,
                              cl::desc("Run the SLP vectorization passes"));

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this "
                              "number "));

static cl::opt<bool>
    ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
                       cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

static cl::opt<int>
    MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
        cl::desc("Attempt to vectorize for this register size in bits"));

/// Limits the size of scheduling regions in a block.
/// It avoids long compile times for _very_ large blocks where vector
/// instructions are spread over a wide range.
/// This limit is way higher than needed by real-world functions.
static cl::opt<int>
    ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
        cl::desc("Limit the size of the SLP scheduling region per block"));

static cl::opt<int> MinVectorRegSizeOption(
    "slp-min-reg-size", cl::init(128), cl::Hidden,
    cl::desc("Attempt to vectorize for this register size in bits"));

static cl::opt<unsigned> RecursionMaxDepth(
    "slp-recursion-max-depth", cl::init(12), cl::Hidden,
    cl::desc("Limit the recursion depth when building a vectorizable tree"));

static cl::opt<unsigned> MinTreeSize(
    "slp-min-tree-size", cl::init(3), cl::Hidden,
    cl::desc("Only vectorize small trees if they are fully vectorizable"));

static cl::opt<bool>
    ViewSLPTree("view-slp-tree", cl::Hidden,
                cl::desc("Display the SLP trees with Graphviz"));
// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the llvm benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
/// regions to be handled.
static const int MinScheduleRegionSize = 16;
/// Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}
/// \returns true if all of the instructions in \p VL are in the same block or
/// false otherwise.
static bool allSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return false;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return false;
    if (BB != I->getParent())
      return false;
  }
  return true;
}
/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (Value *i : VL)
    if (!isa<Constant>(i))
      return false;
  return true;
}
/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}
/// \returns True if \p I is commutative, handles CmpInst as well as Instruction.
static bool isCommutative(Instruction *I) {
  if (auto *IC = dyn_cast<CmpInst>(I))
    return IC->isCommutative();
  return I->isCommutative();
}
/// Checks if the vector of instructions can be represented as a shuffle, like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %x0x0 = mul i8 %x0, %x0
/// %x3x3 = mul i8 %x3, %x3
/// %y1y1 = mul i8 %y1, %y1
/// %y2y2 = mul i8 %y2, %y2
/// %ins1 = insertelement <4 x i8> undef, i8 %x0x0, i32 0
/// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
/// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
/// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
/// ret <4 x i8> %ins4
/// can be transformed into:
/// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
///                                                         i32 6>
/// %2 = mul <4 x i8> %1, %1
///
/// We convert this initially to something like:
/// %x0 = extractelement <4 x i8> %x, i32 0
/// %x3 = extractelement <4 x i8> %x, i32 3
/// %y1 = extractelement <4 x i8> %y, i32 1
/// %y2 = extractelement <4 x i8> %y, i32 2
/// %1 = insertelement <4 x i8> undef, i8 %x0, i32 0
/// %2 = insertelement <4 x i8> %1, i8 %x3, i32 1
/// %3 = insertelement <4 x i8> %2, i8 %y1, i32 2
/// %4 = insertelement <4 x i8> %3, i8 %y2, i32 3
/// %5 = mul <4 x i8> %4, %4
/// %6 = extractelement <4 x i8> %5, i32 0
/// %ins1 = insertelement <4 x i8> undef, i8 %6, i32 0
/// %7 = extractelement <4 x i8> %5, i32 1
/// %ins2 = insertelement <4 x i8> %ins1, i8 %7, i32 1
/// %8 = extractelement <4 x i8> %5, i32 2
/// %ins3 = insertelement <4 x i8> %ins2, i8 %8, i32 2
/// %9 = extractelement <4 x i8> %5, i32 3
/// %ins4 = insertelement <4 x i8> %ins3, i8 %9, i32 3
/// ret <4 x i8> %ins4
/// InstCombiner transforms this into a shuffle and vector mul
/// TODO: Can we split off and reuse the shuffle mask detection from
/// TargetTransformInfo::getInstructionThroughput?
static Optional<TargetTransformInfo::ShuffleKind>
isShuffle(ArrayRef<Value *> VL) {
  auto *EI0 = cast<ExtractElementInst>(VL[0]);
  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
  Value *Vec1 = nullptr;
  Value *Vec2 = nullptr;
  enum ShuffleMode { Unknown, Select, Permute };
  ShuffleMode CommonShuffleMode = Unknown;
  for (unsigned I = 0, E = VL.size(); I < E; ++I) {
    auto *EI = cast<ExtractElementInst>(VL[I]);
    auto *Vec = EI->getVectorOperand();
    // All vector operands must have the same number of vector elements.
    if (Vec->getType()->getVectorNumElements() != Size)
      return None;
    auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
    if (!Idx)
      return None;
    // Undefined behavior if Idx is negative or >= Size.
    if (Idx->getValue().uge(Size))
      return None;
    unsigned IntIdx = Idx->getValue().getZExtValue();
    // We can extractelement from undef vector.
    if (isa<UndefValue>(Vec))
      continue;
    // For correct shuffling we have to have at most 2 different vector operands
    // in all extractelement instructions.
    if (!Vec1 || Vec1 == Vec)
      Vec1 = Vec;
    else if (!Vec2 || Vec2 == Vec)
      Vec2 = Vec;
    else
      return None;
    if (CommonShuffleMode == Permute)
      continue;
    // If the extract index is not the same as the operation number, it is a
    // permutation.
    if (IntIdx != I) {
      CommonShuffleMode = Permute;
      continue;
    }
    CommonShuffleMode = Select;
  }
  // If we're not crossing lanes in different vectors, consider it as blending.
  if (CommonShuffleMode == Select && Vec2)
    return TargetTransformInfo::SK_Select;
  // If Vec2 was never used, we have a permutation of a single vector, otherwise
  // we have permutation of 2 vectors.
  return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
              : TargetTransformInfo::SK_PermuteSingleSrc;
}
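// A quick illustration of isShuffle()'s result kinds (hypothetical operands,
// added for clarity): for
//   VL = {extractelement %x, 0; extractelement %y, 1;
//         extractelement %x, 2; extractelement %y, 3}
// every element stays in its own lane and two sources are used, so the result
// is SK_Select, whereas
//   VL = {extractelement %x, 1; extractelement %x, 0}
// crosses lanes within a single source and yields SK_PermuteSingleSrc.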
namespace {

/// Main data required for vectorization of instructions.
struct InstructionsState {
  /// The very first instruction in the list with the main opcode.
  Value *OpValue = nullptr;

  /// The main/alternate instruction.
  Instruction *MainOp = nullptr;
  Instruction *AltOp = nullptr;

  /// The main/alternate opcodes for the list of instructions.
  unsigned getOpcode() const {
    return MainOp ? MainOp->getOpcode() : 0;
  }

  unsigned getAltOpcode() const {
    return AltOp ? AltOp->getOpcode() : 0;
  }

  /// Some of the instructions in the list have alternate opcodes.
  bool isAltShuffle() const { return getOpcode() != getAltOpcode(); }

  bool isOpcodeOrAlt(Instruction *I) const {
    unsigned CheckedOpcode = I->getOpcode();
    return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
  }

  InstructionsState() = delete;
  InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
      : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
};

} // end anonymous namespace
/// Chooses the correct key for scheduling data. If \p Op has the same (or
/// alternate) opcode as \p OpValue, the key is \p Op. Otherwise the key is \p
/// OpValue.
static Value *isOneOf(const InstructionsState &S, Value *Op) {
  auto *I = dyn_cast<Instruction>(Op);
  if (I && S.isOpcodeOrAlt(I))
    return Op;
  return S.OpValue;
}
/// \returns analysis of the Instructions in \p VL described in
/// InstructionsState, i.e. the opcode with which we suppose the whole list
/// could be vectorized even if its structure is diverse.
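/// For example (an illustrative bundle, not tied to any particular input):
/// for VL = {add, sub, add, sub} the returned state has MainOp = add and
/// AltOp = sub, so the list is still considered vectorizable as an
/// alternating add/sub sequence; for VL = {add, load} there is no common (or
/// alternate) opcode and the returned state has a null MainOp.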
static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
                                       unsigned BaseIndex = 0) {
  // Make sure these are all Instructions.
  if (llvm::any_of(VL, [](Value *V) { return !isa<Instruction>(V); }))
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);

  bool IsCastOp = isa<CastInst>(VL[BaseIndex]);
  bool IsBinOp = isa<BinaryOperator>(VL[BaseIndex]);
  unsigned Opcode = cast<Instruction>(VL[BaseIndex])->getOpcode();
  unsigned AltOpcode = Opcode;
  unsigned AltIndex = BaseIndex;

  // Check for one alternate opcode from another BinaryOperator.
  // TODO - generalize to support all operators (types, calls etc.).
  for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
    unsigned InstOpcode = cast<Instruction>(VL[Cnt])->getOpcode();
    if (IsBinOp && isa<BinaryOperator>(VL[Cnt])) {
      if (InstOpcode == Opcode || InstOpcode == AltOpcode)
        continue;
      if (Opcode == AltOpcode) {
        AltOpcode = InstOpcode;
        AltIndex = Cnt;
        continue;
      }
    } else if (IsCastOp && isa<CastInst>(VL[Cnt])) {
      Type *Ty0 = cast<Instruction>(VL[BaseIndex])->getOperand(0)->getType();
      Type *Ty1 = cast<Instruction>(VL[Cnt])->getOperand(0)->getType();
      if (Ty0 == Ty1) {
        if (InstOpcode == Opcode || InstOpcode == AltOpcode)
          continue;
        if (Opcode == AltOpcode) {
          AltOpcode = InstOpcode;
          AltIndex = Cnt;
          continue;
        }
      }
    } else if (InstOpcode == Opcode || InstOpcode == AltOpcode)
      continue;
    return InstructionsState(VL[BaseIndex], nullptr, nullptr);
  }

  return InstructionsState(VL[BaseIndex], cast<Instruction>(VL[BaseIndex]),
                           cast<Instruction>(VL[AltIndex]));
}
/// \returns true if all of the values in \p VL have the same type or false
/// otherwise.
static bool allSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return false;
  return true;
}
/// \returns True if Extract{Value,Element} instruction extracts element Idx.
static Optional<unsigned> getExtractIndex(Instruction *E) {
  unsigned Opcode = E->getOpcode();
  assert((Opcode == Instruction::ExtractElement ||
          Opcode == Instruction::ExtractValue) &&
         "Expected extractelement or extractvalue instruction.");
  if (Opcode == Instruction::ExtractElement) {
    auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
    if (!CI)
      return None;
    return CI->getZExtValue();
  }
  ExtractValueInst *EI = cast<ExtractValueInst>(E);
  if (EI->getNumIndices() != 1)
    return None;
  return *EI->idx_begin();
}
/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {
  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
    for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
      if (hasVectorInstrinsicScalarOpd(ID, i))
        return (CI->getArgOperand(i) == Scalar);
    }
    LLVM_FALLTHROUGH;
  }
  default:
    return false;
  }
}
/// \returns the AA location that is being accessed by the instruction.
static MemoryLocation getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return MemoryLocation::get(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return MemoryLocation::get(LI);
  return MemoryLocation();
}
/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}
namespace slpvectorizer {

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
  struct TreeEntry;
  struct ScheduleData;

public:
  using ValueList = SmallVector<Value *, 8>;
  using InstrList = SmallVector<Instruction *, 16>;
  using ValueSet = SmallPtrSet<Value *, 16>;
  using StoreList = SmallVector<StoreInst *, 8>;
  using ExtraValueToDebugLocsMap =
      MapVector<Value *, SmallVector<Instruction *, 2>>;
  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
          const DataLayout *DL, OptimizationRemarkEmitter *ORE)
      : F(Func), SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt), AC(AC),
        DB(DB), DL(DL), ORE(ORE), Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
    // Use the vector register size specified by the target unless overridden
    // by a command-line option.
    // TODO: It would be better to limit the vectorization factor based on
    //       data type rather than just register size. For example, x86 AVX has
    //       256-bit registers, but it does not support integer operations
    //       at that width (that requires AVX2).
    if (MaxVectorRegSizeOption.getNumOccurrences())
      MaxVecRegSize = MaxVectorRegSizeOption;
    else
      MaxVecRegSize = TTI->getRegisterBitWidth(true);

    if (MinVectorRegSizeOption.getNumOccurrences())
      MinVecRegSize = MinVectorRegSizeOption;
    else
      MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
  }
  /// Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// Vectorize the tree but with the list of externally used values \p
  /// ExternallyUsedValues. Values in this MapVector can be replaced by the
  /// generated extractvalue instructions.
  Value *vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues);

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost() const;
  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);
  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst taking
  /// into account (and updating it, if required) the list of externally used
  /// values stored in \p ExternallyUsedValues.
  void buildTree(ArrayRef<Value *> Roots,
                 ExtraValueToDebugLocsMap &ExternallyUsedValues,
                 ArrayRef<Value *> UserIgnoreLst = None);
  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    ExternalUses.clear();
    NumOpsWantToKeepOrder.clear();
    NumOpsWantToKeepOriginalOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
  }

  unsigned getTreeSize() const { return VectorizableTree.size(); }
  /// Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns The best order of instructions for vectorization.
  Optional<ArrayRef<unsigned>> bestOrder() const {
    auto I = std::max_element(
        NumOpsWantToKeepOrder.begin(), NumOpsWantToKeepOrder.end(),
        [](const decltype(NumOpsWantToKeepOrder)::value_type &D1,
           const decltype(NumOpsWantToKeepOrder)::value_type &D2) {
          return D1.second < D2.second;
        });
    if (I == NumOpsWantToKeepOrder.end() ||
        I->getSecond() <= NumOpsWantToKeepOriginalOrder)
      return None;

    return makeArrayRef(I->getFirst());
  }
  /// \return The vector element size in bits to use when vectorizing the
  /// expression tree ending at \p V. If V is a store, the size is the width of
  /// the stored value. Otherwise, the size is the width of the largest loaded
  /// value reaching V. This method is used by the vectorizer to calculate
  /// vectorization factors.
  unsigned getVectorElementSize(Value *V) const;

  /// Compute the minimum type sizes required to represent the entries in a
  /// vectorizable tree.
  void computeMinimumValueSizes();
  // \returns maximum vector register size as set by TTI or overridden by cl::opt.
  unsigned getMaxVecRegSize() const {
    return MaxVecRegSize;
  }

  // \returns minimum vector register size as set by cl::opt.
  unsigned getMinVecRegSize() const {
    return MinVecRegSize;
  }
  /// Check if ArrayType or StructType is isomorphic to some VectorType.
  ///
  /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
  unsigned canMapToVector(Type *T, const DataLayout &DL) const;

  /// \returns True if the VectorizableTree is both tiny and not fully
  /// vectorizable. We do not vectorize such trees.
  bool isTreeTinyAndNotFullyVectorizable() const;

  OptimizationRemarkEmitter *getORE() { return ORE; }
  /// This structure holds any data we need about the edges being traversed
  /// during buildTree_rec(). We keep track of:
  /// (i) the user TreeEntry index, and
  /// (ii) the index of the edge.
  struct EdgeInfo {
    EdgeInfo() = default;
    EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
        : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
    /// The user TreeEntry.
    TreeEntry *UserTE = nullptr;
    /// The operand index of the use.
    unsigned EdgeIdx = UINT_MAX;
    friend inline raw_ostream &operator<<(raw_ostream &OS,
                                          const BoUpSLP::EdgeInfo &EI) {
      EI.dump(OS);
      return OS;
    }
    /// Debug print.
    void dump(raw_ostream &OS) const {
      OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
         << " EdgeIdx:" << EdgeIdx << "}";
    }
    LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
  };
  /// A helper data structure to hold the operands of a vector of instructions.
  /// This supports a fixed vector length for all operand vectors.
  class VLOperands {
    /// For each operand we need (i) the value, and (ii) the opcode that it
    /// would be attached to if the expression was in a left-linearized form.
    /// This is required to avoid illegal operand reordering.
    /// For example:
    /// \verbatim
    ///                         0 Op1
    ///                         |/
    /// Op1 Op2   Linearized    + Op2
    ///   \  /    ---------->   |/
    ///    -                    -
    ///
    /// Op1 - Op2            (0 + Op1) - Op2
    /// \endverbatim
    ///
    /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
    ///
    /// Another way to think of this is to track all the operations across the
    /// path from the operand all the way to the root of the tree and to
    /// calculate the operation that corresponds to this path. For example, the
    /// path from Op2 to the root crosses the RHS of the '-', therefore the
    /// corresponding operation is a '-' (which matches the one in the
    /// linearized tree, as shown above).
    ///
    /// For lack of a better term, we refer to this operation as Accumulated
    /// Path Operation (APO).
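    ///
    /// For example (a hypothetical two-lane bundle):
    ///   A[0] = B[0] + C[0]
    ///   A[1] = B[1] - C[1]
    /// Both B operands sit on the LHS, so their APO is '+' (false). The C
    /// operand of lane 0 has APO '+' (false), while the C operand of lane 1
    /// has APO '-' (true); the reordering code below only exchanges operands
    /// across lanes when their APOs match.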
    struct OperandData {
      OperandData() = default;
      OperandData(Value *V, bool APO, bool IsUsed)
          : V(V), APO(APO), IsUsed(IsUsed) {}

      /// The operand value.
      Value *V = nullptr;

      /// TreeEntries only allow a single opcode, or an alternate sequence of
      /// them (e.g., +, -). Therefore, we can safely use a boolean value for
      /// the APO. It is set to 'true' if 'V' is attached to an inverse
      /// operation in the left-linearized form (e.g., Sub/Div), and 'false'
      /// otherwise.
      bool APO = false;

      /// Helper data for the reordering function.
      bool IsUsed = false;
    };
    /// During operand reordering, we are trying to select the operand at the
    /// lane that matches best with the operand at the neighboring lane. Our
    /// selection is based on the type of value we are looking for. For example,
    /// if the neighboring lane has a load, we need to look for a load that is
    /// accessing a consecutive address. These strategies are summarized in the
    /// 'ReorderingMode' enumerator.
    enum class ReorderingMode {
      Load,     ///< Matching loads to consecutive memory addresses
      Opcode,   ///< Matching instructions based on opcode (same or alternate)
      Constant, ///< Matching constants
      Splat,    ///< Matching the same instruction multiple times (broadcast)
      Failed,   ///< We failed to create a vectorizable group
    };
    using OperandDataVec = SmallVector<OperandData, 2>;

    /// A vector of operand vectors.
    SmallVector<OperandDataVec, 4> OpsVec;

    const DataLayout &DL;
    ScalarEvolution &SE;
    /// \returns the operand data at \p OpIdx and \p Lane.
    OperandData &getData(unsigned OpIdx, unsigned Lane) {
      return OpsVec[OpIdx][Lane];
    }

    /// \returns the operand data at \p OpIdx and \p Lane. Const version.
    const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
      return OpsVec[OpIdx][Lane];
    }
724 for (unsigned OpIdx
= 0, NumOperands
= getNumOperands();
725 OpIdx
!= NumOperands
; ++OpIdx
)
726 for (unsigned Lane
= 0, NumLanes
= getNumLanes(); Lane
!= NumLanes
;
728 OpsVec
[OpIdx
][Lane
].IsUsed
= false;
    /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
    void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
      std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
    }
    // Search all operands in Ops[*][Lane] for the one that matches best
    // Ops[OpIdx][LastLane] and return its operand index.
    // If no good match can be found, return None.
    Optional<unsigned>
    getBestOperand(unsigned OpIdx, int Lane, int LastLane,
                   ArrayRef<ReorderingMode> ReorderingModes) {
      unsigned NumOperands = getNumOperands();

      // The operand of the previous lane at OpIdx.
      Value *OpLastLane = getData(OpIdx, LastLane).V;

      // Our strategy mode for OpIdx.
      ReorderingMode RMode = ReorderingModes[OpIdx];

      // The linearized opcode of the operand at OpIdx, Lane.
      bool OpIdxAPO = getData(OpIdx, Lane).APO;

      const unsigned BestScore = 2;
      const unsigned GoodScore = 1;

      // The best operand index and its score.
      // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
      // are using the score to differentiate between the two.
      struct BestOpData {
        Optional<unsigned> Idx = None;
        unsigned Score = 0;
      } BestOp;

      // Iterate through all unused operands and look for the best.
      for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
        // Get the operand at Idx and Lane.
        OperandData &OpData = getData(Idx, Lane);
        Value *Op = OpData.V;
        bool OpAPO = OpData.APO;

        // Skip already selected operands.
        if (OpData.IsUsed)
          continue;

        // Skip if we are trying to move the operand to a position with a
        // different opcode in the linearized tree form. This would break the
        // semantics.
        if (OpAPO != OpIdxAPO)
          continue;

        // Look for an operand that matches the current mode.
        switch (RMode) {
        case ReorderingMode::Load:
          if (isa<LoadInst>(Op)) {
            // Figure out which is left and right, so that we can check for
            // consecutive loads.
            bool LeftToRight = Lane > LastLane;
            Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
            Value *OpRight = (LeftToRight) ? Op : OpLastLane;
            if (isConsecutiveAccess(cast<LoadInst>(OpLeft),
                                    cast<LoadInst>(OpRight), DL, SE))
              BestOp.Idx = Idx;
          }
          break;
        case ReorderingMode::Opcode:
          // We accept both Instructions and Undefs, but with different scores.
          if ((isa<Instruction>(Op) && isa<Instruction>(OpLastLane) &&
               cast<Instruction>(Op)->getOpcode() ==
                   cast<Instruction>(OpLastLane)->getOpcode()) ||
              (isa<UndefValue>(OpLastLane) && isa<Instruction>(Op)) ||
              isa<UndefValue>(Op)) {
            // An instruction has a higher score than an undef.
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Constant:
          if (isa<Constant>(Op)) {
            unsigned Score = (isa<UndefValue>(Op)) ? GoodScore : BestScore;
            if (Score > BestOp.Score) {
              BestOp.Idx = Idx;
              BestOp.Score = Score;
            }
          }
          break;
        case ReorderingMode::Splat:
          if (Op == OpLastLane)
            BestOp.Idx = Idx;
          break;
        case ReorderingMode::Failed:
          return None;
        }
      }

      if (BestOp.Idx) {
        getData(BestOp.Idx.getValue(), Lane).IsUsed = true;
        return BestOp.Idx;
      }
      // If we could not find a good match return None.
      return None;
    }
    /// Helper for reorderOperandVecs. \returns the lane that we should start
    /// reordering from. This is the one which has the least number of operands
    /// that can freely move about.
    unsigned getBestLaneToStartReordering() const {
      unsigned BestLane = 0;
      unsigned Min = UINT_MAX;
      for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
           ++Lane) {
        unsigned NumFreeOps = getMaxNumOperandsThatCanBeReordered(Lane);
        if (NumFreeOps < Min) {
          Min = NumFreeOps;
          BestLane = Lane;
        }
      }
      return BestLane;
    }
    /// \returns the maximum number of operands that are allowed to be reordered
    /// for \p Lane. This is used as a heuristic for selecting the first lane to
    /// start operand reordering.
    unsigned getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
      unsigned CntTrue = 0;
      unsigned NumOperands = getNumOperands();
      // Operands with the same APO can be reordered. We therefore need to count
      // how many of them we have for each APO, like this: Cnt[APO] = x.
      // Since we only have two APOs, namely true and false, we can avoid using
      // a map. Instead we can simply count the number of operands that
      // correspond to one of them (in this case the 'true' APO), and calculate
      // the other by subtracting it from the total number of operands.
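      // For instance (hypothetical lane): operands with APOs {false, false,
      // true} give CntTrue = 1 and CntFalse = 2, so at most 2 operands of this
      // lane can be freely reordered.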
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx)
        if (getData(OpIdx, Lane).APO)
          ++CntTrue;
      unsigned CntFalse = NumOperands - CntTrue;
      return std::max(CntTrue, CntFalse);
    }
    /// Go through the instructions in VL and append their operands.
    void appendOperandsOfVL(ArrayRef<Value *> VL) {
      assert(!VL.empty() && "Bad VL");
      assert((empty() || VL.size() == getNumLanes()) &&
             "Expected same number of lanes");
      assert(isa<Instruction>(VL[0]) && "Expected instruction");
      unsigned NumOperands = cast<Instruction>(VL[0])->getNumOperands();
      OpsVec.resize(NumOperands);
      unsigned NumLanes = VL.size();
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        OpsVec[OpIdx].resize(NumLanes);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
          // Our tree has just 3 nodes: the root and two operands.
          // It is therefore trivial to get the APO. We only need to check the
          // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
          // RHS operand. The LHS operand of both add and sub is never attached
          // to an inverse operation in the linearized form, therefore its APO
          // is false. The RHS is true only if VL[Lane] is an inverse operation.

          // Since operand reordering is performed on groups of commutative
          // operations or alternating sequences (e.g., +, -), we can safely
          // tell the inverse operations by checking commutativity.
          bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
          bool APO = (OpIdx == 0) ? false : IsInverseOperation;
          OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
                                 APO, false};
        }
      }
    }
    /// \returns the number of operands.
    unsigned getNumOperands() const { return OpsVec.size(); }

    /// \returns the number of lanes.
    unsigned getNumLanes() const { return OpsVec[0].size(); }

    /// \returns the operand value at \p OpIdx and \p Lane.
    Value *getValue(unsigned OpIdx, unsigned Lane) const {
      return getData(OpIdx, Lane).V;
    }

    /// \returns true if the data structure is empty.
    bool empty() const { return OpsVec.empty(); }

    void clear() { OpsVec.clear(); }
    /// \returns true if there are enough operands identical to \p Op to fill
    /// the whole vector.
    /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
    bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
      bool OpAPO = getData(OpIdx, Lane).APO;
      for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
        if (Ln == Lane)
          continue;
        // This is set to true if we found a candidate for broadcast at Lane.
        bool FoundCandidate = false;
        for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
          OperandData &Data = getData(OpI, Ln);
          if (Data.APO != OpAPO || Data.IsUsed)
            continue;
          if (Data.V == Op) {
            FoundCandidate = true;
            Data.IsUsed = true;
            break;
          }
        }
        if (!FoundCandidate)
          return false;
      }
      return true;
    }
    /// Initialize with all the operands of the instruction vector \p RootVL.
    VLOperands(ArrayRef<Value *> RootVL, const DataLayout &DL,
               ScalarEvolution &SE)
        : DL(DL), SE(SE) {
      // Append all the operands of RootVL.
      appendOperandsOfVL(RootVL);
    }
    /// \returns a value vector with the operands across all lanes for the
    /// operand at \p OpIdx.
    ValueList getVL(unsigned OpIdx) const {
      ValueList OpVL(OpsVec[OpIdx].size());
      assert(OpsVec[OpIdx].size() == getNumLanes() &&
             "Expected same num of lanes across all operands");
      for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
        OpVL[Lane] = OpsVec[OpIdx][Lane].V;
      return OpVL;
    }
    // Performs operand reordering for 2 or more operands.
    // The original operands are in OrigOps[OpIdx][Lane].
    // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
    void reorder() {
      unsigned NumOperands = getNumOperands();
      unsigned NumLanes = getNumLanes();
      // Each operand has its own mode. We are using this mode to help us select
      // the instructions for each lane, so that they match best with the ones
      // we have selected so far.
      SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);

      // This is a greedy single-pass algorithm. We are going over each lane
      // once and deciding on the best order right away with no back-tracking.
      // However, in order to increase its effectiveness, we start with the lane
      // that has operands that can move the least. For example, given the
      // following lanes:
      //  Lane 0 : A[0] = B[0] + C[0]   // Visited 3rd
      //  Lane 1 : A[1] = C[1] - B[1]   // Visited 1st
      //  Lane 2 : A[2] = B[2] + C[2]   // Visited 2nd
      //  Lane 3 : A[3] = C[3] - B[3]   // Visited 4th
      // we will start at Lane 1, since the operands of the subtraction cannot
      // be reordered. Then we will visit the rest of the lanes in a circular
      // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.

      // Find the first lane that we will start our search from.
      unsigned FirstLane = getBestLaneToStartReordering();

      // Initialize the modes.
      for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
        Value *OpLane0 = getValue(OpIdx, FirstLane);
        // Keep track if we have instructions with all the same opcode on one
        // side.
        if (isa<LoadInst>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Load;
        else if (isa<Instruction>(OpLane0)) {
          // Check if OpLane0 should be broadcast.
          if (shouldBroadcast(OpLane0, OpIdx, FirstLane))
            ReorderingModes[OpIdx] = ReorderingMode::Splat;
          else
            ReorderingModes[OpIdx] = ReorderingMode::Opcode;
        } else if (isa<Constant>(OpLane0))
          ReorderingModes[OpIdx] = ReorderingMode::Constant;
        else if (isa<Argument>(OpLane0))
          // Our best hope is a Splat. It may save some cost in some cases.
          ReorderingModes[OpIdx] = ReorderingMode::Splat;
        else
          // NOTE: This should be unreachable.
          ReorderingModes[OpIdx] = ReorderingMode::Failed;
      }

      // If the initial strategy fails for any of the operand indexes, then we
      // perform reordering again in a second pass. This helps avoid assigning
      // high priority to the failed strategy, and should improve reordering for
      // the non-failed operand indexes.
      for (int Pass = 0; Pass != 2; ++Pass) {
        // Skip the second pass if the first pass did not fail.
        bool StrategyFailed = false;
        // Mark all operand data as free to use.
        clearUsed();
        // We keep the original operand order for the FirstLane, so reorder the
        // rest of the lanes. We are visiting the nodes in a circular fashion,
        // using FirstLane as the center point and increasing the radius.
        for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
          // Visit the lane on the right and then the lane on the left.
          for (int Direction : {+1, -1}) {
            int Lane = FirstLane + Direction * Distance;
            if (Lane < 0 || Lane >= (int)NumLanes)
              continue;
            int LastLane = Lane - Direction;
            assert(LastLane >= 0 && LastLane < (int)NumLanes &&
                   "Out of bounds");
            // Look for a good match for each operand.
            for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
              // Search for the operand that matches SortedOps[OpIdx][Lane-1].
              Optional<unsigned> BestIdx =
                  getBestOperand(OpIdx, Lane, LastLane, ReorderingModes);
              // By not selecting a value, we allow the operands that follow to
              // select a better matching value. We will get a non-null value in
              // the next run of getBestOperand().
              if (BestIdx) {
                // Swap the current operand with the one returned by
                // getBestOperand().
                swap(OpIdx, BestIdx.getValue(), Lane);
              } else {
                // We failed to find a best operand, set mode to 'Failed'.
                ReorderingModes[OpIdx] = ReorderingMode::Failed;
                // Enable the second pass.
                StrategyFailed = true;
              }
            }
          }
        }
        // Skip second pass if the strategy did not fail.
        if (!StrategyFailed)
          break;
      }
    }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
      switch (RMode) {
      case ReorderingMode::Load:
        return "Load";
      case ReorderingMode::Opcode:
        return "Opcode";
      case ReorderingMode::Constant:
        return "Constant";
      case ReorderingMode::Splat:
        return "Splat";
      case ReorderingMode::Failed:
        return "Failed";
      }
      llvm_unreachable("Unimplemented Reordering Type");
    }

    LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
                                                   raw_ostream &OS) {
      return OS << getModeStr(RMode);
    }

    /// Debug print.
    LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
      printMode(RMode, dbgs());
    }

    friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
      return printMode(RMode, OS);
    }

    /// Debug print.
    LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
      const unsigned Indent = 2;
      unsigned Cnt = 0;
      for (const OperandDataVec &OpDataVec : OpsVec) {
        OS << "Operand " << Cnt++ << "\n";
        for (const OperandData &OpData : OpDataVec) {
          OS.indent(Indent) << "{";
          if (Value *V = OpData.V)
            OS << *V;
          else
            OS << "null";
          OS << ", APO:" << OpData.APO << "}\n";
        }
        OS << "\n";
      }
      return OS;
    }

    /// Debug print.
    LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
  };
  /// Checks if all users of \p I are part of the vectorization tree.
  bool areAllUsersVectorized(Instruction *I) const;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
                     const EdgeInfo &EI);

  /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
  /// be vectorized to use the original vector (or aggregate "bitcast" to a
  /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
  /// returns false, setting \p CurrentOrder to either an empty vector or a
  /// non-identity permutation that allows to reuse extract instructions.
  bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                       SmallVectorImpl<unsigned> &CurrentOrder) const;

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty, const DenseSet<unsigned> &ShuffledIndices) const;

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL) const;

  /// Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL,
                                 const InstructionsState &S);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even if the tree height is tiny.
  bool isFullyVectorizableTinyTree() const;

  /// Reorder commutative or alt operands to get better probability of
  /// generating vectorized code.
  static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                             SmallVectorImpl<Value *> &Left,
                                             SmallVectorImpl<Value *> &Right,
                                             const DataLayout &DL,
                                             ScalarEvolution &SE);
  struct TreeEntry {
    using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
    TreeEntry(VecTreeTy &Container) : Container(Container) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      if (VL.size() == Scalars.size())
        return std::equal(VL.begin(), VL.end(), Scalars.begin());
      return VL.size() == ReuseShuffleIndices.size() &&
             std::equal(
                 VL.begin(), VL.end(), ReuseShuffleIndices.begin(),
                 [this](Value *V, unsigned Idx) { return V == Scalars[Idx]; });
    }
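    // For illustration (hypothetical scalars): with Scalars = {a, b} and
    // ReuseShuffleIndices = {0, 0, 1, 1}, isSame({a, a, b, b}) returns true,
    // because each lane of VL maps onto the reused scalar at its index.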
    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue = nullptr;

    /// Do we need to gather this sequence?
    bool NeedToGather = false;

    /// Does this sequence require some shuffling?
    SmallVector<unsigned, 4> ReuseShuffleIndices;

    /// Does this entry require reordering?
    ArrayRef<unsigned> ReorderIndices;

    /// Points back to the VectorizableTree.
    ///
    /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
    /// to be a pointer and needs to be able to initialize the child iterator.
    /// Thus we need a reference back to the container to translate the indices
    /// to entries.
    VecTreeTy &Container;

    /// The TreeEntry index containing the user of this entry. We can actually
    /// have multiple users so the data structure is not truly a tree.
    SmallVector<EdgeInfo, 1> UserTreeIndices;

    /// The index of this treeEntry in VectorizableTree.
    int Idx = -1;
    /// The operands of each instruction in each lane Operands[op_index][lane].
    /// Note: This helps avoid the replication of the code that performs the
    /// reordering of operands during buildTree_rec() and vectorizeTree().
    SmallVector<ValueList, 2> Operands;
    /// Set this bundle's \p OpIdx'th operand to \p OpVL.
    void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL,
                    ArrayRef<unsigned> ReuseShuffleIndices) {
      if (Operands.size() < OpIdx + 1)
        Operands.resize(OpIdx + 1);
      assert(Operands[OpIdx].size() == 0 && "Already resized?");
      Operands[OpIdx].resize(Scalars.size());
      for (unsigned Lane = 0, E = Scalars.size(); Lane != E; ++Lane)
        Operands[OpIdx][Lane] = (!ReuseShuffleIndices.empty())
                                    ? OpVL[ReuseShuffleIndices[Lane]]
                                    : OpVL[Lane];
    }

    /// If there is a user TreeEntry, then set its operand.
    void trySetUserTEOperand(const EdgeInfo &UserTreeIdx,
                             ArrayRef<Value *> OpVL,
                             ArrayRef<unsigned> ReuseShuffleIndices) {
      if (UserTreeIdx.UserTE)
        UserTreeIdx.UserTE->setOperand(UserTreeIdx.EdgeIdx, OpVL,
                                       ReuseShuffleIndices);
    }
    /// \returns the \p OpIdx operand of this TreeEntry.
    ValueList &getOperand(unsigned OpIdx) {
      assert(OpIdx < Operands.size() && "Off bounds");
      return Operands[OpIdx];
    }

    /// \return the single \p OpIdx operand.
    Value *getSingleOperand(unsigned OpIdx) const {
      assert(OpIdx < Operands.size() && "Off bounds");
      assert(!Operands[OpIdx].empty() && "No operand available");
      return Operands[OpIdx][0];
    }
    /// Debug printer.
    LLVM_DUMP_METHOD void dump() const {
      dbgs() << Idx << ".\n";
      for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
        dbgs() << "Operand " << OpI << ":\n";
        for (const Value *V : Operands[OpI])
          dbgs().indent(2) << *V << "\n";
      }
      dbgs() << "Scalars: \n";
      for (Value *V : Scalars)
        dbgs().indent(2) << *V << "\n";
      dbgs() << "NeedToGather: " << NeedToGather << "\n";
      dbgs() << "VectorizedValue: ";
      if (VectorizedValue)
        dbgs() << *VectorizedValue;
      else
        dbgs() << "NULL";
      dbgs() << "\n";
      dbgs() << "ReuseShuffleIndices: ";
      if (ReuseShuffleIndices.empty())
        dbgs() << "Empty";
      else
        for (unsigned Idx : ReuseShuffleIndices)
          dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "ReorderIndices: ";
      for (unsigned Idx : ReorderIndices)
        dbgs() << Idx << ", ";
      dbgs() << "\n";
      dbgs() << "UserTreeIndices: ";
      for (const auto &EInfo : UserTreeIndices)
        dbgs() << EInfo << ", ";
      dbgs() << "\n";
    }
  };
  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
                          const EdgeInfo &UserTreeIdx,
                          ArrayRef<unsigned> ReuseShuffleIndices = None,
                          ArrayRef<unsigned> ReorderIndices = None) {
    VectorizableTree.push_back(llvm::make_unique<TreeEntry>(VectorizableTree));
    TreeEntry *Last = VectorizableTree.back().get();
    Last->Idx = VectorizableTree.size() - 1;
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
                                     ReuseShuffleIndices.end());
    Last->ReorderIndices = ReorderIndices;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = Last->Idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }

    if (UserTreeIdx.UserTE)
      Last->UserTreeIndices.push_back(UserTreeIdx);

    Last->trySetUserTEOperand(UserTreeIdx, VL, ReuseShuffleIndices);
    return Last;
  }
  /// -- Vectorization State --
  /// Holds all of the tree entries.
  TreeEntry::VecTreeTy VectorizableTree;
  LLVM_DUMP_METHOD void dumpVectorizableTree() const {
    for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
      VectorizableTree[Id]->dump();
    }
  }
*getTreeEntry(Value
*V
) {
1341 auto I
= ScalarToTreeEntry
.find(V
);
1342 if (I
!= ScalarToTreeEntry
.end())
1343 return VectorizableTree
[I
->second
].get();
1347 const TreeEntry
*getTreeEntry(Value
*V
) const {
1348 auto I
= ScalarToTreeEntry
.find(V
);
1349 if (I
!= ScalarToTreeEntry
.end())
1350 return VectorizableTree
[I
->second
].get();
  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;
  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}

    // Which scalar in our function.
    Value *Scalar;

    // Which user that uses the scalar.
    llvm::User *User;

    // Which lane does the scalar belong to.
    int Lane;
  };
  using UserList = SmallVector<ExternalUser, 16>;
  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {
    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    MemoryLocation Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  using AliasCacheKey = std::pair<Instruction *, Instruction *>;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;
  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.emplace_back(I);
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<unique_value, 8> DeletedInstructions;
  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User). External User
  /// can be nullptr, it means that this Internal Scalar will be used later,
  /// after vectorization.
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;

  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;
  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {
    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };
    ScheduleData() = default;

    void init(int BlockSchedulingRegionID, Value *OpVal) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
      OpValue = OpVal;
    }
    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }
    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }
    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }
    Instruction *Inst = nullptr;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle = nullptr;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle = nullptr;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore = nullptr;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID = 0;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority = 0;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies = InvalidDeps;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps = InvalidDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle = InvalidDeps;
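    // Bookkeeping sketch (hypothetical bundle): if the two members of a bundle
    // have three in-region users in total, the bundle starts with
    // UnscheduledDepsInBundle = 3; every time one of those users is scheduled
    // the count drops by one, and once it reaches 0 the bundle reports
    // isReady().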
    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled = false;

    /// Opcode of the current instruction in the schedule data.
    Value *OpValue = nullptr;
  };
  friend inline raw_ostream &operator<<(raw_ostream &os,
                                        const BoUpSLP::ScheduleData &SD) {
    SD.dump(os);
    return os;
  }

  friend struct GraphTraits<BoUpSLP *>;
  friend struct DOTGraphTraits<BoUpSLP *>;
  /// Contains all scheduling data for a basic block.
  struct BlockScheduling {
    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
    void clear() {
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Reduce the maximum schedule region size by the size of the
      // previous scheduling run.
      ScheduleRegionSizeLimit -= ScheduleRegionSize;
      if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
        ScheduleRegionSizeLimit = MinScheduleRegionSize;
      ScheduleRegionSize = 0;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }
    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    ScheduleData *getScheduleData(Value *V, Value *Key) {
      if (V == Key)
        return getScheduleData(V);
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end()) {
        ScheduleData *SD = I->second[Key];
        if (SD && SD->SchedulingRegionID == SchedulingRegionID)
          return SD;
      }
      return nullptr;
    }
    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }
    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        if (BundleMember->Inst != BundleMember->OpValue) {
          BundleMember = BundleMember->NextInBundle;

        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          auto *I = dyn_cast<Instruction>(U.get());
          doForAllOpcodes(I, [&ReadyList](ScheduleData *OpDef) {
            if (OpDef && OpDef->hasValidDependencies() &&
                OpDef->incrementUnscheduledDeps(-1) == 0) {
              // There are no more unscheduled dependencies after
              // decrementing, so we can put the dependent instruction
              // into the ready list.
              ScheduleData *DepBundle = OpDef->FirstInBundle;
              assert(!DepBundle->IsScheduled &&
                     "already scheduled bundle gets ready");
              ReadyList.insert(DepBundle);
                         << "SLP: gets ready (def): " << *DepBundle << "\n");

        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
                       << "SLP: gets ready (mem): " << *DepBundle << "\n");

        BundleMember = BundleMember->NextInBundle;
    void doForAllOpcodes(Value *V,
                         function_ref<void(ScheduleData *SD)> Action) {
      if (ScheduleData *SD = getScheduleData(V))
      auto I = ExtraScheduleDataMap.find(V);
      if (I != ExtraScheduleDataMap.end())
        for (auto &P : I->second)
          if (P.second->SchedulingRegionID == SchedulingRegionID)

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        doForAllOpcodes(I, [&](ScheduleData *SD) {
          if (SD->isSchedulingEntity() && SD->isReady()) {
            ReadyList.insert(SD);
                       << "SLP: initially in ready list: " << *I << "\n");

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
                           const InstructionsState &S);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);

    /// Allocates schedule data chunk.
    ScheduleData *allocateScheduleDataChunks();

    /// Extends the scheduling region so that V is inside the region.
    /// \returns true if the region size is within the limit.
    bool extendSchedulingRegion(Value *V, const InstructionsState &S);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    /// Attaches ScheduleData to Instruction with the leading key.
    DenseMap<Value *, SmallDenseMap<Value *, ScheduleData *>>
        ExtraScheduleDataMap;
    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart = nullptr;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd = nullptr;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion = nullptr;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion = nullptr;

    /// The current size of the scheduling region.
    int ScheduleRegionSize = 0;

    /// The maximum size allowed for the scheduling region.
    int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    // Make sure that the initial SchedulingRegionID is greater than the
    // initial SchedulingRegionID in ScheduleData (which is 0).
    int SchedulingRegionID = 1;

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  using OrdersType = SmallVector<unsigned, 4>;
  /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
  /// sorted SmallVectors of unsigned.
  struct OrdersTypeDenseMapInfo {
    static OrdersType getEmptyKey() {
    static OrdersType getTombstoneKey() {
    static unsigned getHashValue(const OrdersType &V) {
      return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
    static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {

  /// Contains orders of operations along with the number of bundles that have
  /// operations in this order. It stores only those orders that require
  /// reordering; if reordering is not required, it is counted using \a
  /// NumOpsWantToKeepOriginalOrder.
  DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo> NumOpsWantToKeepOrder;
  /// Number of bundles that do not require reordering.
  unsigned NumOpsWantToKeepOriginalOrder = 0;
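  // Illustrative example (hypothetical bundles): if two load bundles are
  // found whose pointers appear in memory order {1, 0, 3, 2},
  // NumOpsWantToKeepOrder[{1,0,3,2}] becomes 2, while every bundle that is
  // already in order only bumps NumOpsWantToKeepOriginalOrder. Comparing the
  // counters later helps decide whether the tree is emitted reordered or
  // as-is.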
  // Analysis and block reference.
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AssumptionCache *AC;
  const DataLayout *DL;
  OptimizationRemarkEmitter *ORE;

  unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
  unsigned MinVecRegSize; // Set by cl::opt (default: 128).

  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;

  /// A map of scalar integer values to the smallest bit width with which they
  /// can legally be represented. The values map to (width, signed) pairs,
  /// where "width" indicates the minimum bit width and "signed" is True if the
  /// value must be sign-extended, rather than zero-extended, back to its
  MapVector<Value *, std::pair<uint64_t, bool>> MinBWs;

} // end namespace slpvectorizer

template <> struct GraphTraits<BoUpSLP *> {
  using TreeEntry = BoUpSLP::TreeEntry;

  /// NodeRef has to be a pointer per the GraphWriter.
  using NodeRef = TreeEntry *;

  using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;

  /// Add the VectorizableTree to the index iterator to be able to return
  /// TreeEntry pointers.
  struct ChildIteratorType
      : public iterator_adaptor_base<
            ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
    ContainerTy &VectorizableTree;

    ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
        : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}

    NodeRef operator*() { return I->UserTE; }

  static NodeRef getEntryNode(BoUpSLP &R) {
    return R.VectorizableTree[0].get();

  static ChildIteratorType child_begin(NodeRef N) {
    return {N->UserTreeIndices.begin(), N->Container};

  static ChildIteratorType child_end(NodeRef N) {
    return {N->UserTreeIndices.end(), N->Container};
  /// For the node iterator we just need to turn the TreeEntry iterator into a
  /// TreeEntry* iterator so that it dereferences to NodeRef.
  class nodes_iterator {
    using ItTy = ContainerTy::iterator;

    nodes_iterator(const ItTy &It2) : It(It2) {}
    NodeRef operator*() { return It->get(); }
    nodes_iterator operator++() {
    bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }

  static nodes_iterator nodes_begin(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.begin());

  static nodes_iterator nodes_end(BoUpSLP *R) {
    return nodes_iterator(R->VectorizableTree.end());

  static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }

template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
  using TreeEntry = BoUpSLP::TreeEntry;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
    raw_string_ostream OS(Str);
    if (isSplat(Entry->Scalars)) {
      OS << "<splat> " << *Entry->Scalars[0];
    for (auto V : Entry->Scalars) {
          R->ExternalUses.begin(), R->ExternalUses.end(),
          [&](const BoUpSLP::ExternalUser &EU) { return EU.Scalar == V; }))

  static std::string getNodeAttributes(const TreeEntry *Entry,
    if (Entry->NeedToGather)

} // end namespace llvm
void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  buildTree(Roots, ExternallyUsedValues, UserIgnoreLst);

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ExtraValueToDebugLocsMap &ExternallyUsedValues,
                        ArrayRef<Value *> UserIgnoreLst) {
  UserIgnoreList = UserIgnoreLst;
  if (!allSameType(Roots))
  buildTree_rec(Roots, 0, EdgeInfo());
  // Collect the values that we need to extract from the tree.
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)

    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];
      int FoundLane = Lane;
      if (!Entry->ReuseShuffleIndices.empty()) {
            std::distance(Entry->ReuseShuffleIndices.begin(),
                          llvm::find(Entry->ReuseShuffleIndices, FoundLane));

      // Check if the scalar is externally used as an extra arg.
      auto ExtI = ExternallyUsedValues.find(Scalar);
      if (ExtI != ExternallyUsedValues.end()) {
        LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
                          << Lane << " from " << *Scalar << ".\n");
        ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
      for (User *U : Scalar->users()) {
        LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);

        // Skip in-tree scalars that become vectors
        if (TreeEntry *UseEntry = getTreeEntry(U)) {
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
            assert(!UseEntry->NeedToGather && "Bad state");

        // Ignore users in the user ignore list.
        if (is_contained(UserIgnoreList, UserInst))

        LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane "
                          << Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, FoundLane));
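        // Illustrative example (hypothetical values): if %a = load i32 ... is
        // part of a vectorized bundle but also feeds a call outside the tree,
        // an ExternalUser entry is recorded for %a so that an extractelement
        // from the vectorized value can be emitted for that out-of-tree use
        // during code generation.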
void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                            const EdgeInfo &UserTreeIdx) {
  assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");

  InstructionsState S = getSameOpcode(VL);
  if (Depth == RecursionMaxDepth) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false, UserTreeIdx);

  // Don't handle vectors.
  if (S.OpValue->getType()->isVectorTy()) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false, UserTreeIdx);

  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false, UserTreeIdx);

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.getOpcode()) {
    LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false, UserTreeIdx);

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                        << ") is ephemeral.\n");
      newTreeEntry(VL, false, UserTreeIdx);

  // Check if this is a duplicate of another entry.
  if (TreeEntry *E = getTreeEntry(S.OpValue)) {
    LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
    if (!E->isSame(VL)) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
      newTreeEntry(VL, false, UserTreeIdx);
    // Record the reuse of the tree node. FIXME, currently this is only used to
    // properly draw the graph rather than for the actual vectorization.
    E->UserTreeIndices.push_back(UserTreeIdx);
    LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
    E->trySetUserTEOperand(UserTreeIdx, VL, None);

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    auto *I = dyn_cast<Instruction>(VL[i]);
    if (getTreeEntry(I)) {
      LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *VL[i]
                        << ") is already in tree.\n");
      newTreeEntry(VL, false, UserTreeIdx);

  // If any of the scalars is marked as a value that needs to stay scalar, then
  // we need to gather the scalars.
  // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i]) || is_contained(UserIgnoreList, VL[i])) {
      LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false, UserTreeIdx);
  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  auto *VL0 = cast<Instruction>(S.OpValue);
  BasicBlock *BB = VL0->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false, UserTreeIdx);

  // Check that every instruction appears once in this bundle.
  SmallVector<unsigned, 4> ReuseShuffleIndicies;
  SmallVector<Value *, 4> UniqueValues;
  DenseMap<Value *, unsigned> UniquePositions;
  for (Value *V : VL) {
    auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
    ReuseShuffleIndicies.emplace_back(Res.first->second);
    UniqueValues.emplace_back(V);
  if (UniqueValues.size() == VL.size()) {
    ReuseShuffleIndicies.clear();
    LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
    if (UniqueValues.size() <= 1 || !llvm::isPowerOf2_32(UniqueValues.size())) {
      LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
      newTreeEntry(VL, false, UserTreeIdx);
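  // Illustrative example (hypothetical values): for VL = {%a, %b, %a, %b} the
  // unique scalars are {%a, %b} and ReuseShuffleIndicies becomes {0, 1, 0, 1};
  // the bundle is built from the unique values and a shuffle recreates the
  // duplicated lanes. If the number of unique scalars is 1 or not a power of
  // two (e.g. three distinct values in a 4-wide bundle), the bundle is
  // gathered instead.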
  auto &BSRef = BlocksSchedules[BB];
    BSRef = llvm::make_unique<BlockScheduling>(BB);

  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this, S)) {
    LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    assert((!BS.getScheduleData(VL0) ||
            !BS.getScheduleData(VL0)->isPartOfBundle()) &&
           "tryScheduleBundle should cancelScheduling on failure");
    newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
  LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  unsigned ShuffleOrOp = S.isAltShuffle() ?
                         (unsigned) Instruction::ShuffleVector : S.getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          Instruction *Term = dyn_cast<Instruction>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(
                  PH->getIncomingBlock(i)));
          if (Term && Term->isTerminator()) {
                << "SLP: Need to swizzle PHINodes (terminator use).\n");
            BS.cancelScheduling(VL, VL0);
            newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        // Prepare the operand vector.
          Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1, {TE, i});
    case Instruction::ExtractValue:
    case Instruction::ExtractElement: {
      OrdersType CurrentOrder;
      bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
        LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
        ++NumOpsWantToKeepOriginalOrder;
        newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
                     ReuseShuffleIndicies);
        // This is a special case, as it does not gather, but at the same time
        // we are not extending buildTree_rec() towards the operands.
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0, ReuseShuffleIndicies);
      if (!CurrentOrder.empty()) {
          dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
          for (unsigned Idx : CurrentOrder)
            dbgs() << " " << Idx;
        // Insert new order with initial value 0, if it does not exist,
        // otherwise return the iterator to the existing one.
        auto StoredCurrentOrderAndNum =
            NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
        ++StoredCurrentOrderAndNum->getSecond();
        newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx, ReuseShuffleIndicies,
                     StoredCurrentOrderAndNum->getFirst());
        // This is a special case, as it does not gather, but at the same time
        // we are not extending buildTree_rec() towards the operands.
        Op0.assign(VL.size(), VL0->getOperand(0));
        VectorizableTree.back()->setOperand(0, Op0, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
      newTreeEntry(VL, /*Vectorized=*/false, UserTreeIdx, ReuseShuffleIndicies);
      BS.cancelScheduling(VL, VL0);
    case Instruction::Load: {
      // Check that a vectorized load would load the same memory as a scalar
      // load. For example, we don't want to vectorize loads that are smaller
      // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
      // treats loading/storing it as an i8 struct. If we vectorize loads/stores
      // from such a struct, we read/write packed bits disagreeing with the
      // unvectorized version.
      Type *ScalarTy = VL0->getType();

      if (DL->getTypeSizeInBits(ScalarTy) !=
          DL->getTypeAllocSizeInBits(ScalarTy)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");

      // Make sure all loads in the bundle are simple - we can't vectorize
      // atomic or volatile loads.
      SmallVector<Value *, 4> PointerOps(VL.size());
      auto POIter = PointerOps.begin();
      for (Value *V : VL) {
        auto *L = cast<LoadInst>(V);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
        *POIter = L->getPointerOperand();

      OrdersType CurrentOrder;
      // Check the order of pointer operands.
      if (llvm::sortPtrAccesses(PointerOps, *DL, *SE, CurrentOrder)) {
        if (CurrentOrder.empty()) {
          Ptr0 = PointerOps.front();
          PtrN = PointerOps.back();
          Ptr0 = PointerOps[CurrentOrder.front()];
          PtrN = PointerOps[CurrentOrder.back()];
        const SCEV *Scev0 = SE->getSCEV(Ptr0);
        const SCEV *ScevN = SE->getSCEV(PtrN);
            dyn_cast<SCEVConstant>(SE->getMinusSCEV(ScevN, Scev0));
        uint64_t Size = DL->getTypeAllocSize(ScalarTy);
        // Check that the sorted loads are consecutive.
        if (Diff && Diff->getAPInt().getZExtValue() == (VL.size() - 1) * Size) {
          if (CurrentOrder.empty()) {
            // Original loads are consecutive and do not require reordering.
            ++NumOpsWantToKeepOriginalOrder;
            newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
                         ReuseShuffleIndicies);
            LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
            auto I = NumOpsWantToKeepOrder.try_emplace(CurrentOrder).first;
            newTreeEntry(VL, /*Vectorized=*/true, UserTreeIdx,
                         ReuseShuffleIndicies, I->getFirst());
            LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");

      LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
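      // Illustrative example (hypothetical pointers): for four i32 loads from
      // %p, %p+1, %p+2, %p+3 (in element units), SCEV gives a constant
      // distance of 12 bytes between the first and last pointer, which equals
      // (VL.size() - 1) * sizeof(i32), so the bundle is treated as one
      // consecutive wide load. If the pointers arrive out of order,
      // sortPtrAccesses records CurrentOrder and the bundle is kept as
      // "jumbled" loads that will need a reordering shuffle.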
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
      for (unsigned i = 0; i < VL.size(); ++i) {
        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
        if (Ty != SrcTy || !isValidElementType(Ty)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
              << "SLP: Gathering casts with different src types.\n");

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        // Prepare the operand vector.
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
    case Instruction::ICmp:
    case Instruction::FCmp: {
      // Check that all of the compares have the same predicate.
      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
      Type *ComparedTy = VL0->getOperand(0)->getType();
      for (unsigned i = 1, e = VL.size(); i < e; ++i) {
        CmpInst *Cmp = cast<CmpInst>(VL[i]);
        if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
            Cmp->getOperand(0)->getType() != ComparedTy) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
              << "SLP: Gathering cmp with different predicate.\n");

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");

      ValueList Left, Right;
      if (cast<CmpInst>(VL0)->isCommutative()) {
        // Commutative predicate - collect + sort operands of the instructions
        // so that each side is more likely to have the same opcode.
        assert(P0 == SwapP0 && "Commutative Predicate mismatch");
        reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
        // Collect operands - commute if it uses the swapped predicate.
        for (Value *V : VL) {
          auto *Cmp = cast<CmpInst>(V);
          Value *LHS = Cmp->getOperand(0);
          Value *RHS = Cmp->getOperand(1);
          if (Cmp->getPredicate() != P0)
            std::swap(LHS, RHS);
          Left.push_back(LHS);
          Right.push_back(RHS);

      buildTree_rec(Left, Depth + 1, {TE, 0});
      buildTree_rec(Right, Depth + 1, {TE, 1});
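      // Illustrative example (hypothetical values): with P0 = icmp sgt, a lane
      // written as "icmp slt %b1, %a1" uses the swapped predicate, so its
      // operands are commuted to (%a1, %b1); after the loop above, Left and
      // Right hold operands that all participate in the same comparison
      // direction.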
    case Instruction::Select:
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");

      // Sort operands of the instructions so that each side is more likely to
      // have the same opcode.
      if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
        buildTree_rec(Left, Depth + 1, {TE, 0});
        buildTree_rec(Right, Depth + 1, {TE, 1});

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        // Prepare the operand vector.
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
    case Instruction::GetElementPtr: {
      // We don't combine GEPs with complicated (nested) indexing.
      for (unsigned j = 0; j < VL.size(); ++j) {
        if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
          LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);

      // We can't combine several GEPs into one vector if they operate on
      // different types.
      Type *Ty0 = VL0->getOperand(0)->getType();
      for (unsigned j = 0; j < VL.size(); ++j) {
        Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
            << "SLP: not-vectorizable GEP (different types).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);

      // We don't combine GEPs with non-constant indexes.
      for (unsigned j = 0; j < VL.size(); ++j) {
        auto Op = cast<Instruction>(VL[j])->getOperand(1);
        if (!isa<ConstantInt>(Op)) {
              << "SLP: not-vectorizable GEP (non-constant indexes).\n");
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
      for (unsigned i = 0, e = 2; i < e; ++i) {
        // Prepare the operand vector.
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
    case Instruction::Store: {
      // Check if the stores are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
        if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");

        Operands.push_back(cast<Instruction>(j)->getOperand(0));

      buildTree_rec(Operands, Depth + 1, {TE, 0});
    case Instruction::Call: {
      // Check if the calls are all to the same vectorizable intrinsic.
      CallInst *CI = cast<CallInst>(VL0);
      // Check if this is an Intrinsic call or something that can be
      // represented by an intrinsic call
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      if (!isTriviallyVectorizable(ID)) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
      Function *Int = CI->getCalledFunction();
      unsigned NumArgs = CI->getNumArgOperands();
      SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr);
      for (unsigned j = 0; j != NumArgs; ++j)
        if (hasVectorInstrinsicScalarOpd(ID, j))
          ScalarArgs[j] = CI->getArgOperand(j);
      for (unsigned i = 1, e = VL.size(); i != e; ++i) {
        CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
        if (!CI2 || CI2->getCalledFunction() != Int ||
            getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
            !CI->hasIdenticalOperandBundleSchema(*CI2)) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
        // Some intrinsics have scalar arguments and should be same in order for
        // them to be vectorized.
        for (unsigned j = 0; j != NumArgs; ++j) {
          if (hasVectorInstrinsicScalarOpd(ID, j)) {
            Value *A1J = CI2->getArgOperand(j);
            if (ScalarArgs[j] != A1J) {
              BS.cancelScheduling(VL, VL0);
              newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
              LLVM_DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                                << " argument " << ScalarArgs[j] << "!=" << A1J
        // Verify that the bundle operands are identical between the two calls.
        if (CI->hasOperandBundles() &&
            !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
                        CI->op_begin() + CI->getBundleOperandsEndIndex(),
                        CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
          BS.cancelScheduling(VL, VL0);
          newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
          LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:"
                            << *CI << "!=" << *VL[i] << '\n');

      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
        // Prepare the operand vector.
        for (Value *j : VL) {
          CallInst *CI2 = dyn_cast<CallInst>(j);
          Operands.push_back(CI2->getArgOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});
    case Instruction::ShuffleVector: {
      // If this is not an alternate sequence of opcode like add-sub
      // then do not vectorize this instruction.
      if (!S.isAltShuffle()) {
        BS.cancelScheduling(VL, VL0);
        newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
        LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
      auto *TE = newTreeEntry(VL, true, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");

      // Reorder operands if reordering would enable vectorization.
      if (isa<BinaryOperator>(VL0)) {
        ValueList Left, Right;
        reorderInputsAccordingToOpcode(VL, Left, Right, *DL, *SE);
        buildTree_rec(Left, Depth + 1, {TE, 0});
        buildTree_rec(Right, Depth + 1, {TE, 1});

      for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
        // Prepare the operand vector.
          Operands.push_back(cast<Instruction>(j)->getOperand(i));

        buildTree_rec(Operands, Depth + 1, {TE, i});

      BS.cancelScheduling(VL, VL0);
      newTreeEntry(VL, false, UserTreeIdx, ReuseShuffleIndicies);
      LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
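      // Illustrative example of an alternate sequence (hypothetical values):
      // VL = {add %a0, %b0; sub %a1, %b1; add %a2, %b2; sub %a3, %b3} has main
      // opcode 'add' and alternate opcode 'sub'; it is vectorized as one
      // vector add, one vector sub, and a blending shufflevector that selects
      // the add result in even lanes and the sub result in odd lanes.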
unsigned BoUpSLP::canMapToVector(Type *T, const DataLayout &DL) const {
  auto *ST = dyn_cast<StructType>(T);
    N = ST->getNumElements();
    EltTy = *ST->element_begin();
    N = cast<ArrayType>(T)->getNumElements();
    EltTy = cast<ArrayType>(T)->getElementType();
  if (!isValidElementType(EltTy))
  uint64_t VTSize = DL.getTypeStoreSizeInBits(VectorType::get(EltTy, N));
  if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
      VTSize != DL.getTypeStoreSizeInBits(T))
  // Check that struct is homogeneous.
  for (const auto *Ty : ST->elements())

bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
                              SmallVectorImpl<unsigned> &CurrentOrder) const {
  Instruction *E0 = cast<Instruction>(OpValue);
  assert(E0->getOpcode() == Instruction::ExtractElement ||
         E0->getOpcode() == Instruction::ExtractValue);
  assert(E0->getOpcode() == getSameOpcode(VL).getOpcode() && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  Value *Vec = E0->getOperand(0);

  CurrentOrder.clear();

  // We have to extract from a vector/aggregate with the same number of elements.
  if (E0->getOpcode() == Instruction::ExtractValue) {
    const DataLayout &DL = E0->getModule()->getDataLayout();
    NElts = canMapToVector(Vec->getType(), DL);
    // Check if load can be rewritten as load of vector.
    LoadInst *LI = dyn_cast<LoadInst>(Vec);
    if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
    NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())

  // Check that all of the indices extract from the correct offset.
  bool ShouldKeepOrder = true;
  unsigned E = VL.size();
  // Assign to all items the initial value E + 1 so we can check if the extract
  // instruction index was used already.
  // Also, later we can check that all the indices are used and we have a
  // consecutive access in the extract instructions, by checking that no
  // element of CurrentOrder still has value E + 1.
  CurrentOrder.assign(E, E + 1);
  for (; I < E; ++I) {
    auto *Inst = cast<Instruction>(VL[I]);
    if (Inst->getOperand(0) != Vec)
    Optional<unsigned> Idx = getExtractIndex(Inst);
    const unsigned ExtIdx = *Idx;
      if (ExtIdx >= E || CurrentOrder[ExtIdx] != E + 1)
      ShouldKeepOrder = false;
      CurrentOrder[ExtIdx] = I;
      if (CurrentOrder[I] != E + 1)
      CurrentOrder[I] = I;
    CurrentOrder.clear();
  return ShouldKeepOrder;
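// Illustrative example (hypothetical bundle): for extracts of lanes
// {2, 0, 1, 3} from the same 4-element vector, CurrentOrder ends up as
// {1, 2, 0, 3} (the bundle position recorded at each extract index),
// ShouldKeepOrder is false, and the caller records the order in
// NumOpsWantToKeepOrder rather than treating the extracts as already in
// place.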
bool BoUpSLP::areAllUsersVectorized(Instruction *I) const {
  return I->hasOneUse() ||
         std::all_of(I->user_begin(), I->user_end(), [this](User *U) {
           return ScalarToTreeEntry.count(U) > 0;

int BoUpSLP::getEntryCost(TreeEntry *E) {
  ArrayRef<Value *> VL = E->Scalars;

  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  else if (CmpInst *CI = dyn_cast<CmpInst>(VL[0]))
    ScalarTy = CI->getOperand(0)->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  // If we have computed a smaller type for the expression, update VecTy so
  // that the costs will be accurate.
  if (MinBWs.count(VL[0]))
    VecTy = VectorType::get(
        IntegerType::get(F->getContext(), MinBWs[VL[0]].first), VL.size());

  unsigned ReuseShuffleNumbers = E->ReuseShuffleIndices.size();
  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
  int ReuseShuffleCost = 0;
  if (NeedToShuffleReuses) {
        TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
  if (E->NeedToGather) {
    if (allConstant(VL))
      return ReuseShuffleCost +
             TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
    if (getSameOpcode(VL).getOpcode() == Instruction::ExtractElement &&
        allSameType(VL) && allSameBlock(VL)) {
      Optional<TargetTransformInfo::ShuffleKind> ShuffleKind = isShuffle(VL);
      if (ShuffleKind.hasValue()) {
        int Cost = TTI->getShuffleCost(ShuffleKind.getValue(), VecTy);
        for (auto *V : VL) {
          // If all users of instruction are going to be vectorized and this
          // instruction itself is not going to be vectorized, consider this
          // instruction as dead and remove its cost from the final cost of the
          // vectorized tree.
          if (areAllUsersVectorized(cast<Instruction>(V)) &&
              !ScalarToTreeEntry.count(V)) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(V)->getIndexOperand());
            Cost -= TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                            IO->getZExtValue());
        return ReuseShuffleCost + Cost;
    return ReuseShuffleCost + getGatherCost(VL);
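  // Illustrative note: each case below returns "vector cost - scalar cost"
  // (plus any reuse-shuffle adjustment), so a negative value means the bundle
  // is cheaper as a vector. For example, four scalar i32 adds costing 1 each
  // versus one 4 x i32 vector add costing 1 yields 1 - 4 = -3.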
  InstructionsState S = getSameOpcode(VL);
  assert(S.getOpcode() && allSameType(VL) && allSameBlock(VL) && "Invalid VL");
  Instruction *VL0 = cast<Instruction>(S.OpValue);
  unsigned ShuffleOrOp = S.isAltShuffle() ?
                         (unsigned) Instruction::ShuffleVector : S.getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI:

    case Instruction::ExtractValue:
    case Instruction::ExtractElement:
      if (NeedToShuffleReuses) {
        for (unsigned I : E->ReuseShuffleIndices) {
          if (ShuffleOrOp == Instruction::ExtractElement) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(VL[I])->getIndexOperand());
            Idx = IO->getZExtValue();
            ReuseShuffleCost -= TTI->getVectorInstrCost(
                Instruction::ExtractElement, VecTy, Idx);
            ReuseShuffleCost -= TTI->getVectorInstrCost(
                Instruction::ExtractElement, VecTy, Idx);
        Idx = ReuseShuffleNumbers;
        for (Value *V : VL) {
          if (ShuffleOrOp == Instruction::ExtractElement) {
            auto *IO = cast<ConstantInt>(
                cast<ExtractElementInst>(V)->getIndexOperand());
            Idx = IO->getZExtValue();
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, Idx);
      if (!E->NeedToGather) {
        int DeadCost = ReuseShuffleCost;
        if (!E->ReorderIndices.empty()) {
          // TODO: Merge this shuffle with the ReuseShuffleCost.
          DeadCost += TTI->getShuffleCost(
              TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
        for (unsigned i = 0, e = VL.size(); i < e; ++i) {
          Instruction *E = cast<Instruction>(VL[i]);
          // If all users are going to be vectorized, instruction can be
          // considered as dead.
          // The same, if it has only one user, it will be vectorized for sure.
          if (areAllUsersVectorized(E)) {
            // Take credit for instruction that will become dead.
            if (E->hasOneUse()) {
              Instruction *Ext = E->user_back();
              if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
                  all_of(Ext->users(),
                         [](User *U) { return isa<GetElementPtrInst>(U); })) {
                // Use getExtractWithExtendCost() to calculate the cost of
                // extractelement/ext pair.
                DeadCost -= TTI->getExtractWithExtendCost(
                    Ext->getOpcode(), Ext->getType(), VecTy, i);
                // Add back the cost of s|zext which is subtracted separately.
                DeadCost += TTI->getCastInstrCost(
                    Ext->getOpcode(), Ext->getType(), E->getType(), Ext);
              TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
      return ReuseShuffleCost + getGatherCost(VL);
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      Type *SrcTy = VL0->getOperand(0)->getType();
          TTI->getCastInstrCost(S.getOpcode(), ScalarTy, SrcTy, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;

      // Calculate the cost of this instruction.
      int ScalarCost = VL.size() * ScalarEltCost;

      VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
      // Check if the values are candidates to demote.
      if (!MinBWs.count(VL0) || VecTy != SrcVecTy) {
        VecCost = ReuseShuffleCost +
                  TTI->getCastInstrCost(S.getOpcode(), VecTy, SrcVecTy, VL0);
      return VecCost - ScalarCost;
    case Instruction::FCmp:
    case Instruction::ICmp:
    case Instruction::Select: {
      // Calculate the cost of this instruction.
      int ScalarEltCost = TTI->getCmpSelInstrCost(S.getOpcode(), ScalarTy,
                                                  Builder.getInt1Ty(), VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
      int VecCost = TTI->getCmpSelInstrCost(S.getOpcode(), VecTy, MaskTy, VL0);
      return ReuseShuffleCost + VecCost - ScalarCost;
    case Instruction::FNeg:
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      // Certain instructions can be cheaper to vectorize if they have a
      // constant second vector operand.
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
      TargetTransformInfo::OperandValueProperties Op1VP =
          TargetTransformInfo::OP_None;
      TargetTransformInfo::OperandValueProperties Op2VP =
          TargetTransformInfo::OP_PowerOf2;

      // If all operands are exactly the same ConstantInt then set the
      // operand kind to OK_UniformConstantValue.
      // If instead not all operands are constants, then set the operand kind
      // to OK_AnyValue. If all operands are constants but not the same,
      // then set the operand kind to OK_NonUniformConstantValue.
      ConstantInt *CInt0 = nullptr;
      for (unsigned i = 0, e = VL.size(); i < e; ++i) {
        const Instruction *I = cast<Instruction>(VL[i]);
        unsigned OpIdx = isa<BinaryOperator>(I) ? 1 : 0;
        ConstantInt *CInt = dyn_cast<ConstantInt>(I->getOperand(OpIdx));
          Op2VK = TargetTransformInfo::OK_AnyValue;
          Op2VP = TargetTransformInfo::OP_None;
        if (Op2VP == TargetTransformInfo::OP_PowerOf2 &&
            !CInt->getValue().isPowerOf2())
          Op2VP = TargetTransformInfo::OP_None;
          Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;

      SmallVector<const Value *, 4> Operands(VL0->operand_values());
      int ScalarEltCost = TTI->getArithmeticInstrCost(
          S.getOpcode(), ScalarTy, Op1VK, Op2VK, Op1VP, Op2VP, Operands);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
      int VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy, Op1VK,
                                                Op2VK, Op1VP, Op2VP, Operands);
      return ReuseShuffleCost + VecCost - ScalarCost;
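      // Illustrative example (hypothetical bundle): for
      // {shl %a0, 4; shl %a1, 4; shl %a2, 4; shl %a3, 4} every second operand
      // is the same power-of-two constant, so Op2VK stays
      // OK_UniformConstantValue and Op2VP stays OP_PowerOf2. If one lane used
      // a different constant, the kind would drop to
      // OK_NonUniformConstantValue, and a non-constant operand drops it to
      // OK_AnyValue.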
    case Instruction::GetElementPtr: {
      TargetTransformInfo::OperandValueKind Op1VK =
          TargetTransformInfo::OK_AnyValue;
      TargetTransformInfo::OperandValueKind Op2VK =
          TargetTransformInfo::OK_UniformConstantValue;
          TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      int ScalarCost = VecTy->getNumElements() * ScalarEltCost;
          TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
      return ReuseShuffleCost + VecCost - ScalarCost;
    case Instruction::Load: {
      // Cost of wide load - cost of scalar loads.
      unsigned alignment = cast<LoadInst>(VL0)->getAlignment();
          TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      int ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
          TTI->getMemoryOpCost(Instruction::Load, VecTy, alignment, 0, VL0);
      if (!E->ReorderIndices.empty()) {
        // TODO: Merge this shuffle with the ReuseShuffleCost.
        VecLdCost += TTI->getShuffleCost(
            TargetTransformInfo::SK_PermuteSingleSrc, VecTy);
      return ReuseShuffleCost + VecLdCost - ScalarLdCost;
    case Instruction::Store: {
      // We know that we can merge the stores. Calculate the cost.
      unsigned alignment = cast<StoreInst>(VL0)->getAlignment();
          TTI->getMemoryOpCost(Instruction::Store, ScalarTy, alignment, 0, VL0);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      int ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
          TTI->getMemoryOpCost(Instruction::Store, VecTy, alignment, 0, VL0);
      return ReuseShuffleCost + VecStCost - ScalarStCost;
    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);

      // Calculate the cost of the scalar and vector calls.
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op)
        ScalarTys.push_back(CI->getArgOperand(op)->getType());
      if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
        FMF = FPMO->getFastMathFlags();
          TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys, FMF);
      if (NeedToShuffleReuses) {
        ReuseShuffleCost -= (ReuseShuffleNumbers - VL.size()) * ScalarEltCost;
      int ScalarCallCost = VecTy->getNumElements() * ScalarEltCost;

      SmallVector<Value *, 4> Args(CI->arg_operands());
      int VecCallCost = TTI->getIntrinsicInstrCost(ID, CI->getType(), Args, FMF,
                                                   VecTy->getNumElements());

      LLVM_DEBUG(dbgs() << "SLP: Call cost " << VecCallCost - ScalarCallCost
                        << " (" << VecCallCost << "-" << ScalarCallCost << ")"
                        << " for " << *CI << "\n");

      return ReuseShuffleCost + VecCallCost - ScalarCallCost;
    case Instruction::ShuffleVector: {
      assert(S.isAltShuffle() &&
             ((Instruction::isBinaryOp(S.getOpcode()) &&
               Instruction::isBinaryOp(S.getAltOpcode())) ||
              (Instruction::isCast(S.getOpcode()) &&
               Instruction::isCast(S.getAltOpcode()))) &&
             "Invalid Shuffle Vector Operand");
      if (NeedToShuffleReuses) {
        for (unsigned Idx : E->ReuseShuffleIndices) {
          Instruction *I = cast<Instruction>(VL[Idx]);
          ReuseShuffleCost -= TTI->getInstructionCost(
              I, TargetTransformInfo::TCK_RecipThroughput);
        for (Value *V : VL) {
          Instruction *I = cast<Instruction>(V);
          ReuseShuffleCost += TTI->getInstructionCost(
              I, TargetTransformInfo::TCK_RecipThroughput);
      for (Value *i : VL) {
        Instruction *I = cast<Instruction>(i);
        assert(S.isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
        ScalarCost += TTI->getInstructionCost(
            I, TargetTransformInfo::TCK_RecipThroughput);
      // VecCost is equal to sum of the cost of creating 2 vectors
      // and the cost of creating shuffle.
      if (Instruction::isBinaryOp(S.getOpcode())) {
        VecCost = TTI->getArithmeticInstrCost(S.getOpcode(), VecTy);
        VecCost += TTI->getArithmeticInstrCost(S.getAltOpcode(), VecTy);
        Type *Src0SclTy = S.MainOp->getOperand(0)->getType();
        Type *Src1SclTy = S.AltOp->getOperand(0)->getType();
        VectorType *Src0Ty = VectorType::get(Src0SclTy, VL.size());
        VectorType *Src1Ty = VectorType::get(Src1SclTy, VL.size());
        VecCost = TTI->getCastInstrCost(S.getOpcode(), VecTy, Src0Ty);
        VecCost += TTI->getCastInstrCost(S.getAltOpcode(), VecTy, Src1Ty);
      VecCost += TTI->getShuffleCost(TargetTransformInfo::SK_Select, VecTy, 0);
      return ReuseShuffleCost + VecCost - ScalarCost;
      llvm_unreachable("Unknown instruction");
bool BoUpSLP::isFullyVectorizableTinyTree() const {
  LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
                    << VectorizableTree.size() << " is fully vectorizable.\n");

  // We only handle trees of heights 1 and 2.
  if (VectorizableTree.size() == 1 && !VectorizableTree[0]->NeedToGather)

  if (VectorizableTree.size() != 2)

  // Handle splat and all-constants stores.
  if (!VectorizableTree[0]->NeedToGather &&
      (allConstant(VectorizableTree[1]->Scalars) ||
       isSplat(VectorizableTree[1]->Scalars)))

  // Gathering cost would be too much for tiny trees.
  if (VectorizableTree[0]->NeedToGather || VectorizableTree[1]->NeedToGather)

bool BoUpSLP::isTreeTinyAndNotFullyVectorizable() const {
  // We can vectorize the tree if its size is greater than or equal to the
  // minimum size specified by the MinTreeSize command line option.
  if (VectorizableTree.size() >= MinTreeSize)

  // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
  // can vectorize it if we can prove it fully vectorizable.
  if (isFullyVectorizableTinyTree())

  assert(VectorizableTree.empty()
             ? ExternalUses.empty()
             : true && "We shouldn't have any external users");

  // Otherwise, we can't vectorize the tree. It is both tiny and not fully
  // vectorizable.
int BoUpSLP::getSpillCost() const {
  // Walk from the bottom of the tree to the top, tracking which values are
  // live. When we see a call instruction that is not part of our tree,
  // query TTI to see if there is a cost to keeping values live over it
  // (for example, if spills and fills are required).
  unsigned BundleWidth = VectorizableTree.front()->Scalars.size();

  SmallPtrSet<Instruction *, 4> LiveValues;
  Instruction *PrevInst = nullptr;

  for (const auto &TEPtr : VectorizableTree) {
    Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);

    // Update LiveValues.
    LiveValues.erase(PrevInst);
    for (auto &J : PrevInst->operands()) {
      if (isa<Instruction>(&*J) && getTreeEntry(&*J))
        LiveValues.insert(cast<Instruction>(&*J));

      dbgs() << "SLP: #LV: " << LiveValues.size();
      for (auto *X : LiveValues)
        dbgs() << " " << X->getName();
      dbgs() << ", Looking at ";

    // Now find the sequence of instructions between PrevInst and Inst.
    unsigned NumCalls = 0;
    BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
        PrevInst->getIterator().getReverse();
    while (InstIt != PrevInstIt) {
      if (PrevInstIt == PrevInst->getParent()->rend()) {
        PrevInstIt = Inst->getParent()->rbegin();

      // Debug information doesn't impact spill cost.
      if ((isa<CallInst>(&*PrevInstIt) &&
           !isa<DbgInfoIntrinsic>(&*PrevInstIt)) &&
          &*PrevInstIt != PrevInst)

      SmallVector<Type *, 4> V;
      for (auto *II : LiveValues)
        V.push_back(VectorType::get(II->getType(), BundleWidth));
      Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
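      // Illustrative example: if the tree contains a load bundle whose results
      // are consumed by an add bundle several instructions later, and a call
      // to an outside function sits between them, the loaded values are live
      // across that call. The target may have to spill and refill the
      // (future) vector register around the call, and
      // getCostOfKeepingLiveOverCall folds that penalty into the total cost.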
int BoUpSLP::getTreeCost() {
  LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
                    << VectorizableTree.size() << ".\n");

  unsigned BundleWidth = VectorizableTree[0]->Scalars.size();

  for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
    TreeEntry &TE = *VectorizableTree[I].get();

    // We create duplicate tree entries for gather sequences that have multiple
    // uses. However, we should not compute the cost of duplicate sequences.
    // For example, if we have a build vector (i.e., insertelement sequence)
    // that is used by more than one vector instruction, we only need to
    // compute the cost of the insertelement instructions once. The redundant
    // instructions will be eliminated by CSE.
    //
    // We should consider not creating duplicate tree entries for gather
    // sequences, and instead add additional edges to the tree representing
    // their uses. Since such an approach results in fewer total entries,
    // existing heuristics based on tree size may yield different results.
    if (TE.NeedToGather &&
            std::next(VectorizableTree.begin(), I + 1), VectorizableTree.end(),
            [TE](const std::unique_ptr<TreeEntry> &EntryPtr) {
              return EntryPtr->NeedToGather && EntryPtr->isSame(TE.Scalars);

    int C = getEntryCost(&TE);
    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
                      << " for bundle that starts with " << *TE.Scalars[0]

  SmallPtrSet<Value *, 16> ExtractCostCalculated;
  int ExtractCost = 0;
  for (ExternalUser &EU : ExternalUses) {
    // We only add extract cost once for the same scalar.
    if (!ExtractCostCalculated.insert(EU.Scalar).second)

    // Uses by ephemeral values are free (because the ephemeral value will be
    // removed prior to code generation, and so the extraction will be
    // removed as well).
    if (EphValues.count(EU.User))

    // If we plan to rewrite the tree in a smaller type, we will need to sign
    // extend the extracted value back to the original type. Here, we account
    // for the extract and the added cost of the sign extend if needed.
    auto *VecTy = VectorType::get(EU.Scalar->getType(), BundleWidth);
    auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
    if (MinBWs.count(ScalarRoot)) {
      auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
          MinBWs[ScalarRoot].second ? Instruction::SExt : Instruction::ZExt;
      VecTy = VectorType::get(MinTy, BundleWidth);
      ExtractCost += TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
          TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, EU.Lane);

  int SpillCost = getSpillCost();
  Cost += SpillCost + ExtractCost;

    raw_string_ostream OS(Str);
    OS << "SLP: Spill Cost = " << SpillCost << ".\n"
       << "SLP: Extract Cost = " << ExtractCost << ".\n"
       << "SLP: Total Cost = " << Cost << ".\n";
  LLVM_DEBUG(dbgs() << Str);
    ViewGraph(this, "SLP" + F->getName(), false, Str);
int BoUpSLP::getGatherCost(Type *Ty,
                           const DenseSet<unsigned> &ShuffledIndices) const {
  for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
    if (!ShuffledIndices.count(i))
      Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
  if (!ShuffledIndices.empty())
    Cost += TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc, Ty);

int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) const {
  // Find the type of the operands in VL.
  Type *ScalarTy = VL[0]->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
  // Find the cost of inserting/extracting values from the vector.
  // Check if the same elements are inserted several times and count them as
  // shuffle candidates.
  DenseSet<unsigned> ShuffledElements;
  DenseSet<Value *> UniqueElements;
  // Iterate in reverse order to consider insert elements with the high cost.
  for (unsigned I = VL.size(); I > 0; --I) {
    unsigned Idx = I - 1;
    if (!UniqueElements.insert(VL[Idx]).second)
      ShuffledElements.insert(Idx);
  return getGatherCost(VecTy, ShuffledElements);

// Perform operand reordering on the instructions in VL and return the reordered
// operands in Left and Right.
void BoUpSLP::reorderInputsAccordingToOpcode(
    ArrayRef<Value *> VL, SmallVectorImpl<Value *> &Left,
    SmallVectorImpl<Value *> &Right, const DataLayout &DL,
    ScalarEvolution &SE) {
  VLOperands Ops(VL, DL, SE);
  // Reorder the operands in place.
  Left = Ops.getVL(0);
  Right = Ops.getVL(1);
void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL,
                                        const InstructionsState &S) {
  // Get the basic block this bundle is in. All instructions in the bundle
  // should be in this block.
  auto *Front = cast<Instruction>(S.OpValue);
  auto *BB = Front->getParent();
  assert(llvm::all_of(make_range(VL.begin(), VL.end()), [=](Value *V) -> bool {
    auto *I = cast<Instruction>(V);
    return !S.isOpcodeOrAlt(I) || I->getParent() == BB;
  }));

  // The last instruction in the bundle in program order.
  Instruction *LastInst = nullptr;

  // Find the last instruction. The common case should be that BB has been
  // scheduled, and the last instruction is VL.back(). So we start with
  // VL.back() and iterate over schedule data until we reach the end of the
  // bundle. The end of the bundle is marked by null ScheduleData.
  if (BlocksSchedules.count(BB)) {
    auto *Bundle =
        BlocksSchedules[BB]->getScheduleData(isOneOf(S, VL.back()));
    if (Bundle && Bundle->isPartOfBundle())
      for (; Bundle; Bundle = Bundle->NextInBundle)
        if (Bundle->OpValue == Bundle->Inst)
          LastInst = Bundle->Inst;
  }

  // LastInst can still be null at this point if there's either not an entry
  // for BB in BlocksSchedules or there's no ScheduleData available for
  // VL.back(). This can be the case if buildTree_rec aborts for various
  // reasons (e.g., the maximum recursion depth is reached, the maximum region
  // size is reached, etc.). ScheduleData is initialized in the scheduling
  // "dry-run".
  //
  // If this happens, we can still find the last instruction by brute force. We
  // iterate forwards from Front (inclusive) until we either see all
  // instructions in the bundle or reach the end of the block. If Front is the
  // last instruction in program order, LastInst will be set to Front, and we
  // will visit all the remaining instructions in the block.
  //
  // One of the reasons we exit early from buildTree_rec is to place an upper
  // bound on compile-time. Thus, taking an additional compile-time hit here is
  // not ideal. However, this should be exceedingly rare since it requires that
  // we both exit early from buildTree_rec and that the bundle be out-of-order
  // (causing us to iterate all the way to the end of the block).
  if (!LastInst) {
    SmallPtrSet<Value *, 16> Bundle(VL.begin(), VL.end());
    for (auto &I : make_range(BasicBlock::iterator(Front), BB->end())) {
      if (Bundle.erase(&I) && S.isOpcodeOrAlt(&I))
        LastInst = &I;
      if (Bundle.empty())
        break;
    }
  }

  // Set the insertion point after the last instruction in the bundle. Set the
  // debug location to Front.
  Builder.SetInsertPoint(BB, ++LastInst->getIterator());
  Builder.SetCurrentDebugLocation(Front->getDebugLoc());
}
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
  Value *Vec = UndefValue::get(Ty);
  // Generate the 'InsertElement' instruction.
  for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
    Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
    if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
      GatherSeq.insert(Insrt);
      CSEBlocks.insert(Insrt->getParent());

      // Add to our 'need-to-extract' list.
      if (TreeEntry *E = getTreeEntry(VL[i])) {
        // Find which lane we need to extract.
        int FoundLane = -1;
        for (unsigned Lane = 0, LE = E->Scalars.size(); Lane != LE; ++Lane) {
          // Is this the lane of the scalar that we are looking for ?
          if (E->Scalars[Lane] == VL[i]) {
            FoundLane = Lane;
            break;
          }
        }
        assert(FoundLane >= 0 && "Could not find the correct lane");
        if (!E->ReuseShuffleIndices.empty()) {
          FoundLane =
              std::distance(E->ReuseShuffleIndices.begin(),
                            llvm::find(E->ReuseShuffleIndices, FoundLane));
        }
        ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
      }
    }
  }

  return Vec;
}
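// For VL = {a, b, c, d} the emitted sequence is, schematically:
//   %v0 = insertelement <4 x T> undef, T %a, i32 0
//   %v1 = insertelement <4 x T> %v0,   T %b, i32 1
//   %v2 = insertelement <4 x T> %v1,   T %c, i32 2
//   %v3 = insertelement <4 x T> %v2,   T %d, i32 3
// Each insertelement is remembered in GatherSeq/CSEBlocks so that
// optimizeGatherSequence() can hoist and CSE it later.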
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
  InstructionsState S = getSameOpcode(VL);
  if (S.getOpcode()) {
    if (TreeEntry *E = getTreeEntry(S.OpValue)) {
      if (E->isSame(VL)) {
        Value *V = vectorizeTree(E);
        if (VL.size() == E->Scalars.size() && !E->ReuseShuffleIndices.empty()) {
          // We need to get the vectorized value but without shuffle.
          if (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
            V = SV->getOperand(0);
          } else {
            // Reshuffle to get only unique values.
            SmallVector<unsigned, 4> UniqueIdxs;
            SmallSet<unsigned, 4> UsedIdxs;
            for (unsigned Idx : E->ReuseShuffleIndices)
              if (UsedIdxs.insert(Idx).second)
                UniqueIdxs.emplace_back(Idx);
            V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
                                            UniqueIdxs);
          }
        }
        return V;
      }
    }
  }

  Type *ScalarTy = S.OpValue->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
    ScalarTy = SI->getValueOperand()->getType();

  // Check that every instruction appears once in this bundle.
  SmallVector<unsigned, 4> ReuseShuffleIndicies;
  SmallVector<Value *, 4> UniqueValues;
  if (VL.size() > 2) {
    DenseMap<Value *, unsigned> UniquePositions;
    for (Value *V : VL) {
      auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
      ReuseShuffleIndicies.emplace_back(Res.first->second);
      if (Res.second || isa<Constant>(V))
        UniqueValues.emplace_back(V);
    }
    // Do not shuffle single element or if number of unique values is not power
    // of 2.
    if (UniqueValues.size() == VL.size() || UniqueValues.size() <= 1 ||
        !llvm::isPowerOf2_32(UniqueValues.size()))
      ReuseShuffleIndicies.clear();
    else
      VL = UniqueValues;
  }
  VectorType *VecTy = VectorType::get(ScalarTy, VL.size());

  Value *V = Gather(VL, VecTy);
  if (!ReuseShuffleIndicies.empty()) {
    V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                    ReuseShuffleIndicies, "shuffle");
    if (auto *I = dyn_cast<Instruction>(V)) {
      GatherSeq.insert(I);
      CSEBlocks.insert(I->getParent());
    }
  }
  return V;
}
static void inversePermutation(ArrayRef<unsigned> Indices,
                               SmallVectorImpl<unsigned> &Mask) {
  Mask.clear();
  const unsigned E = Indices.size();
  Mask.resize(E);
  for (unsigned I = 0; I < E; ++I)
    Mask[Indices[I]] = I;
}
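// Worked example: Indices = {2, 0, 1} (element 0 of the reordered bundle came
// from position 2, and so on) yields Mask = {1, 2, 0}, i.e. Mask[Indices[I]]
// == I, which is exactly the shuffle mask that undoes the reordering.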
Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
  IRBuilder<>::InsertPointGuard Guard(Builder);

  if (E->VectorizedValue) {
    LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
    return E->VectorizedValue;
  }

  InstructionsState S = getSameOpcode(E->Scalars);
  Instruction *VL0 = cast<Instruction>(S.OpValue);
  Type *ScalarTy = VL0->getType();
  if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
    ScalarTy = SI->getValueOperand()->getType();
  VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());

  bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();

  if (E->NeedToGather) {
    setInsertPointAfterBundle(E->Scalars, S);
    auto *V = Gather(E->Scalars, VecTy);
    if (NeedToShuffleReuses) {
      V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                      E->ReuseShuffleIndices, "shuffle");
      if (auto *I = dyn_cast<Instruction>(V)) {
        GatherSeq.insert(I);
        CSEBlocks.insert(I->getParent());
      }
    }
    E->VectorizedValue = V;
    return V;
  }

  unsigned ShuffleOrOp = S.isAltShuffle() ?
          (unsigned) Instruction::ShuffleVector : S.getOpcode();
  switch (ShuffleOrOp) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);
      Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
      Builder.SetCurrentDebugLocation(PH->getDebugLoc());
      PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
      Value *V = NewPhi;
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;

      // PHINodes may have multiple entries from the same block. We want to
      // visit every block once.
      SmallPtrSet<BasicBlock *, 4> VisitedBBs;

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        BasicBlock *IBB = PH->getIncomingBlock(i);

        if (!VisitedBBs.insert(IBB).second) {
          NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
          continue;
        }

        Builder.SetInsertPoint(IBB->getTerminator());
        Builder.SetCurrentDebugLocation(PH->getDebugLoc());
        Value *Vec = vectorizeTree(E->getOperand(i));
        NewPhi->addIncoming(Vec, IBB);
      }

      assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
             "Invalid number of incoming values");
      return V;
    }

    case Instruction::ExtractElement: {
      if (!E->NeedToGather) {
        Value *V = E->getSingleOperand(0);
        if (!E->ReorderIndices.empty()) {
          SmallVector<unsigned, 4> Mask;
          inversePermutation(E->ReorderIndices, Mask);
          Builder.SetInsertPoint(VL0);
          V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy), Mask,
                                          "reorder_shuffle");
        }
        if (NeedToShuffleReuses) {
          // TODO: Merge this shuffle with the ReorderShuffleMask.
          if (E->ReorderIndices.empty())
            Builder.SetInsertPoint(VL0);
          V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                          E->ReuseShuffleIndices, "shuffle");
        }
        E->VectorizedValue = V;
        return V;
      }
      setInsertPointAfterBundle(E->Scalars, S);
      auto *V = Gather(E->Scalars, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
        if (auto *I = dyn_cast<Instruction>(V)) {
          GatherSeq.insert(I);
          CSEBlocks.insert(I->getParent());
        }
      }
      E->VectorizedValue = V;
      return V;
    }

    case Instruction::ExtractValue: {
      if (!E->NeedToGather) {
        LoadInst *LI = cast<LoadInst>(E->getSingleOperand(0));
        Builder.SetInsertPoint(LI);
        PointerType *PtrTy =
            PointerType::get(VecTy, LI->getPointerAddressSpace());
        Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
        LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
        Value *NewV = propagateMetadata(V, E->Scalars);
        if (!E->ReorderIndices.empty()) {
          SmallVector<unsigned, 4> Mask;
          inversePermutation(E->ReorderIndices, Mask);
          NewV = Builder.CreateShuffleVector(NewV, UndefValue::get(VecTy), Mask,
                                             "reorder_shuffle");
        }
        if (NeedToShuffleReuses) {
          // TODO: Merge this shuffle with the ReorderShuffleMask.
          NewV = Builder.CreateShuffleVector(
              NewV, UndefValue::get(VecTy), E->ReuseShuffleIndices, "shuffle");
        }
        E->VectorizedValue = NewV;
        return NewV;
      }
      setInsertPointAfterBundle(E->Scalars, S);
      auto *V = Gather(E->Scalars, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
        if (auto *I = dyn_cast<Instruction>(V)) {
          GatherSeq.insert(I);
          CSEBlocks.insert(I->getParent());
        }
      }
      E->VectorizedValue = V;
      return V;
    }

    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *InVec = vectorizeTree(E->getOperand(0));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      CastInst *CI = dyn_cast<CastInst>(VL0);
      Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::FCmp:
    case Instruction::ICmp: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *L = vectorizeTree(E->getOperand(0));
      Value *R = vectorizeTree(E->getOperand(1));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
      Value *V;
      if (S.getOpcode() == Instruction::FCmp)
        V = Builder.CreateFCmp(P0, L, R);
      else
        V = Builder.CreateICmp(P0, L, R);

      propagateIRFlags(V, E->Scalars, VL0);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::Select: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *Cond = vectorizeTree(E->getOperand(0));
      Value *True = vectorizeTree(E->getOperand(1));
      Value *False = vectorizeTree(E->getOperand(2));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateSelect(Cond, True, False);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::FNeg: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *Op = vectorizeTree(E->getOperand(0));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateUnOp(
          static_cast<Instruction::UnaryOps>(S.getOpcode()), Op);
      propagateIRFlags(V, E->Scalars, VL0);
      if (auto *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *LHS = vectorizeTree(E->getOperand(0));
      Value *RHS = vectorizeTree(E->getOperand(1));

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V = Builder.CreateBinOp(
          static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS);
      propagateIRFlags(V, E->Scalars, VL0);
      if (auto *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::Load: {
      // Loads are inserted at the head of the tree because we don't want to
      // sink them all the way down past store instructions.
      bool IsReorder = !E->ReorderIndices.empty();
      if (IsReorder) {
        S = getSameOpcode(E->Scalars, E->ReorderIndices.front());
        VL0 = cast<Instruction>(S.OpValue);
      }
      setInsertPointAfterBundle(E->Scalars, S);

      LoadInst *LI = cast<LoadInst>(VL0);
      Type *ScalarLoadTy = LI->getType();
      unsigned AS = LI->getPointerAddressSpace();

      Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
                                            VecTy->getPointerTo(AS));

      // The pointer operand uses an in-tree scalar so we add the new BitCast
      // to ExternalUses list to make sure that an extract will be generated in
      // the future.
      Value *PO = LI->getPointerOperand();
      if (getTreeEntry(PO))
        ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));

      unsigned Alignment = LI->getAlignment();
      LI = Builder.CreateLoad(VecTy, VecPtr);
      if (!Alignment)
        Alignment = DL->getABITypeAlignment(ScalarLoadTy);
      LI->setAlignment(Alignment);
      Value *V = propagateMetadata(LI, E->Scalars);
      if (IsReorder) {
        SmallVector<unsigned, 4> Mask;
        inversePermutation(E->ReorderIndices, Mask);
        V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
                                        Mask, "reorder_shuffle");
      }
      if (NeedToShuffleReuses) {
        // TODO: Merge this shuffle with the ReorderShuffleMask.
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(VL0);
      unsigned Alignment = SI->getAlignment();
      unsigned AS = SI->getPointerAddressSpace();

      setInsertPointAfterBundle(E->Scalars, S);

      Value *VecValue = vectorizeTree(E->getOperand(0));
      Value *ScalarPtr = SI->getPointerOperand();
      Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
      StoreInst *ST = Builder.CreateStore(VecValue, VecPtr);

      // The pointer operand uses an in-tree scalar, so add the new BitCast to
      // ExternalUses to make sure that an extract will be generated in the
      // future.
      if (getTreeEntry(ScalarPtr))
        ExternalUses.push_back(ExternalUser(ScalarPtr, cast<User>(VecPtr), 0));

      if (!Alignment)
        Alignment = DL->getABITypeAlignment(SI->getValueOperand()->getType());

      ST->setAlignment(Alignment);
      Value *V = propagateMetadata(ST, E->Scalars);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::GetElementPtr: {
      setInsertPointAfterBundle(E->Scalars, S);

      Value *Op0 = vectorizeTree(E->getOperand(0));

      std::vector<Value *> OpVecs;
      for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
           ++j) {
        Value *OpVec = vectorizeTree(E->getOperand(j));
        OpVecs.push_back(OpVec);
      }

      Value *V = Builder.CreateGEP(
          cast<GetElementPtrInst>(VL0)->getSourceElementType(), Op0, OpVecs);
      if (Instruction *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);

      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::Call: {
      CallInst *CI = cast<CallInst>(VL0);
      setInsertPointAfterBundle(E->Scalars, S);

      Intrinsic::ID IID = Intrinsic::not_intrinsic;
      if (Function *FI = CI->getCalledFunction())
        IID = FI->getIntrinsicID();

      Value *ScalarArg = nullptr;
      std::vector<Value *> OpVecs;
      for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
        // Some intrinsics have scalar arguments. This argument should not be
        // vectorized.
        if (hasVectorInstrinsicScalarOpd(IID, j)) {
          CallInst *CEI = cast<CallInst>(VL0);
          ScalarArg = CEI->getArgOperand(j);
          OpVecs.push_back(CEI->getArgOperand(j));
          continue;
        }

        Value *OpVec = vectorizeTree(E->getOperand(j));
        LLVM_DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
        OpVecs.push_back(OpVec);
      }

      Module *M = F->getParent();
      Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
      Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
      Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
      SmallVector<OperandBundleDef, 1> OpBundles;
      CI->getOperandBundlesAsDefs(OpBundles);
      Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);

      // The scalar argument uses an in-tree scalar so we add the new
      // vectorized call to ExternalUses list to make sure that an extract will
      // be generated in the future.
      if (ScalarArg && getTreeEntry(ScalarArg))
        ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));

      propagateIRFlags(V, E->Scalars, VL0);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;
      return V;
    }

    case Instruction::ShuffleVector: {
      assert(S.isAltShuffle() &&
             ((Instruction::isBinaryOp(S.getOpcode()) &&
               Instruction::isBinaryOp(S.getAltOpcode())) ||
              (Instruction::isCast(S.getOpcode()) &&
               Instruction::isCast(S.getAltOpcode()))) &&
             "Invalid Shuffle Vector Operand");

      Value *LHS = nullptr, *RHS = nullptr;
      if (Instruction::isBinaryOp(S.getOpcode())) {
        setInsertPointAfterBundle(E->Scalars, S);
        LHS = vectorizeTree(E->getOperand(0));
        RHS = vectorizeTree(E->getOperand(1));
      } else {
        setInsertPointAfterBundle(E->Scalars, S);
        LHS = vectorizeTree(E->getOperand(0));
      }

      if (E->VectorizedValue) {
        LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
        return E->VectorizedValue;
      }

      Value *V0, *V1;
      if (Instruction::isBinaryOp(S.getOpcode())) {
        V0 = Builder.CreateBinOp(
            static_cast<Instruction::BinaryOps>(S.getOpcode()), LHS, RHS);
        V1 = Builder.CreateBinOp(
            static_cast<Instruction::BinaryOps>(S.getAltOpcode()), LHS, RHS);
      } else {
        V0 = Builder.CreateCast(
            static_cast<Instruction::CastOps>(S.getOpcode()), LHS, VecTy);
        V1 = Builder.CreateCast(
            static_cast<Instruction::CastOps>(S.getAltOpcode()), LHS, VecTy);
      }

      // Create shuffle to take alternate operations from the vector.
      // Also, gather up main and alt scalar ops to propagate IR flags to
      // each vector operation.
      ValueList OpScalars, AltScalars;
      unsigned e = E->Scalars.size();
      SmallVector<Constant *, 8> Mask(e);
      for (unsigned i = 0; i < e; ++i) {
        auto *OpInst = cast<Instruction>(E->Scalars[i]);
        assert(S.isOpcodeOrAlt(OpInst) && "Unexpected main/alternate opcode");
        if (OpInst->getOpcode() == S.getAltOpcode()) {
          Mask[i] = Builder.getInt32(e + i);
          AltScalars.push_back(E->Scalars[i]);
        } else {
          Mask[i] = Builder.getInt32(i);
          OpScalars.push_back(E->Scalars[i]);
        }
      }

      Value *ShuffleMask = ConstantVector::get(Mask);
      propagateIRFlags(V0, OpScalars);
      propagateIRFlags(V1, AltScalars);

      Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
      if (Instruction *I = dyn_cast<Instruction>(V))
        V = propagateMetadata(I, E->Scalars);
      if (NeedToShuffleReuses) {
        V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
                                        E->ReuseShuffleIndices, "shuffle");
      }
      E->VectorizedValue = V;
      ++NumVectorInstructions;

      return V;
    }

    default:
      llvm_unreachable("unknown inst");
  }
  return nullptr;
}
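// Example of the ShuffleVector (alternate opcode) case above: for a 4-wide
// bundle {add, sub, add, sub}, V0 holds the adds, V1 holds the subs, and the
// mask becomes <0, 5, 2, 7>, i.e. lanes 0 and 2 are taken from V0 and lanes
// 1 and 3 from V1 (their indices offset by the bundle width, 4).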
Value *BoUpSLP::vectorizeTree() {
  ExtraValueToDebugLocsMap ExternallyUsedValues;
  return vectorizeTree(ExternallyUsedValues);
}
Value *
BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
  // All blocks must be scheduled before any instructions are inserted.
  for (auto &BSIter : BlocksSchedules) {
    scheduleBlock(BSIter.second.get());
  }

  Builder.SetInsertPoint(&F->getEntryBlock().front());
  auto *VectorRoot = vectorizeTree(VectorizableTree[0].get());

  // If the vectorized tree can be rewritten in a smaller type, we truncate the
  // vectorized root. InstCombine will then rewrite the entire expression. We
  // sign extend the extracted values below.
  auto *ScalarRoot = VectorizableTree[0]->Scalars[0];
  if (MinBWs.count(ScalarRoot)) {
    if (auto *I = dyn_cast<Instruction>(VectorRoot))
      Builder.SetInsertPoint(&*++BasicBlock::iterator(I));
    auto BundleWidth = VectorizableTree[0]->Scalars.size();
    auto *MinTy = IntegerType::get(F->getContext(), MinBWs[ScalarRoot].first);
    auto *VecTy = VectorType::get(MinTy, BundleWidth);
    auto *Trunc = Builder.CreateTrunc(VectorRoot, VecTy);
    VectorizableTree[0]->VectorizedValue = Trunc;
  }

  LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
                    << " values.\n");

  // If necessary, sign-extend or zero-extend ScalarRoot to the larger type
  // specified by ScalarType.
  auto extend = [&](Value *ScalarRoot, Value *Ex, Type *ScalarType) {
    if (!MinBWs.count(ScalarRoot))
      return Ex;
    if (MinBWs[ScalarRoot].second)
      return Builder.CreateSExt(Ex, ScalarType);
    return Builder.CreateZExt(Ex, ScalarType);
  };

  // Extract all of the elements with the external uses.
  for (const auto &ExternalUse : ExternalUses) {
    Value *Scalar = ExternalUse.Scalar;
    llvm::User *User = ExternalUse.User;

    // Skip users that we already RAUW. This happens when one instruction
    // has multiple uses of the same value.
    if (User && !is_contained(Scalar->users(), User))
      continue;
    TreeEntry *E = getTreeEntry(Scalar);
    assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");

    Value *Vec = E->VectorizedValue;
    assert(Vec && "Can't find vectorizable value");

    Value *Lane = Builder.getInt32(ExternalUse.Lane);
    // If User == nullptr, the Scalar is used as extra arg. Generate
    // ExtractElement instruction and update the record for this scalar in
    // ExternallyUsedValues.
    if (!User) {
      assert(ExternallyUsedValues.count(Scalar) &&
             "Scalar with nullptr as an external user must be registered in "
             "ExternallyUsedValues map");
      if (auto *VecI = dyn_cast<Instruction>(Vec)) {
        Builder.SetInsertPoint(VecI->getParent(),
                               std::next(VecI->getIterator()));
      } else {
        Builder.SetInsertPoint(&F->getEntryBlock().front());
      }
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(cast<Instruction>(Scalar)->getParent());
      auto &Locs = ExternallyUsedValues[Scalar];
      ExternallyUsedValues.insert({Ex, Locs});
      ExternallyUsedValues.erase(Scalar);
      // Required to update internally referenced instructions.
      Scalar->replaceAllUsesWith(Ex);
      continue;
    }

    // Generate extracts for out-of-tree users.
    // Find the insertion point for the extractelement lane.
    if (auto *VecI = dyn_cast<Instruction>(Vec)) {
      if (PHINode *PH = dyn_cast<PHINode>(User)) {
        for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
          if (PH->getIncomingValue(i) == Scalar) {
            Instruction *IncomingTerminator =
                PH->getIncomingBlock(i)->getTerminator();
            if (isa<CatchSwitchInst>(IncomingTerminator)) {
              Builder.SetInsertPoint(VecI->getParent(),
                                     std::next(VecI->getIterator()));
            } else {
              Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
            }
            Value *Ex = Builder.CreateExtractElement(Vec, Lane);
            Ex = extend(ScalarRoot, Ex, Scalar->getType());
            CSEBlocks.insert(PH->getIncomingBlock(i));
            PH->setOperand(i, Ex);
          }
        }
      } else {
        Builder.SetInsertPoint(cast<Instruction>(User));
        Value *Ex = Builder.CreateExtractElement(Vec, Lane);
        Ex = extend(ScalarRoot, Ex, Scalar->getType());
        CSEBlocks.insert(cast<Instruction>(User)->getParent());
        User->replaceUsesOfWith(Scalar, Ex);
      }
    } else {
      Builder.SetInsertPoint(&F->getEntryBlock().front());
      Value *Ex = Builder.CreateExtractElement(Vec, Lane);
      Ex = extend(ScalarRoot, Ex, Scalar->getType());
      CSEBlocks.insert(&F->getEntryBlock());
      User->replaceUsesOfWith(Scalar, Ex);
    }

    LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
  }

  // For each vectorized value:
  for (auto &TEPtr : VectorizableTree) {
    TreeEntry *Entry = TEPtr.get();

    // No need to handle users of gathered values.
    if (Entry->NeedToGather)
      continue;

    assert(Entry->VectorizedValue && "Can't find vectorizable value");

    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      Type *Ty = Scalar->getType();
      if (!Ty->isVoidTy()) {
        for (User *U : Scalar->users()) {
          LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

          // It is legal to replace users in the ignorelist by undef.
          assert((getTreeEntry(U) || is_contained(UserIgnoreList, U)) &&
                 "Replacing out-of-tree value with undef");
        }
        Value *Undef = UndefValue::get(Ty);
        Scalar->replaceAllUsesWith(Undef);
      }
      LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
      eraseInstruction(cast<Instruction>(Scalar));
    }
  }

  Builder.ClearInsertionPoint();

  return VectorizableTree[0]->VectorizedValue;
}
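// Illustration of the extraction step above: if scalar %s (lane 1 of a
// vectorized bundle %vec) is still used by an out-of-tree instruction, an
//   %ex = extractelement <N x T> %vec, i32 1
// is built right before that user (or in the incoming block for PHI users),
// sign- or zero-extended via 'extend' when the tree was narrowed, and the
// user is rewritten to take %ex instead of %s.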
void BoUpSLP::optimizeGatherSequence() {
  LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
                    << " gather sequences instructions.\n");
  // LICM InsertElementInst sequences.
  for (Instruction *I : GatherSeq) {
    if (!isa<InsertElementInst>(I) && !isa<ShuffleVectorInst>(I))
      continue;

    // Check if this block is inside a loop.
    Loop *L = LI->getLoopFor(I->getParent());
    if (!L)
      continue;

    // Check if it has a preheader.
    BasicBlock *PreHeader = L->getLoopPreheader();
    if (!PreHeader)
      continue;

    // If the vector or the element that we insert into it are
    // instructions that are defined in this basic block then we can't
    // hoist this instruction.
    auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
    auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
    if (Op0 && L->contains(Op0))
      continue;
    if (Op1 && L->contains(Op1))
      continue;

    // We can hoist this instruction. Move it to the pre-header.
    I->moveBefore(PreHeader->getTerminator());
  }

  // Make a list of all reachable blocks in our CSE queue.
  SmallVector<const DomTreeNode *, 8> CSEWorkList;
  CSEWorkList.reserve(CSEBlocks.size());
  for (BasicBlock *BB : CSEBlocks)
    if (DomTreeNode *N = DT->getNode(BB)) {
      assert(DT->isReachableFromEntry(N));
      CSEWorkList.push_back(N);
    }

  // Sort blocks by domination. This ensures we visit a block after all blocks
  // dominating it are visited.
  llvm::stable_sort(CSEWorkList,
                    [this](const DomTreeNode *A, const DomTreeNode *B) {
                      return DT->properlyDominates(A, B);
                    });

  // Perform O(N^2) search over the gather sequences and merge identical
  // instructions. TODO: We can further optimize this scan if we split the
  // instructions into different buckets based on the insert lane.
  SmallVector<Instruction *, 16> Visited;
  for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
    assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
           "Worklist not sorted properly!");
    BasicBlock *BB = (*I)->getBlock();
    // For all instructions in blocks containing gather sequences:
    for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
      Instruction *In = &*it++;
      if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
        continue;

      // Check if we can replace this instruction with any of the
      // visited instructions.
      bool Replaced = false;
      for (Instruction *v : Visited) {
        if (In->isIdenticalTo(v) &&
            DT->dominates(v->getParent(), In->getParent())) {
          In->replaceAllUsesWith(v);
          eraseInstruction(In);
          Replaced = true;
          break;
        }
      }
      if (!Replaced) {
        assert(!is_contained(Visited, In));
        Visited.push_back(In);
      }
    }
  }
  CSEBlocks.clear();
  GatherSeq.clear();
}
// Groups the instructions to a bundle (which is then a single scheduling
// entity) and schedules instructions until the bundle gets ready.
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
                                                 BoUpSLP *SLP,
                                                 const InstructionsState &S) {
  if (isa<PHINode>(S.OpValue))
    return true;

  // Initialize the instruction bundle.
  Instruction *OldScheduleEnd = ScheduleEnd;
  ScheduleData *PrevInBundle = nullptr;
  ScheduleData *Bundle = nullptr;
  bool ReSchedule = false;
  LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");

  // Make sure that the scheduling region contains all
  // instructions of the bundle.
  for (Value *V : VL) {
    if (!extendSchedulingRegion(V, S))
      return false;
  }

  for (Value *V : VL) {
    ScheduleData *BundleMember = getScheduleData(V);
    assert(BundleMember &&
           "no ScheduleData for bundle member (maybe not in same basic block)");
    if (BundleMember->IsScheduled) {
      // A bundle member was scheduled as single instruction before and now
      // needs to be scheduled as part of the bundle. We just get rid of the
      // existing schedule.
      LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
                        << " was already scheduled\n");
      ReSchedule = true;
    }
    assert(BundleMember->isSchedulingEntity() &&
           "bundle member already part of other bundle");
    if (PrevInBundle) {
      PrevInBundle->NextInBundle = BundleMember;
    } else {
      Bundle = BundleMember;
    }
    BundleMember->UnscheduledDepsInBundle = 0;
    Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;

    // Group the instructions to a bundle.
    BundleMember->FirstInBundle = Bundle;
    PrevInBundle = BundleMember;
  }
  if (ScheduleEnd != OldScheduleEnd) {
    // The scheduling region got new instructions at the lower end (or it is a
    // new region for the first bundle). This makes it necessary to
    // recalculate all dependencies.
    // It is seldom that this needs to be done a second time after adding the
    // initial bundle to the region.
    for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
      doForAllOpcodes(I, [](ScheduleData *SD) {
        SD->clearDependencies();
      });
    }
    ReSchedule = true;
  }
  if (ReSchedule) {
    resetSchedule();
    initialFillReadyList(ReadyInsts);
  }

  LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
                    << BB->getName() << "\n");

  calculateDependencies(Bundle, true, SLP);

  // Now try to schedule the new bundle. As soon as the bundle is "ready" it
  // means that there are no cyclic dependencies and we can schedule it.
  // Note that's important that we don't "schedule" the bundle yet (see
  // cancelScheduling).
  while (!Bundle->isReady() && !ReadyInsts.empty()) {

    ScheduleData *pickedSD = ReadyInsts.back();
    ReadyInsts.pop_back();

    if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
      schedule(pickedSD, ReadyInsts);
    }
  }
  if (!Bundle->isReady()) {
    cancelScheduling(VL, S.OpValue);
    return false;
  }
  return true;
}
void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
                                                Value *OpValue) {
  if (isa<PHINode>(OpValue))
    return;

  ScheduleData *Bundle = getScheduleData(OpValue);
  LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
  assert(!Bundle->IsScheduled &&
         "Can't cancel bundle which is already scheduled");
  assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
         "tried to unbundle something which is not a bundle");

  // Un-bundle: make single instructions out of the bundle.
  ScheduleData *BundleMember = Bundle;
  while (BundleMember) {
    assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
    BundleMember->FirstInBundle = BundleMember;
    ScheduleData *Next = BundleMember->NextInBundle;
    BundleMember->NextInBundle = nullptr;
    BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
    if (BundleMember->UnscheduledDepsInBundle == 0) {
      ReadyInsts.insert(BundleMember);
    }
    BundleMember = Next;
  }
}
BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
  // Allocate a new ScheduleData for the instruction.
  if (ChunkPos >= ChunkSize) {
    ScheduleDataChunks.push_back(llvm::make_unique<ScheduleData[]>(ChunkSize));
    ChunkPos = 0;
  }
  return &(ScheduleDataChunks.back()[ChunkPos++]);
}
bool BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V,
                                                      const InstructionsState &S) {
  if (getScheduleData(V, isOneOf(S, V)))
    return true;
  Instruction *I = dyn_cast<Instruction>(V);
  assert(I && "bundle member must be an instruction");
  assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
  auto &&CheckSheduleForI = [this, &S](Instruction *I) -> bool {
    ScheduleData *ISD = getScheduleData(I);
    if (!ISD)
      return false;
    assert(isInSchedulingRegion(ISD) &&
           "ScheduleData not in scheduling region");
    ScheduleData *SD = allocateScheduleDataChunks();
    SD->Inst = I;
    SD->init(SchedulingRegionID, S.OpValue);
    ExtraScheduleDataMap[I][S.OpValue] = SD;
    return true;
  };
  if (CheckSheduleForI(I))
    return true;
  if (!ScheduleStart) {
    // It's the first instruction in the new region.
    initScheduleData(I, I->getNextNode(), nullptr, nullptr);
    ScheduleStart = I;
    ScheduleEnd = I->getNextNode();
    if (isOneOf(S, I) != I)
      CheckSheduleForI(I);
    assert(ScheduleEnd && "tried to vectorize a terminator?");
    LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
    return true;
  }
  // Search up and down at the same time, because we don't know if the new
  // instruction is above or below the existing scheduling region.
  BasicBlock::reverse_iterator UpIter =
      ++ScheduleStart->getIterator().getReverse();
  BasicBlock::reverse_iterator UpperEnd = BB->rend();
  BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
  BasicBlock::iterator LowerEnd = BB->end();
  while (true) {
    if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
      LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
      return false;
    }

    if (UpIter != UpperEnd) {
      if (&*UpIter == I) {
        initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
        ScheduleStart = I;
        if (isOneOf(S, I) != I)
          CheckSheduleForI(I);
        LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
                          << "\n");
        return true;
      }
      ++UpIter;
    }
    if (DownIter != LowerEnd) {
      if (&*DownIter == I) {
        initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
                         nullptr);
        ScheduleEnd = I->getNextNode();
        if (isOneOf(S, I) != I)
          CheckSheduleForI(I);
        assert(ScheduleEnd && "tried to vectorize a terminator?");
        LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I
                          << "\n");
        return true;
      }
      ++DownIter;
    }
    assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
           "instruction not found in block");
  }
  return true;
}
void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
                                                Instruction *ToI,
                                                ScheduleData *PrevLoadStore,
                                                ScheduleData *NextLoadStore) {
  ScheduleData *CurrentLoadStore = PrevLoadStore;
  for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
    ScheduleData *SD = ScheduleDataMap[I];
    if (!SD) {
      SD = allocateScheduleDataChunks();
      ScheduleDataMap[I] = SD;
      SD->Inst = I;
    }
    assert(!isInSchedulingRegion(SD) &&
           "new ScheduleData already in scheduling region");
    SD->init(SchedulingRegionID, I);

    if (I->mayReadOrWriteMemory() &&
        (!isa<IntrinsicInst>(I) ||
         cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect)) {
      // Update the linked list of memory accessing instructions.
      if (CurrentLoadStore) {
        CurrentLoadStore->NextLoadStore = SD;
      } else {
        FirstLoadStoreInRegion = SD;
      }
      CurrentLoadStore = SD;
    }
  }
  if (NextLoadStore) {
    if (CurrentLoadStore)
      CurrentLoadStore->NextLoadStore = NextLoadStore;
  } else {
    LastLoadStoreInRegion = CurrentLoadStore;
  }
}
void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
                                                     bool InsertInReadyList,
                                                     BoUpSLP *SLP) {
  assert(SD->isSchedulingEntity());

  SmallVector<ScheduleData *, 10> WorkList;
  WorkList.push_back(SD);

  while (!WorkList.empty()) {
    ScheduleData *SD = WorkList.back();
    WorkList.pop_back();

    ScheduleData *BundleMember = SD;
    while (BundleMember) {
      assert(isInSchedulingRegion(BundleMember));
      if (!BundleMember->hasValidDependencies()) {

        LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
                          << "\n");
        BundleMember->Dependencies = 0;
        BundleMember->resetUnscheduledDeps();

        // Handle def-use chain dependencies.
        if (BundleMember->OpValue != BundleMember->Inst) {
          ScheduleData *UseSD = getScheduleData(BundleMember->Inst);
          if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
            BundleMember->Dependencies++;
            ScheduleData *DestBundle = UseSD->FirstInBundle;
            if (!DestBundle->IsScheduled)
              BundleMember->incrementUnscheduledDeps(1);
            if (!DestBundle->hasValidDependencies())
              WorkList.push_back(DestBundle);
          }
        } else {
          for (User *U : BundleMember->Inst->users()) {
            if (isa<Instruction>(U)) {
              ScheduleData *UseSD = getScheduleData(U);
              if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
                BundleMember->Dependencies++;
                ScheduleData *DestBundle = UseSD->FirstInBundle;
                if (!DestBundle->IsScheduled)
                  BundleMember->incrementUnscheduledDeps(1);
                if (!DestBundle->hasValidDependencies())
                  WorkList.push_back(DestBundle);
              }
            } else {
              // I'm not sure if this can ever happen. But we need to be safe.
              // This lets the instruction/bundle never be scheduled and
              // eventually disable vectorization.
              BundleMember->Dependencies++;
              BundleMember->incrementUnscheduledDeps(1);
            }
          }
        }

        // Handle the memory dependencies.
        ScheduleData *DepDest = BundleMember->NextLoadStore;
        if (DepDest) {
          Instruction *SrcInst = BundleMember->Inst;
          MemoryLocation SrcLoc = getLocation(SrcInst, SLP->AA);
          bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
          unsigned numAliased = 0;
          unsigned DistToSrc = 1;

          while (DepDest) {
            assert(isInSchedulingRegion(DepDest));

            // We have two limits to reduce the complexity:
            // 1) AliasedCheckLimit: It's a small limit to reduce calls to
            //    SLP->isAliased (which is the expensive part in this loop).
            // 2) MaxMemDepDistance: It's for very large blocks and it aborts
            //    the whole loop (even if the loop is fast, it's quadratic).
            //    It's important for the loop break condition (see below) to
            //    check this limit even between two read-only instructions.
            if (DistToSrc >= MaxMemDepDistance ||
                ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
                 (numAliased >= AliasedCheckLimit ||
                  SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {

              // We increment the counter only if the locations are aliased
              // (instead of counting all alias checks). This gives a better
              // balance between reduced runtime and accurate dependencies.
              numAliased++;

              DepDest->MemoryDependencies.push_back(BundleMember);
              BundleMember->Dependencies++;
              ScheduleData *DestBundle = DepDest->FirstInBundle;
              if (!DestBundle->IsScheduled) {
                BundleMember->incrementUnscheduledDeps(1);
              }
              if (!DestBundle->hasValidDependencies()) {
                WorkList.push_back(DestBundle);
              }
            }
            DepDest = DepDest->NextLoadStore;

            // Example, explaining the loop break condition: Let's assume our
            // starting instruction is i0 and MaxMemDepDistance = 3.
            //
            //             i0,i1,i2,i3,i4,i5,i6,i7,i8
            //
            // MaxMemDepDistance let us stop alias-checking at i3 and we add
            // dependencies from i0 to i3,i4,.. (even if they are not aliased).
            // Previously we already added dependencies from i3 to i6,i7,i8
            // (because of MaxMemDepDistance). As we added a dependency from
            // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
            // and we can abort this loop at i6.
            if (DistToSrc >= 2 * MaxMemDepDistance)
              break;
            DistToSrc++;
          }
        }
      }
      BundleMember = BundleMember->NextInBundle;
    }
    if (InsertInReadyList && SD->isReady()) {
      ReadyInsts.push_back(SD);
      LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
                        << "\n");
    }
  }
}
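// Example of the counting above: a bundle member whose instruction has two
// users inside the scheduling region gets Dependencies = 2; UnscheduledDeps
// only counts the users whose destination bundle has not been scheduled yet,
// and once it drops to zero the member's bundle can become ready.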
void BoUpSLP::BlockScheduling::resetSchedule() {
  assert(ScheduleStart &&
         "tried to reset schedule on block which has not been scheduled");
  for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
    doForAllOpcodes(I, [&](ScheduleData *SD) {
      assert(isInSchedulingRegion(SD) &&
             "ScheduleData not in scheduling region");
      SD->IsScheduled = false;
      SD->resetUnscheduledDeps();
    });
  }
  ReadyInsts.clear();
}
void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
  if (!BS->ScheduleStart)
    return;

  LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");

  BS->resetSchedule();

  // For the real scheduling we use a more sophisticated ready-list: it is
  // sorted by the original instruction location. This lets the final schedule
  // be as close as possible to the original instruction order.
  struct ScheduleDataCompare {
    bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
      return SD2->SchedulingPriority < SD1->SchedulingPriority;
    }
  };
  std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;

  // Ensure that all dependency data is updated and fill the ready-list with
  // initial instructions.
  int Idx = 0;
  int NumToSchedule = 0;
  for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
       I = I->getNextNode()) {
    BS->doForAllOpcodes(I, [this, &Idx, &NumToSchedule, BS](ScheduleData *SD) {
      assert(SD->isPartOfBundle() ==
                 (getTreeEntry(SD->Inst) != nullptr) &&
             "scheduler and vectorizer bundle mismatch");
      SD->FirstInBundle->SchedulingPriority = Idx++;
      if (SD->isSchedulingEntity()) {
        BS->calculateDependencies(SD, false, this);
        NumToSchedule++;
      }
    });
  }
  BS->initialFillReadyList(ReadyInsts);

  Instruction *LastScheduledInst = BS->ScheduleEnd;

  // Do the "real" scheduling.
  while (!ReadyInsts.empty()) {
    ScheduleData *picked = *ReadyInsts.begin();
    ReadyInsts.erase(ReadyInsts.begin());

    // Move the scheduled instruction(s) to their dedicated places, if not
    // there yet.
    ScheduleData *BundleMember = picked;
    while (BundleMember) {
      Instruction *pickedInst = BundleMember->Inst;
      if (LastScheduledInst->getNextNode() != pickedInst) {
        BS->BB->getInstList().remove(pickedInst);
        BS->BB->getInstList().insert(LastScheduledInst->getIterator(),
                                     pickedInst);
      }
      LastScheduledInst = pickedInst;
      BundleMember = BundleMember->NextInBundle;
    }

    BS->schedule(picked, ReadyInsts);
    NumToSchedule--;
  }
  assert(NumToSchedule == 0 && "could not schedule all instructions");

  // Avoid duplicate scheduling of the block.
  BS->ScheduleStart = nullptr;
}
unsigned BoUpSLP::getVectorElementSize(Value *V) const {
  // If V is a store, just return the width of the stored value without
  // traversing the expression tree. This is the common case.
  if (auto *Store = dyn_cast<StoreInst>(V))
    return DL->getTypeSizeInBits(Store->getValueOperand()->getType());

  // If V is not a store, we can traverse the expression tree to find loads
  // that feed it. The type of the loaded value may indicate a more suitable
  // width than V's type. We want to base the vector element size on the width
  // of memory operations where possible.
  SmallVector<Instruction *, 16> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;
  if (auto *I = dyn_cast<Instruction>(V))
    Worklist.push_back(I);

  // Traverse the expression tree in bottom-up order looking for loads. If we
  // encounter an instruction we don't yet handle, we give up.
  auto MaxWidth = 0u;
  auto FoundUnknownInst = false;
  while (!Worklist.empty() && !FoundUnknownInst) {
    auto *I = Worklist.pop_back_val();
    Visited.insert(I);

    // We should only be looking at scalar instructions here. If the current
    // instruction has a vector type, give up.
    auto *Ty = I->getType();
    if (isa<VectorType>(Ty))
      FoundUnknownInst = true;

    // If the current instruction is a load, update MaxWidth to reflect the
    // width of the loaded value.
    else if (isa<LoadInst>(I))
      MaxWidth = std::max<unsigned>(MaxWidth, DL->getTypeSizeInBits(Ty));

    // Otherwise, we need to visit the operands of the instruction. We only
    // handle the interesting cases from buildTree here. If an operand is an
    // instruction we haven't yet visited, we add it to the worklist.
    else if (isa<PHINode>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
             isa<CmpInst>(I) || isa<SelectInst>(I) || isa<BinaryOperator>(I)) {
      for (Use &U : I->operands())
        if (auto *J = dyn_cast<Instruction>(U.get()))
          if (!Visited.count(J))
            Worklist.push_back(J);
    }

    // If we don't yet handle the instruction, give up.
    else
      FoundUnknownInst = true;
  }

  // If we didn't encounter a memory access in the expression tree, or if we
  // gave up for some reason, just return the width of V.
  if (!MaxWidth || FoundUnknownInst)
    return DL->getTypeSizeInBits(V->getType());

  // Otherwise, return the maximum width we found.
  return MaxWidth;
}
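// Worked example: for the chain  %l = load i8 ... ; %e = zext i8 %l to i32 ;
// %a = add i32 %e, 1  the walk from %a reaches the i8 load, so the element
// size is reported as 8 bits rather than the 32 bits of %a's type; a plain
// store of an i32 value simply reports 32.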
// Determine if a value V in a vectorizable expression Expr can be demoted to a
// smaller type with a truncation. We collect the values that will be demoted
// in ToDemote and additional roots that require investigating in Roots.
static bool collectValuesToDemote(Value *V, SmallPtrSetImpl<Value *> &Expr,
                                  SmallVectorImpl<Value *> &ToDemote,
                                  SmallVectorImpl<Value *> &Roots) {
  // We can always demote constants.
  if (isa<Constant>(V)) {
    ToDemote.push_back(V);
    return true;
  }

  // If the value is not an instruction in the expression with only one use, it
  // cannot be demoted.
  auto *I = dyn_cast<Instruction>(V);
  if (!I || !I->hasOneUse() || !Expr.count(I))
    return false;

  switch (I->getOpcode()) {

  // We can always demote truncations and extensions. Since truncations can
  // seed additional demotion, we save the truncated value.
  case Instruction::Trunc:
    Roots.push_back(I->getOperand(0));
    break;
  case Instruction::ZExt:
  case Instruction::SExt:
    break;

  // We can demote certain binary operations if we can demote both of their
  // operands.
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    if (!collectValuesToDemote(I->getOperand(0), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(I->getOperand(1), Expr, ToDemote, Roots))
      return false;
    break;

  // We can demote selects if we can demote their true and false values.
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    if (!collectValuesToDemote(SI->getTrueValue(), Expr, ToDemote, Roots) ||
        !collectValuesToDemote(SI->getFalseValue(), Expr, ToDemote, Roots))
      return false;
    break;
  }

  // We can demote phis if we can demote all their incoming operands. Note that
  // we don't need to worry about cycles since we ensure single use above.
  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!collectValuesToDemote(IncValue, Expr, ToDemote, Roots))
        return false;
    break;
  }

  // Otherwise, conservatively give up.
  default:
    return false;
  }

  // Record the value that we can demote.
  ToDemote.push_back(V);
  return true;
}
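// Worked example (assuming each intermediate value has a single use inside
// the vectorizable expression): in  %t = trunc i32 %x to i8 ;
// %a = add i8 %t, 1 ; %z = zext i8 %a to i32  the add, the trunc and the
// constant are all collected into ToDemote, and %x is pushed onto Roots
// because the trunc may seed further demotion.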
4822 void BoUpSLP::computeMinimumValueSizes() {
4823 // If there are no external uses, the expression tree must be rooted by a
4824 // store. We can't demote in-memory values, so there is nothing to do here.
4825 if (ExternalUses
.empty())
4828 // We only attempt to truncate integer expressions.
4829 auto &TreeRoot
= VectorizableTree
[0]->Scalars
;
4830 auto *TreeRootIT
= dyn_cast
<IntegerType
>(TreeRoot
[0]->getType());
4834 // If the expression is not rooted by a store, these roots should have
4835 // external uses. We will rely on InstCombine to rewrite the expression in
4836 // the narrower type. However, InstCombine only rewrites single-use values.
4837 // This means that if a tree entry other than a root is used externally, it
4838 // must have multiple uses and InstCombine will not rewrite it. The code
4839 // below ensures that only the roots are used externally.
4840 SmallPtrSet
<Value
*, 32> Expr(TreeRoot
.begin(), TreeRoot
.end());
4841 for (auto &EU
: ExternalUses
)
4842 if (!Expr
.erase(EU
.Scalar
))
4847 // Collect the scalar values of the vectorizable expression. We will use this
4848 // context to determine which values can be demoted. If we see a truncation,
4849 // we mark it as seeding another demotion.
4850 for (auto &EntryPtr
: VectorizableTree
)
4851 Expr
.insert(EntryPtr
->Scalars
.begin(), EntryPtr
->Scalars
.end());
4853 // Ensure the roots of the vectorizable tree don't form a cycle. They must
4854 // have a single external user that is not in the vectorizable tree.
4855 for (auto *Root
: TreeRoot
)
4856 if (!Root
->hasOneUse() || Expr
.count(*Root
->user_begin()))
4859 // Conservatively determine if we can actually truncate the roots of the
4860 // expression. Collect the values that can be demoted in ToDemote and
4861 // additional roots that require investigating in Roots.
4862 SmallVector
<Value
*, 32> ToDemote
;
4863 SmallVector
<Value
*, 4> Roots
;
4864 for (auto *Root
: TreeRoot
)
4865 if (!collectValuesToDemote(Root
, Expr
, ToDemote
, Roots
))
4868 // The maximum bit width required to represent all the values that can be
4869 // demoted without loss of precision. It would be safe to truncate the roots
4870 // of the expression to this width.
4871 auto MaxBitWidth
= 8u;
4873 // We first check if all the bits of the roots are demanded. If they're not,
4874 // we can truncate the roots to this narrower type.
4875 for (auto *Root
: TreeRoot
) {
4876 auto Mask
= DB
->getDemandedBits(cast
                                                     cast<Instruction>(Root));
    MaxBitWidth = std::max<unsigned>(
        Mask.getBitWidth() - Mask.countLeadingZeros(), MaxBitWidth);
  }

  // True if the roots can be zero-extended back to their original type, rather
  // than sign-extended. We know that if the leading bits are not demanded, we
  // can safely zero-extend. So we initialize IsKnownPositive to True.
  bool IsKnownPositive = true;

  // If all the bits of the roots are demanded, we can try a little harder to
  // compute a narrower type. This can happen, for example, if the roots are
  // getelementptr indices. InstCombine promotes these indices to the pointer
  // width. Thus, all their bits are technically demanded even though the
  // address computation might be vectorized in a smaller type.
  //
  // We start by looking at each entry that can be demoted. We compute the
  // maximum bit width required to store the scalar by using ValueTracking to
  // compute the number of high-order bits we can truncate.
  if (MaxBitWidth == DL->getTypeSizeInBits(TreeRoot[0]->getType()) &&
      llvm::all_of(TreeRoot, [](Value *R) {
        assert(R->hasOneUse() && "Root should have only one use!");
        return isa<GetElementPtrInst>(R->user_back());
      })) {
    // Determine if the sign bit of all the roots is known to be zero. If not,
    // IsKnownPositive is set to False.
    IsKnownPositive = llvm::all_of(TreeRoot, [&](Value *R) {
      KnownBits Known = computeKnownBits(R, *DL);
      return Known.isNonNegative();
    });

    // Determine the maximum number of bits required to store the scalar
    // values.
    for (auto *Scalar : ToDemote) {
      auto NumSignBits = ComputeNumSignBits(Scalar, *DL, 0, AC, nullptr, DT);
      auto NumTypeBits = DL->getTypeSizeInBits(Scalar->getType());
      MaxBitWidth = std::max<unsigned>(NumTypeBits - NumSignBits, MaxBitWidth);
    }

    // If we can't prove that the sign bit is zero, we must add one to the
    // maximum bit width to account for the unknown sign bit. This preserves
    // the existing sign bit so we can safely sign-extend the root back to the
    // original type. Otherwise, if we know the sign bit is zero, we will
    // zero-extend the root instead.
    //
    // FIXME: This is somewhat suboptimal, as there will be cases where adding
    //        one to the maximum bit width will yield a larger-than-necessary
    //        type. In general, we need to add an extra bit only if we can't
    //        prove that the upper bit of the original type is equal to the
    //        upper bit of the proposed smaller type. If these two bits are the
    //        same (either zero or one) we know that sign-extending from the
    //        smaller type will result in the same value. Here, since we can't
    //        yet prove this, we are just making the proposed smaller type
    //        larger to ensure correctness.
    if (!IsKnownPositive)
      ++MaxBitWidth;
  }

  // Round MaxBitWidth up to the next power-of-two.
  if (!isPowerOf2_64(MaxBitWidth))
    MaxBitWidth = NextPowerOf2(MaxBitWidth);

  // If the maximum bit width we compute is less than the width of the roots'
  // type, we can proceed with the narrowing. Otherwise, do nothing.
  if (MaxBitWidth >= TreeRootIT->getBitWidth())
    return;

  // If we can truncate the root, we must collect additional values that might
  // be demoted as a result. That is, those seeded by truncations we will
  // modify.
  while (!Roots.empty())
    collectValuesToDemote(Roots.pop_back_val(), Expr, ToDemote, Roots);

  // Finally, map the values we can demote to the maximum bit width we
  // computed.
  for (auto *Scalar : ToDemote)
    MinBWs[Scalar] = std::make_pair(MaxBitWidth, !IsKnownPositive);
}
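// Illustrative outcome (not from the original source): if every root is a GEP
// index that InstCombine widened to i64 but ValueTracking proves only the low
// 8 bits can differ from the sign bit, MaxBitWidth rounds to 8 and each entry
// records MinBWs[Scalar] = {8, /*IsSigned=*/!IsKnownPositive}, letting codegen
// emit the tree on narrow vector elements and extend only the final roots
// back to i64.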
namespace {

/// The SLPVectorizer Pass.
struct SLPVectorizer : public FunctionPass {
  SLPVectorizerPass Impl;

  /// Pass identification, replacement for typeid
  static char ID;

  explicit SLPVectorizer() : FunctionPass(ID) {
    initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override { return false; }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
    auto *TLI = TLIP ? &TLIP->getTLI() : nullptr;
    auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DB = &getAnalysis<DemandedBitsWrapperPass>().getDemandedBits();
    auto *ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

    return Impl.runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    FunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<DemandedBitsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace
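// Note (illustrative, not part of the original source): the wrapper above is
// the legacy pass-manager entry point; under the new pass manager the same
// transformation runs through SLPVectorizerPass::run below, e.g. when a
// pipeline requests the "slp-vectorizer" pass.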
PreservedAnalyses SLPVectorizerPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
  auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *LI = &AM.getResult<LoopAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
  auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}
bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
                                TargetTransformInfo *TTI_,
                                TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
                                LoopInfo *LI_, DominatorTree *DT_,
                                AssumptionCache *AC_, DemandedBits *DB_,
                                OptimizationRemarkEmitter *ORE_) {
  SE = SE_;
  TTI = TTI_;
  TLI = TLI_;
  AA = AA_;
  LI = LI_;
  DT = DT_;
  AC = AC_;
  DB = DB_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false;

  // If the target claims to have no vector registers don't attempt
  // vectorization.
  if (!TTI->getNumberOfRegisters(true))
    return false;

  // Don't vectorize when the attribute NoImplicitFloat is used.
  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");

  // Use the bottom up slp vectorizer to construct chains that start with
  // store instructions.
  BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);

  // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
  // delete instructions.

  // Scan the blocks in the function in post order.
  for (auto BB : post_order(&F.getEntryBlock())) {
    collectSeedInstructions(BB);

    // Vectorize trees that end at stores.
    if (!Stores.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
                        << " underlying objects.\n");
      Changed |= vectorizeStoreChains(R);
    }

    // Vectorize trees that end at reductions.
    Changed |= vectorizeChainsInBlock(BB, R);

    // Vectorize the index computations of getelementptr instructions. This
    // is primarily intended to catch gather-like idioms ending at
    // non-consecutive loads.
    if (!GEPs.empty()) {
      LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
                        << " underlying objects.\n");
      Changed |= vectorizeGEPIndices(BB, R);
    }
  }

  if (Changed) {
    R.optimizeGatherSequence();
    LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
    LLVM_DEBUG(verifyFunction(F));
  }
  return Changed;
}
/// Check that the Values in the slice in VL array are still existent in
/// the WeakTrackingVH array.
/// Vectorization of part of the VL array may cause later values in the VL
/// array to become invalid. We track when this has happened in the
/// WeakTrackingVH array.
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL,
                               ArrayRef<WeakTrackingVH> VH, unsigned SliceBegin,
                               unsigned SliceSize) {
  VL = VL.slice(SliceBegin, SliceSize);
  VH = VH.slice(SliceBegin, SliceSize);
  return !std::equal(VL.begin(), VL.end(), VH.begin());
}
bool SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
                                            unsigned VecRegSize) {
  const unsigned ChainLen = Chain.size();
  LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
                    << ".\n");
  const unsigned Sz = R.getVectorElementSize(Chain[0]);
  const unsigned VF = VecRegSize / Sz;

  if (!isPowerOf2_32(Sz) || VF < 2)
    return false;

  // Keep track of values that were deleted by vectorizing in the loop below.
  const SmallVector<WeakTrackingVH, 8> TrackValues(Chain.begin(), Chain.end());

  bool Changed = false;
  // Look for profitable vectorizable trees at all offsets, starting at zero.
  for (unsigned i = 0, e = ChainLen; i + VF <= e; ++i) {

    // Check that a previous iteration of this loop did not delete the Value.
    if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
                      << "\n");
    ArrayRef<Value *> Operands = Chain.slice(i, VF);

    R.buildTree(Operands);
    if (R.isTreeTinyAndNotFullyVectorizable())
      continue;

    R.computeMinimumValueSizes();

    int Cost = R.getTreeCost();

    LLVM_DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF
                      << "\n");
    if (Cost < -SLPCostThreshold) {
      LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");

      using namespace ore;

      R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
                                          cast<StoreInst>(Chain[i]))
                       << "Stores SLP vectorized with cost " << NV("Cost", Cost)
                       << " and with tree size "
                       << NV("TreeSize", R.getTreeSize()));

      R.vectorizeTree();

      // Move to the next bundle.
      i += VF - 1;
      Changed = true;
    }
  }

  return Changed;
}
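// Worked example (illustrative): with a 128-bit vector register and 32-bit
// stored elements, Sz = 32 and VF = 128 / 32 = 4, so the loop above slides a
// window of four consecutive stores along the chain, builds a tree for each
// window, and vectorizes only the windows whose cost is below
// -SLPCostThreshold.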
bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
                                        BoUpSLP &R) {
  SetVector<StoreInst *> Heads;
  SmallDenseSet<StoreInst *> Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we vectorized so that we don't visit the same store twice.
  BoUpSLP::ValueSet VectorizedStores;
  bool Changed = false;

  auto &&FindConsecutiveAccess =
      [this, &Stores, &Heads, &Tails, &ConsecutiveChain](int K, int Idx) {
        if (!isConsecutiveAccess(Stores[K], Stores[Idx], *DL, *SE))
          return false;

        Tails.insert(Stores[Idx]);
        Heads.insert(Stores[K]);
        ConsecutiveChain[Stores[K]] = Stores[Idx];
        return true;
      };

  // Do a quadratic search on all of the given stores in reverse order and find
  // all of the pairs of stores that follow each other.
  int E = Stores.size();
  for (int Idx = E - 1; Idx >= 0; --Idx) {
    // If a store has multiple consecutive store candidates, search according
    // to the sequence: Idx-1, Idx+1, Idx-2, Idx+2, ...
    // This is because usually pairing with an immediately succeeding or
    // preceding candidate creates the best chance to find an SLP
    // vectorization opportunity.
    for (int Offset = 1, F = std::max(E - Idx, Idx + 1); Offset < F; ++Offset)
      if ((Idx >= Offset && FindConsecutiveAccess(Idx - Offset, Idx)) ||
          (Idx + Offset < E && FindConsecutiveAccess(Idx + Offset, Idx)))
        break;
  }

  // For stores that start but don't end a link in the chain:
  for (auto *SI : llvm::reverse(Heads)) {
    if (Tails.count(SI))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to vectorize it.
    BoUpSLP::ValueList Operands;
    StoreInst *I = SI;
    // Collect the chain into a list.
    while ((Tails.count(I) || Heads.count(I)) && !VectorizedStores.count(I)) {
      Operands.push_back(I);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    // FIXME: Is division-by-2 the correct step? Should we assert that the
    // register size is a power-of-2?
    for (unsigned Size = R.getMaxVecRegSize(); Size >= R.getMinVecRegSize();
         Size /= 2) {
      if (vectorizeStoreChain(Operands, R, Size)) {
        // Mark the vectorized stores so that we don't vectorize them again.
        VectorizedStores.insert(Operands.begin(), Operands.end());
        Changed = true;
        break;
      }
    }
  }

  return Changed;
}
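// Illustrative example (not from the original source): for four stores to
// addresses A, A+4, A+8 and A+12 that appear in the Stores array out of
// program order, the Idx-1, Idx+1, Idx-2, ... probing above still links each
// store to its address-consecutive neighbour, and the chain walk then hands
// the full run of four stores to vectorizeStoreChain as one candidate.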
void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
  // Initialize the collections. We will make a single pass over the block.
  Stores.clear();
  GEPs.clear();

  // Visit the store and getelementptr instructions in BB and organize them in
  // Stores and GEPs according to the underlying objects of their pointer
  // operands.
  for (Instruction &I : *BB) {
    // Ignore store instructions that are volatile or have a pointer operand
    // that doesn't point to a scalar type.
    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      if (!SI->isSimple())
        continue;
      if (!isValidElementType(SI->getValueOperand()->getType()))
        continue;
      Stores[GetUnderlyingObject(SI->getPointerOperand(), *DL)].push_back(SI);
    }

    // Ignore getelementptr instructions that have more than one index, a
    // constant index, or a pointer operand that doesn't point to a scalar
    // type.
    else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      auto Idx = GEP->idx_begin()->get();
      if (GEP->getNumIndices() > 1 || isa<Constant>(Idx))
        continue;
      if (!isValidElementType(Idx->getType()))
        continue;
      if (GEP->getType()->isVectorTy())
        continue;
      GEPs[GEP->getPointerOperand()].push_back(GEP);
    }
  }
}
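// Illustrative seed shapes (not from the original source): a simple scalar
// store such as "store i32 %v, i32* %p" lands in the Stores bucket keyed by
// the underlying object of %p, while a single-index, non-constant GEP such as
// "%g = getelementptr i32, i32* %base, i64 %i" lands in the GEPs bucket keyed
// by %base; multi-index and constant-index GEPs are skipped above.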
bool SLPVectorizerPass::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
  if (!A || !B)
    return false;
  Value *VL[] = {A, B};
  return tryToVectorizeList(VL, R, /*UserCost=*/0, true);
}
bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
                                           int UserCost, bool AllowReorder) {
  if (VL.size() < 2)
    return false;

  LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
                    << VL.size() << ".\n");

  // Check that all of the parts are scalar instructions of the same type,
  // we permit an alternate opcode via InstructionsState.
  InstructionsState S = getSameOpcode(VL);
  if (!S.getOpcode())
    return false;

  Instruction *I0 = cast<Instruction>(S.OpValue);
  unsigned Sz = R.getVectorElementSize(I0);
  unsigned MinVF = std::max(2U, R.getMinVecRegSize() / Sz);
  unsigned MaxVF = std::max<unsigned>(PowerOf2Floor(VL.size()), MinVF);
  if (MaxVF < 2) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
             << "Cannot SLP vectorize list: vectorization factor "
             << "less than 2 is not supported";
    });
    return false;
  }

  for (Value *V : VL) {
    Type *Ty = V->getType();
    if (!isValidElementType(Ty)) {
      // NOTE: the following will give user internal llvm type name, which may
      // not be useful.
      R.getORE()->emit([&]() {
        std::string type_str;
        llvm::raw_string_ostream rso(type_str);
        Ty->print(rso);
        return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
               << "Cannot SLP vectorize list: type "
               << rso.str() + " is unsupported by vectorizer";
      });
      return false;
    }
  }

  bool Changed = false;
  bool CandidateFound = false;
  int MinCost = SLPCostThreshold;

  // Keep track of values that were deleted by vectorizing in the loop below.
  SmallVector<WeakTrackingVH, 8> TrackValues(VL.begin(), VL.end());

  unsigned NextInst = 0, MaxInst = VL.size();
  for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
    // No actual vectorization should happen, if number of parts is the same as
    // provided vectorization factor (i.e. the scalar type is used for vector
    // code during codegen).
    auto *VecTy = VectorType::get(VL[0]->getType(), VF);
    if (TTI->getNumberOfParts(VecTy) == VF)
      continue;
    for (unsigned I = NextInst; I < MaxInst; ++I) {
      unsigned OpsWidth = 0;

      if (I + VF > MaxInst)
        OpsWidth = MaxInst - I;
      else
        OpsWidth = VF;

      if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
        break;

      // Check that a previous iteration of this loop did not delete the Value.
      if (hasValueBeenRAUWed(VL, TrackValues, I, OpsWidth))
        continue;

      LLVM_DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
                        << "\n");
      ArrayRef<Value *> Ops = VL.slice(I, OpsWidth);

      R.buildTree(Ops);
      Optional<ArrayRef<unsigned>> Order = R.bestOrder();
      // TODO: check if we can allow reordering for more cases.
      if (AllowReorder && Order) {
        // TODO: reorder tree nodes without tree rebuilding.
        // Conceptually, there is nothing actually preventing us from trying to
        // reorder a larger list. In fact, we do exactly this when vectorizing
        // reductions. However, at this point, we only expect to get here when
        // there are exactly two operations.
        assert(Ops.size() == 2);
        Value *ReorderedOps[] = {Ops[1], Ops[0]};
        R.buildTree(ReorderedOps, None);
      }
      if (R.isTreeTinyAndNotFullyVectorizable())
        continue;

      R.computeMinimumValueSizes();
      int Cost = R.getTreeCost() - UserCost;
      CandidateFound = true;
      MinCost = std::min(MinCost, Cost);

      if (Cost < -SLPCostThreshold) {
        LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
        R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
                                            cast<Instruction>(Ops[0]))
                         << "SLP vectorized with cost " << ore::NV("Cost", Cost)
                         << " and with tree size "
                         << ore::NV("TreeSize", R.getTreeSize()));

        R.vectorizeTree();
        // Move to the next bundle.
        I += VF - 1;
        NextInst = I + 1;
        Changed = true;
      }
    }
  }

  if (!Changed && CandidateFound) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
             << "List vectorization was possible but not beneficial with cost "
             << ore::NV("Cost", MinCost) << " >= "
             << ore::NV("Threshold", -SLPCostThreshold);
    });
  } else if (!Changed) {
    R.getORE()->emit([&]() {
      return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
             << "Cannot SLP vectorize list: vectorization was impossible"
             << " with available vectorization factors";
    });
  }
  return Changed;
}
bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I) && !isa<CmpInst>(I))
    return false;

  Value *P = I->getParent();

  // Vectorize in current basic block only.
  auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
  auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
  if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P)
    return false;

  // Try to vectorize V.
  if (tryToVectorizePair(Op0, Op1, R))
    return true;

  auto *A = dyn_cast<BinaryOperator>(Op0);
  auto *B = dyn_cast<BinaryOperator>(Op1);

  // Try to skip B.
  if (B && B->hasOneUse()) {
    auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
    auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
    if (B0 && B0->getParent() == P && tryToVectorizePair(A, B0, R))
      return true;
    if (B1 && B1->getParent() == P && tryToVectorizePair(A, B1, R))
      return true;
  }

  // Try to skip A.
  if (A && A->hasOneUse()) {
    auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
    auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
    if (A0 && A0->getParent() == P && tryToVectorizePair(A0, B, R))
      return true;
    if (A1 && A1->getParent() == P && tryToVectorizePair(A1, B, R))
      return true;
  }

  return false;
}
/// Generate a shuffle mask to be used in a reduction tree.
///
/// \param VecLen The length of the vector to be reduced.
/// \param NumEltsToRdx The number of elements that should be reduced in the
///        vector.
/// \param IsPairwise Whether the reduction is a pairwise or splitting
///        reduction. A pairwise reduction will generate a mask of
///        <0,2,...> or <1,3,..> while a splitting reduction will generate
///        <2,3, undef,undef> for a vector of 4 and NumElts = 2.
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
                                   bool IsPairwise, bool IsLeft,
                                   IRBuilder<> &Builder) {
  assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");

  SmallVector<Constant *, 32> ShuffleMask(
      VecLen, UndefValue::get(Builder.getInt32Ty()));

  if (IsPairwise)
    // Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
  else
    // Move the upper half of the vector to the lower half.
    for (unsigned i = 0; i != NumEltsToRdx; ++i)
      ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);

  return ConstantVector::get(ShuffleMask);
}
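// Worked example (illustrative): for VecLen = 4 and NumEltsToRdx = 2, the
// pairwise form yields <0, 2, undef, undef> (left) and <1, 3, undef, undef>
// (right), while the splitting form yields <2, 3, undef, undef>, i.e. the
// upper half that is then combined element-wise with the lower half.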
namespace {

/// Model horizontal reductions.
///
/// A horizontal reduction is a tree of reduction operations (currently add and
/// fadd) that has operations that can be put into a vector as its leaf.
/// For example, this tree:
///
/// mul mul mul mul
///  \  /    \  /
///   +       +
///    \     /
///       +
/// This tree has "mul" as its reduced values and "+" as its reduction
/// operations. A reduction might be feeding into a store or a binary operation
/// feeding a phi.
class HorizontalReduction {
  using ReductionOpsType = SmallVector<Value *, 16>;
  using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
  ReductionOpsListType ReductionOps;
  SmallVector<Value *, 32> ReducedVals;
  // Use map vector to make stable output.
  MapVector<Instruction *, Value *> ExtraArgs;

  /// Kind of the reduction data.
  enum ReductionKind {
    RK_None,       /// Not a reduction.
    RK_Arithmetic, /// Binary reduction data.
    RK_Min,        /// Minimum reduction data.
    RK_UMin,       /// Unsigned minimum reduction data.
    RK_Max,        /// Maximum reduction data.
    RK_UMax,       /// Unsigned maximum reduction data.
  };
  /// Contains info about operation, like its opcode, left and right operands.
  class OperationData {
    /// Opcode of the instruction.
    unsigned Opcode = 0;

    /// Left operand of the reduction operation.
    Value *LHS = nullptr;

    /// Right operand of the reduction operation.
    Value *RHS = nullptr;

    /// Kind of the reduction operation.
    ReductionKind Kind = RK_None;

    /// True if floating-point min/max reduction has no NaNs.
    bool NoNaN = false;

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable() const {
      return LHS && RHS &&
             // We currently only support add/mul/logical && min/max reductions.
             ((Kind == RK_Arithmetic &&
               (Opcode == Instruction::Add || Opcode == Instruction::FAdd ||
                Opcode == Instruction::Mul || Opcode == Instruction::FMul ||
                Opcode == Instruction::And || Opcode == Instruction::Or ||
                Opcode == Instruction::Xor)) ||
              ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
               (Kind == RK_Min || Kind == RK_Max)) ||
              (Opcode == Instruction::ICmp &&
               (Kind == RK_UMin || Kind == RK_UMax)));
    }

    /// Creates reduction operation with the current opcode.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      Value *Cmp = nullptr;
      switch (Kind) {
      case RK_Arithmetic:
        return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, LHS, RHS,
                                   Name);
      case RK_Min:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSLT(LHS, RHS)
                                          : Builder.CreateFCmpOLT(LHS, RHS);
        break;
      case RK_Max:
        Cmp = Opcode == Instruction::ICmp ? Builder.CreateICmpSGT(LHS, RHS)
                                          : Builder.CreateFCmpOGT(LHS, RHS);
        break;
      case RK_UMin:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpULT(LHS, RHS);
        break;
      case RK_UMax:
        assert(Opcode == Instruction::ICmp && "Expected integer types.");
        Cmp = Builder.CreateICmpUGT(LHS, RHS);
        break;
      case RK_None:
        llvm_unreachable("Unknown reduction operation.");
      }
      return Builder.CreateSelect(Cmp, LHS, RHS, Name);
    }
  public:
    explicit OperationData() = default;

    /// Construction for reduced values. They are identified by opcode only and
    /// don't have associated LHS/RHS values.
    explicit OperationData(Value *V) {
      if (auto *I = dyn_cast<Instruction>(V))
        Opcode = I->getOpcode();
    }

    /// Constructor for reduction operations with opcode and its left and
    /// right operands.
    OperationData(unsigned Opcode, Value *LHS, Value *RHS, ReductionKind Kind,
                  bool NoNaN = false)
        : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind), NoNaN(NoNaN) {
      assert(Kind != RK_None && "One of the reduction operations is expected.");
    }

    explicit operator bool() const { return Opcode; }

    /// Get the index of the first operand.
    unsigned getFirstOperandIndex() const {
      assert(!!*this && "The opcode is not set.");
      switch (Kind) {
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 1; // Skip the select condition.
      case RK_Arithmetic:
      case RK_None:
        break;
      }
      return 0;
    }

    /// Total number of operands in the reduction operation.
    unsigned getNumberOfOperands() const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return 2;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return 3;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Checks if the operation has the same parent as \p P.
    bool hasSameParent(Instruction *I, Value *P, bool IsRedOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      if (!IsRedOp)
        return I->getParent() == P;
      switch (Kind) {
      case RK_Arithmetic:
        // Arithmetic reduction operation must be used once only.
        return I->getParent() == P;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax: {
        // SelectInst must be used twice while the condition op must have single
        // use only.
        auto *Cmp = cast<Instruction>(cast<SelectInst>(I)->getCondition());
        return I->getParent() == P && Cmp && Cmp->getParent() == P;
      }
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Expected number of uses for reduction operations/reduced values.
    bool hasRequiredNumberOfUses(Instruction *I, bool IsReductionOp) const {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->hasOneUse();
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        return I->hasNUses(2) &&
               (!IsReductionOp ||
                cast<SelectInst>(I)->getCondition()->hasOneUse());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Initializes the list of reduction operations.
    void initReductionOps(ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps.assign(1, ReductionOpsType());
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps.assign(2, ReductionOpsType());
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }
    /// Add all reduction operations for the reduction instruction \p I.
    void addReductionOps(Instruction *I, ReductionOpsListType &ReductionOps) {
      assert(Kind != RK_None && !!*this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        ReductionOps[0].emplace_back(I);
        break;
      case RK_Min:
      case RK_UMin:
      case RK_Max:
      case RK_UMax:
        ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
        ReductionOps[1].emplace_back(I);
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
    }

    /// Checks if instruction is associative and can be vectorized.
    bool isAssociative(Instruction *I) const {
      assert(Kind != RK_None && *this && LHS && RHS &&
             "Expected reduction operation.");
      switch (Kind) {
      case RK_Arithmetic:
        return I->isAssociative();
      case RK_Min:
      case RK_Max:
        return Opcode == Instruction::ICmp ||
               cast<Instruction>(I->getOperand(0))->isFast();
      case RK_UMin:
      case RK_UMax:
        assert(Opcode == Instruction::ICmp &&
               "Only integer compare operation is expected.");
        return true;
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }

    /// Checks if the reduction operation can be vectorized.
    bool isVectorizable(Instruction *I) const {
      return isVectorizable() && isAssociative(I);
    }

    /// Checks if two operation data are both a reduction op or both a reduced
    /// value.
    bool operator==(const OperationData &OD) {
      assert(((Kind != OD.Kind) || ((!LHS == !OD.LHS) && (!RHS == !OD.RHS))) &&
             "One of the comparing operations is incorrect.");
      return this == &OD || (Kind == OD.Kind && Opcode == OD.Opcode);
    }

    bool operator!=(const OperationData &OD) { return !(*this == OD); }

    void clear() {
      Opcode = 0;
      LHS = nullptr;
      RHS = nullptr;
      Kind = RK_None;
      NoNaN = false;
    }

    /// Get the opcode of the reduction operation.
    unsigned getOpcode() const {
      assert(isVectorizable() && "Expected vectorizable operation.");
      return Opcode;
    }

    /// Get kind of reduction data.
    ReductionKind getKind() const { return Kind; }
    Value *getLHS() const { return LHS; }
    Value *getRHS() const { return RHS; }
    Type *getConditionType() const {
      switch (Kind) {
      case RK_Arithmetic:
        return nullptr;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        return CmpInst::makeCmpResultType(LHS->getType());
      case RK_None:
        break;
      }
      llvm_unreachable("Reduction kind is not set");
    }
    /// Creates reduction operation with the current opcode with the IR flags
    /// from \p ReductionOps.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    const ReductionOpsListType &ReductionOps) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, ReductionOps[0]);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op))
          propagateIRFlags(SI->getCondition(), ReductionOps[0]);
        propagateIRFlags(Op, ReductionOps[1]);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }

    /// Creates reduction operation with the current opcode with the IR flags
    /// from \p I.
    Value *createOp(IRBuilder<> &Builder, const Twine &Name,
                    Instruction *I) const {
      assert(isVectorizable() &&
             "Expected add|fadd or min/max reduction operation.");
      auto *Op = createOp(Builder, Name);
      switch (Kind) {
      case RK_Arithmetic:
        propagateIRFlags(Op, I);
        return Op;
      case RK_Min:
      case RK_Max:
      case RK_UMin:
      case RK_UMax:
        if (auto *SI = dyn_cast<SelectInst>(Op)) {
          propagateIRFlags(SI->getCondition(),
                           cast<SelectInst>(I)->getCondition());
        }
        propagateIRFlags(Op, I);
        return Op;
      case RK_None:
        break;
      }
      llvm_unreachable("Unknown reduction operation.");
    }

    TargetTransformInfo::ReductionFlags getFlags() const {
      TargetTransformInfo::ReductionFlags Flags;
      Flags.NoNaN = NoNaN;
      switch (Kind) {
      case RK_Arithmetic:
        break;
      case RK_Min:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = false;
        break;
      case RK_Max:
        Flags.IsSigned = Opcode == Instruction::ICmp;
        Flags.IsMaxOp = true;
        break;
      case RK_UMin:
        Flags.IsSigned = false;
        Flags.IsMaxOp = false;
        break;
      case RK_UMax:
        Flags.IsSigned = false;
        Flags.IsMaxOp = true;
        break;
      case RK_None:
        llvm_unreachable("Reduction kind is not set");
      }
      return Flags;
    }
  };
  WeakTrackingVH ReductionRoot;

  /// The operation data of the reduction operation.
  OperationData ReductionData;

  /// The operation data of the values we perform a reduction on.
  OperationData ReducedValueData;

  /// Should we model this reduction as a pairwise reduction tree or a tree that
  /// splits the vector in halves and adds those halves.
  bool IsPairwiseReduction = false;

  /// Checks if the ParentStackElem.first should be marked as a reduction
  /// operation with an extra argument or as extra argument itself.
  void markExtraArg(std::pair<Instruction *, unsigned> &ParentStackElem,
                    Value *ExtraArg) {
    if (ExtraArgs.count(ParentStackElem.first)) {
      ExtraArgs[ParentStackElem.first] = nullptr;
      // We ran into something like:
      // ParentStackElem.first = ExtraArgs[ParentStackElem.first] + ExtraArg.
      // The whole ParentStackElem.first should be considered as an extra value
      // in this case.
      // Do not perform analysis of remaining operands of ParentStackElem.first
      // instruction, this whole instruction is an extra argument.
      ParentStackElem.second = ParentStackElem.first->getNumOperands();
    } else {
      // We ran into something like:
      // ParentStackElem.first += ... + ExtraArg + ...
      ExtraArgs[ParentStackElem.first] = ExtraArg;
    }
  }
  static OperationData getOperationData(Value *V) {
    if (!V)
      return OperationData();

    Value *LHS;
    Value *RHS;
    if (m_BinOp(m_Value(LHS), m_Value(RHS)).match(V)) {
      return OperationData(cast<BinaryOperator>(V)->getOpcode(), LHS, RHS,
                           RK_Arithmetic);
    }
    if (auto *Select = dyn_cast<SelectInst>(V)) {
      // Look for a min/max pattern.
      if (m_UMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);
      } else if (m_SMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);
      } else if (m_OrdFMin(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMin(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Min,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else if (m_UMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);
      } else if (m_SMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);
      } else if (m_OrdFMax(m_Value(LHS), m_Value(RHS)).match(Select) ||
                 m_UnordFMax(m_Value(LHS), m_Value(RHS)).match(Select)) {
        return OperationData(
            Instruction::FCmp, LHS, RHS, RK_Max,
            cast<Instruction>(Select->getCondition())->hasNoNaNs());
      } else {
        // Try harder: look for min/max pattern based on instructions producing
        // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
        // During the intermediate stages of SLP, it's very common to have
        // pattern like this (since optimizeGatherSequence is run only once
        // at the end):
        // %1 = extractelement <2 x i32> %a, i32 0
        // %2 = extractelement <2 x i32> %a, i32 1
        // %cond = icmp sgt i32 %1, %2
        // %3 = extractelement <2 x i32> %a, i32 0
        // %4 = extractelement <2 x i32> %a, i32 1
        // %select = select i1 %cond, i32 %3, i32 %4
        CmpInst::Predicate Pred;
        Instruction *L1;
        Instruction *L2;

        LHS = Select->getTrueValue();
        RHS = Select->getFalseValue();
        Value *Cond = Select->getCondition();

        // TODO: Support inverse predicates.
        if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
          if (!isa<ExtractElementInst>(RHS) ||
              !L2->isIdenticalTo(cast<Instruction>(RHS)))
            return OperationData(V);
        } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1),
                                     m_Specific(RHS)))) {
          if (!isa<ExtractElementInst>(LHS) ||
              !L1->isIdenticalTo(cast<Instruction>(LHS)))
            return OperationData(V);
        } else {
          if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
            return OperationData(V);
          if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
              !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
              !L2->isIdenticalTo(cast<Instruction>(RHS)))
            return OperationData(V);
        }

        switch (Pred) {
        default:
          return OperationData(V);

        case CmpInst::ICMP_ULT:
        case CmpInst::ICMP_ULE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_UMin);

        case CmpInst::ICMP_SLT:
        case CmpInst::ICMP_SLE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_Min);

        case CmpInst::FCMP_OLT:
        case CmpInst::FCMP_OLE:
        case CmpInst::FCMP_ULT:
        case CmpInst::FCMP_ULE:
          return OperationData(Instruction::FCmp, LHS, RHS, RK_Min,
                               cast<Instruction>(Cond)->hasNoNaNs());

        case CmpInst::ICMP_UGT:
        case CmpInst::ICMP_UGE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_UMax);

        case CmpInst::ICMP_SGT:
        case CmpInst::ICMP_SGE:
          return OperationData(Instruction::ICmp, LHS, RHS, RK_Max);

        case CmpInst::FCMP_OGT:
        case CmpInst::FCMP_OGE:
        case CmpInst::FCMP_UGT:
        case CmpInst::FCMP_UGE:
          return OperationData(Instruction::FCmp, LHS, RHS, RK_Max,
                               cast<Instruction>(Cond)->hasNoNaNs());
        }
      }
    }
    return OperationData(V);
  }
public:
  HorizontalReduction() = default;

  /// Try to find a reduction tree.
  bool matchAssociativeReduction(PHINode *Phi, Instruction *B) {
    assert((!Phi || is_contained(Phi->operands(), B)) &&
           "The phi needs to use the binary operator");

    ReductionData = getOperationData(B);

    // We could have an initial reduction that is not an add.
    //  r *= v1 + v2 + v3 + v4
    // In such a case start looking for a tree rooted in the first '+'.
    if (Phi) {
      if (ReductionData.getLHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getRHS());
        ReductionData = getOperationData(B);
      } else if (ReductionData.getRHS() == Phi) {
        Phi = nullptr;
        B = dyn_cast<Instruction>(ReductionData.getLHS());
        ReductionData = getOperationData(B);
      }
    }

    if (!ReductionData.isVectorizable(B))
      return false;

    Type *Ty = B->getType();
    if (!isValidElementType(Ty))
      return false;
    if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy())
      return false;

    ReducedValueData.clear();
    ReductionRoot = B;

    // Post order traverse the reduction tree starting at B. We only handle true
    // trees containing only binary operators.
    SmallVector<std::pair<Instruction *, unsigned>, 32> Stack;
    Stack.push_back(std::make_pair(B, ReductionData.getFirstOperandIndex()));
    ReductionData.initReductionOps(ReductionOps);
    while (!Stack.empty()) {
      Instruction *TreeN = Stack.back().first;
      unsigned EdgeToVist = Stack.back().second++;
      OperationData OpData = getOperationData(TreeN);
      bool IsReducedValue = OpData != ReductionData;

      // Postorder visit.
      if (IsReducedValue || EdgeToVist == OpData.getNumberOfOperands()) {
        if (IsReducedValue)
          ReducedVals.push_back(TreeN);
        else {
          auto I = ExtraArgs.find(TreeN);
          if (I != ExtraArgs.end() && !I->second) {
            // Check if TreeN is an extra argument of its parent operation.
            if (Stack.size() <= 1) {
              // TreeN can't be an extra argument as it is a root reduction
              // operation.
              return false;
            }
            // Yes, TreeN is an extra argument, do not add it to a list of
            // reduction operations.
            // Stack[Stack.size() - 2] always points to the parent operation.
            markExtraArg(Stack[Stack.size() - 2], TreeN);
            ExtraArgs.erase(TreeN);
          } else
            ReductionData.addReductionOps(TreeN, ReductionOps);
        }
        // Retract.
        Stack.pop_back();
        continue;
      }

      // Visit left or right.
      Value *NextV = TreeN->getOperand(EdgeToVist);
      if (NextV != Phi) {
        auto *I = dyn_cast<Instruction>(NextV);
        OpData = getOperationData(I);
        // Continue analysis if the next operand is a reduction operation or
        // (possibly) a reduced value. If the reduced value opcode is not set,
        // the first met operation != reduction operation is considered as the
        // reduced value class.
        if (I && (!ReducedValueData || OpData == ReducedValueData ||
                  OpData == ReductionData)) {
          const bool IsReductionOperation = OpData == ReductionData;
          // Only handle trees in the current basic block.
          if (!ReductionData.hasSameParent(I, B->getParent(),
                                           IsReductionOperation)) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          // Each tree node needs to have minimal number of users except for the
          // ultimate reduction.
          if (!ReductionData.hasRequiredNumberOfUses(I,
                                                     OpData == ReductionData) &&
              I != B) {
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          }

          if (IsReductionOperation) {
            // We need to be able to reassociate the reduction operations.
            if (!OpData.isAssociative(I)) {
              // I is an extra argument for TreeN (its parent operation).
              markExtraArg(Stack.back(), I);
              continue;
            }
          } else if (ReducedValueData &&
                     ReducedValueData != OpData) {
            // Make sure that the opcodes of the operations that we are going to
            // reduce match.
            // I is an extra argument for TreeN (its parent operation).
            markExtraArg(Stack.back(), I);
            continue;
          } else if (!ReducedValueData)
            ReducedValueData = OpData;

          Stack.push_back(std::make_pair(I, OpData.getFirstOperandIndex()));
          continue;
        }
      }
      // NextV is an extra argument for TreeN (its parent operation).
      markExtraArg(Stack.back(), NextV);
    }
    return true;
  }
  /// Attempt to vectorize the tree found by matchAssociativeReduction.
  bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
    if (ReducedVals.empty())
      return false;

    // If there is a sufficient number of reduction values, reduce
    // to a nearby power-of-2. Can safely generate oversized
    // vectors and rely on the backend to split them to legal sizes.
    unsigned NumReducedVals = ReducedVals.size();
    if (NumReducedVals < 4)
      return false;

    unsigned ReduxWidth = PowerOf2Floor(NumReducedVals);

    Value *VectorizedTree = nullptr;

    // FIXME: Fast-math-flags should be set based on the instructions in the
    //        reduction (not all of 'fast' are required).
    IRBuilder<> Builder(cast<Instruction>(ReductionRoot));
    FastMathFlags Unsafe;
    Unsafe.setFast();
    Builder.setFastMathFlags(Unsafe);

    unsigned i = 0;
    BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
    // The same extra argument may be used several times, so log each attempt
    // to use it.
    for (auto &Pair : ExtraArgs) {
      assert(Pair.first && "DebugLoc must be set.");
      ExternallyUsedValues[Pair.second].push_back(Pair.first);
    }
    // The reduction root is used as the insertion point for new instructions,
    // so set it as externally used to prevent it from being deleted.
    ExternallyUsedValues[ReductionRoot];
    SmallVector<Value *, 16> IgnoreList;
    for (auto &V : ReductionOps)
      IgnoreList.append(V.begin(), V.end());
    while (i < NumReducedVals - ReduxWidth + 1 && ReduxWidth > 2) {
      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
      V.buildTree(VL, ExternallyUsedValues, IgnoreList);
      Optional<ArrayRef<unsigned>> Order = V.bestOrder();
      // TODO: Handle orders of size less than number of elements in the vector.
      if (Order && Order->size() == VL.size()) {
        // TODO: reorder tree nodes without tree rebuilding.
        SmallVector<Value *, 4> ReorderedOps(VL.size());
        llvm::transform(*Order, ReorderedOps.begin(),
                        [VL](const unsigned Idx) { return VL[Idx]; });
        V.buildTree(ReorderedOps, ExternallyUsedValues, IgnoreList);
      }
      if (V.isTreeTinyAndNotFullyVectorizable())
        break;

      V.computeMinimumValueSizes();

      // Estimate cost.
      int TreeCost = V.getTreeCost();
      int ReductionCost = getReductionCost(TTI, ReducedVals[i], ReduxWidth);
      int Cost = TreeCost + ReductionCost;
      if (Cost >= -SLPCostThreshold) {
        V.getORE()->emit([&]() {
          return OptimizationRemarkMissed(
                     SV_NAME, "HorSLPNotBeneficial", cast<Instruction>(VL[0]))
                 << "Vectorizing horizontal reduction is possible"
                 << " but not beneficial with cost "
                 << ore::NV("Cost", Cost) << " and threshold "
                 << ore::NV("Threshold", -SLPCostThreshold);
        });
        break;
      }

      LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
                        << Cost << ". (HorRdx)\n");
      V.getORE()->emit([&]() {
        return OptimizationRemark(
                   SV_NAME, "VectorizedHorizontalReduction",
                   cast<Instruction>(VL[0]))
               << "Vectorized horizontal reduction with cost "
               << ore::NV("Cost", Cost) << " and with tree size "
               << ore::NV("TreeSize", V.getTreeSize());
      });

      // Vectorize a tree.
      DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
      Value *VectorizedRoot = V.vectorizeTree(ExternallyUsedValues);

      // Emit a reduction.
      Builder.SetInsertPoint(cast<Instruction>(ReductionRoot));
      Value *ReducedSubTree =
          emitReduction(VectorizedRoot, Builder, ReduxWidth, TTI);
      if (VectorizedTree) {
        Builder.SetCurrentDebugLocation(Loc);
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, ReducedSubTree,
                                        ReductionData.getKind());
        VectorizedTree =
            VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
      } else
        VectorizedTree = ReducedSubTree;

      i += ReduxWidth;
      ReduxWidth = PowerOf2Floor(NumReducedVals - i);
    }

    if (VectorizedTree) {
      // Finish the reduction.
      for (; i < NumReducedVals; ++i) {
        auto *I = cast<Instruction>(ReducedVals[i]);
        Builder.SetCurrentDebugLocation(I->getDebugLoc());
        OperationData VectReductionData(ReductionData.getOpcode(),
                                        VectorizedTree, I,
                                        ReductionData.getKind());
        VectorizedTree = VectReductionData.createOp(Builder, "", ReductionOps);
      }
      for (auto &Pair : ExternallyUsedValues) {
        // Add each externally used value to the final reduction.
        for (auto *I : Pair.second) {
          Builder.SetCurrentDebugLocation(I->getDebugLoc());
          OperationData VectReductionData(ReductionData.getOpcode(),
                                          VectorizedTree, Pair.first,
                                          ReductionData.getKind());
          VectorizedTree = VectReductionData.createOp(Builder, "op.extra", I);
        }
      }
      // Update users.
      ReductionRoot->replaceAllUsesWith(VectorizedTree);
    }
    return VectorizedTree != nullptr;
  }

  unsigned numReductionValues() const {
    return ReducedVals.size();
  }
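  // Illustrative walk-through (not from the original source): with nine
  // reduced values, ReduxWidth starts at PowerOf2Floor(9) = 8, so one 8-wide
  // tree is built and reduced; afterwards i = 8 and ReduxWidth becomes
  // PowerOf2Floor(1) = 1, the while-condition fails, and the single leftover
  // scalar is folded into the result by the "Finish the reduction" loop.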
private:
  /// Calculate the cost of a reduction.
  int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal,
                       unsigned ReduxWidth) {
    Type *ScalarTy = FirstReducedVal->getType();
    Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);

    int PairwiseRdxCost;
    int SplittingRdxCost;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      PairwiseRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/true);
      SplittingRdxCost =
          TTI->getArithmeticReductionCost(ReductionData.getOpcode(), VecTy,
                                          /*IsPairwiseForm=*/false);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax: {
      Type *VecCondTy = CmpInst::makeCmpResultType(VecTy);
      bool IsUnsigned = ReductionData.getKind() == RK_UMin ||
                        ReductionData.getKind() == RK_UMax;
      PairwiseRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/true, IsUnsigned);
      SplittingRdxCost =
          TTI->getMinMaxReductionCost(VecTy, VecCondTy,
                                      /*IsPairwiseForm=*/false, IsUnsigned);
      break;
    }
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }

    IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
    int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;

    int ScalarReduxCost = 0;
    switch (ReductionData.getKind()) {
    case RK_Arithmetic:
      ScalarReduxCost =
          TTI->getArithmeticInstrCost(ReductionData.getOpcode(), ScalarTy);
      break;
    case RK_Min:
    case RK_Max:
    case RK_UMin:
    case RK_UMax:
      ScalarReduxCost =
          TTI->getCmpSelInstrCost(ReductionData.getOpcode(), ScalarTy) +
          TTI->getCmpSelInstrCost(Instruction::Select, ScalarTy,
                                  CmpInst::makeCmpResultType(ScalarTy));
      break;
    case RK_None:
      llvm_unreachable("Expected arithmetic or min/max reduction operation");
    }
    ScalarReduxCost *= (ReduxWidth - 1);

    LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
                      << " for reduction that starts with " << *FirstReducedVal
                      << " (It is a "
                      << (IsPairwiseReduction ? "pairwise" : "splitting")
                      << " reduction)\n");

    return VecReduxCost - ScalarReduxCost;
  }
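  // Cost-model sketch (illustrative numbers, not from the original source):
  // if the target reports the splitting reduction of an <8 x i32> add at cost
  // 6 and a scalar add at cost 1, then ScalarReduxCost = 1 * (8 - 1) = 7 and
  // getReductionCost returns 6 - 7 = -1, i.e. the vector reduction is one
  // unit cheaper than the chain of seven scalar adds it replaces.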
  /// Emit a horizontal reduction of the vectorized value.
  Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder,
                       unsigned ReduxWidth, const TargetTransformInfo *TTI) {
    assert(VectorizedValue && "Need to have a vectorized tree node");
    assert(isPowerOf2_32(ReduxWidth) &&
           "We only handle power-of-two reductions for now");

    if (!IsPairwiseReduction) {
      // FIXME: The builder should use an FMF guard. It should not be hard-coded
      //        to 'fast'.
      assert(Builder.getFastMathFlags().isFast() && "Expected 'fast' FMF");
      return createSimpleTargetReduction(
          Builder, TTI, ReductionData.getOpcode(), VectorizedValue,
          ReductionData.getFlags(), ReductionOps.back());
    }

    Value *TmpVec = VectorizedValue;
    for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
      Value *LeftMask =
          createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
      Value *RightMask =
          createRdxShuffleMask(ReduxWidth, i, true, false, Builder);

      Value *LeftShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
      Value *RightShuf = Builder.CreateShuffleVector(
          TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
          "rdx.shuf.r");
      OperationData VectReductionData(ReductionData.getOpcode(), LeftShuf,
                                      RightShuf, ReductionData.getKind());
      TmpVec = VectReductionData.createOp(Builder, "op.rdx", ReductionOps);
    }

    // The result is in the first element of the vector.
    return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
  }
};

} // end anonymous namespace
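// Illustrative expansion of HorizontalReduction::emitReduction above (not from
// the original source): for a pairwise add reduction with ReduxWidth = 4 the
// shuffle loop runs with i = 2 and i = 1, producing roughly
//   %l0 = shufflevector %v,  undef, <0, 2, undef, undef>
//   %r0 = shufflevector %v,  undef, <1, 3, undef, undef>
//   %s0 = add %l0, %r0
//   %l1 = shufflevector %s0, undef, <0, undef, undef, undef>
//   %r1 = shufflevector %s0, undef, <1, undef, undef, undef>
//   %s1 = add %l1, %r1
// and the scalar result is the extractelement of lane 0 of %s1.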
/// Recognize construction of vectors like
///  %ra = insertelement <4 x float> undef, float %s0, i32 0
///  %rb = insertelement <4 x float> %ra, float %s1, i32 1
///  %rc = insertelement <4 x float> %rb, float %s2, i32 2
///  %rd = insertelement <4 x float> %rc, float %s3, i32 3
/// starting from the last insertelement instruction.
///
/// \return true if it matches.
static bool findBuildVector(InsertElementInst *LastInsertElem,
                            TargetTransformInfo *TTI,
                            SmallVectorImpl<Value *> &BuildVectorOpds,
                            int &UserCost) {
  UserCost = 0;
  Value *V = nullptr;
  do {
    if (auto *CI = dyn_cast<ConstantInt>(LastInsertElem->getOperand(2))) {
      UserCost += TTI->getVectorInstrCost(Instruction::InsertElement,
                                          LastInsertElem->getType(),
                                          CI->getZExtValue());
    }
    BuildVectorOpds.push_back(LastInsertElem->getOperand(1));
    V = LastInsertElem->getOperand(0);
    if (isa<UndefValue>(V))
      break;
    LastInsertElem = dyn_cast<InsertElementInst>(V);
    if (!LastInsertElem || !LastInsertElem->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}

/// Like findBuildVector, but looks for construction of aggregate.
///
/// \return true if it matches.
static bool findBuildAggregate(InsertValueInst *IV,
                               SmallVectorImpl<Value *> &BuildVectorOpds) {
  Value *V;
  do {
    BuildVectorOpds.push_back(IV->getInsertedValueOperand());
    V = IV->getAggregateOperand();
    if (isa<UndefValue>(V))
      break;
    IV = dyn_cast<InsertValueInst>(V);
    if (!IV || !IV->hasOneUse())
      return false;
  } while (true);
  std::reverse(BuildVectorOpds.begin(), BuildVectorOpds.end());
  return true;
}
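// Illustrative match for findBuildAggregate (not from the original source):
//   %a0 = insertvalue {float, float} undef, float %s0, 0
//   %a1 = insertvalue {float, float} %a0,   float %s1, 1
// walking up from %a1 collects {%s0, %s1}, which vectorizeInsertValueInst then
// hands to tryToVectorizeList, mirroring what findBuildVector does for
// insertelement chains.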
static bool PhiTypeSorterFunc(Value *V, Value *V2) {
  return V->getType() < V2->getType();
}
/// Try and get a reduction value from a phi node.
///
/// Given a phi node \p P in a block \p ParentBB, consider possible reductions
/// if they come from either \p ParentBB or a containing loop latch.
///
/// \returns A candidate reduction value if possible, or \code nullptr \endcode
/// if not possible.
static Value *getReductionValue(const DominatorTree *DT, PHINode *P,
                                BasicBlock *ParentBB, LoopInfo *LI) {
  // There are situations where the reduction value is not dominated by the
  // reduction phi. Vectorizing such cases has been reported to cause
  // miscompiles. See PR25787.
  auto DominatedReduxValue = [&](Value *R) {
    return isa<Instruction>(R) &&
           DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
  };

  Value *Rdx = nullptr;

  // Return the incoming value if it comes from the same BB as the phi node.
  if (P->getIncomingBlock(0) == ParentBB) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == ParentBB) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  // Otherwise, check whether we have a loop latch to look at.
  Loop *BBL = LI->getLoopFor(ParentBB);
  if (!BBL)
    return nullptr;
  BasicBlock *BBLatch = BBL->getLoopLatch();
  if (!BBLatch)
    return nullptr;

  // There is a loop latch, return the incoming value if it comes from
  // that. This reduction pattern occasionally turns up.
  if (P->getIncomingBlock(0) == BBLatch) {
    Rdx = P->getIncomingValue(0);
  } else if (P->getIncomingBlock(1) == BBLatch) {
    Rdx = P->getIncomingValue(1);
  }

  if (Rdx && DominatedReduxValue(Rdx))
    return Rdx;

  return nullptr;
}
/// Attempt to reduce a horizontal reduction.
/// If it is legal to match a horizontal reduction feeding the phi node \a P
/// with reduction operators \a Root (or one of its operands) in a basic block
/// \a BB, then check if it can be done. If horizontal reduction is not found
/// and root instruction is a binary operation, vectorization of the operands
/// is attempted.
/// \returns true if a horizontal reduction was matched and reduced or operands
/// of one of the binary instruction were vectorized.
/// \returns false if a horizontal reduction was not matched (or not possible)
/// or no vectorization of any binary operation feeding \a Root instruction was
/// performed.
static bool tryToVectorizeHorReductionOrInstOperands(
    PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
    TargetTransformInfo *TTI,
    const function_ref<bool(Instruction *, BoUpSLP &)> Vectorize) {
  if (!ShouldVectorizeHor)
    return false;

  if (!Root)
    return false;

  if (Root->getParent() != BB || isa<PHINode>(Root))
    return false;
  // Start analysis starting from Root instruction. If horizontal reduction is
  // found, try to vectorize it. If it is not a horizontal reduction or
  // vectorization is not possible or not effective, and currently analyzed
  // instruction is a binary operation, try to vectorize the operands, using
  // pre-order DFS traversal order. If the operands were not vectorized, repeat
  // the same procedure considering each operand as a possible root of the
  // horizontal reduction.
  // Interrupt the process if the Root instruction itself was vectorized or all
  // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
  SmallVector<std::pair<WeakTrackingVH, unsigned>, 8> Stack(1, {Root, 0});
  SmallPtrSet<Value *, 8> VisitedInstrs;
  bool Res = false;
  while (!Stack.empty()) {
    Value *V;
    unsigned Level;
    std::tie(V, Level) = Stack.pop_back_val();
    if (!V)
      continue;
    auto *Inst = dyn_cast<Instruction>(V);
    if (!Inst)
      continue;
    auto *BI = dyn_cast<BinaryOperator>(Inst);
    auto *SI = dyn_cast<SelectInst>(Inst);
    if (BI || SI) {
      HorizontalReduction HorRdx;
      if (HorRdx.matchAssociativeReduction(P, Inst)) {
        if (HorRdx.tryToReduce(R, TTI)) {
          Res = true;
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
      if (P && BI) {
        Inst = dyn_cast<Instruction>(BI->getOperand(0));
        if (Inst == P)
          Inst = dyn_cast<Instruction>(BI->getOperand(1));
        if (!Inst) {
          // Set P to nullptr to avoid re-analysis of phi node in
          // matchAssociativeReduction function unless this is the root node.
          P = nullptr;
          continue;
        }
      }
    }
    // Set P to nullptr to avoid re-analysis of phi node in
    // matchAssociativeReduction function unless this is the root node.
    P = nullptr;
    if (Vectorize(Inst, R)) {
      Res = true;
      continue;
    }

    // Try to vectorize operands.
    // Continue analysis for the instruction from the same basic block only to
    // save compile time.
    if (++Level < RecursionMaxDepth)
      for (auto *Op : Inst->operand_values())
        if (VisitedInstrs.insert(Op).second)
          if (auto *I = dyn_cast<Instruction>(Op))
            if (!isa<PHINode>(I) && I->getParent() == BB)
              Stack.emplace_back(Op, Level);
  }
  return Res;
}
bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Value *V,
                                                 BasicBlock *BB, BoUpSLP &R,
                                                 TargetTransformInfo *TTI) {
  if (!V)
    return false;
  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  if (!isa<BinaryOperator>(I))
    P = nullptr;
  // Try to match and vectorize a horizontal reduction.
  auto &&ExtraVectorization = [this](Instruction *I, BoUpSLP &R) -> bool {
    return tryToVectorize(I, R);
  };
  return tryToVectorizeHorReductionOrInstOperands(P, I, BB, R, TTI,
                                                  ExtraVectorization);
}
bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
                                                 BasicBlock *BB, BoUpSLP &R) {
  const DataLayout &DL = BB->getModule()->getDataLayout();
  if (!R.canMapToVector(IVI->getType(), DL))
    return false;

  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildAggregate(IVI, BuildVectorOpds))
    return false;

  LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
  // Aggregate value is unlikely to be processed in vector register, we need to
  // extract scalars into scalar registers, so NeedExtraction is set true.
  return tryToVectorizeList(BuildVectorOpds, R);
}
bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
                                                   BasicBlock *BB, BoUpSLP &R) {
  int UserCost;
  SmallVector<Value *, 16> BuildVectorOpds;
  if (!findBuildVector(IEI, TTI, BuildVectorOpds, UserCost) ||
      (llvm::all_of(BuildVectorOpds,
                    [](Value *V) { return isa<ExtractElementInst>(V); }) &&
       isShuffle(BuildVectorOpds)))
    return false;

  // Vectorize starting with the build vector operands ignoring the BuildVector
  // instructions for the purpose of scheduling and user extraction.
  return tryToVectorizeList(BuildVectorOpds, R, UserCost);
}
bool SLPVectorizerPass::vectorizeCmpInst(CmpInst *CI, BasicBlock *BB,
                                         BoUpSLP &R) {
  if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R))
    return true;

  bool OpsChanged = false;
  for (int Idx = 0; Idx < 2; ++Idx) {
    OpsChanged |=
        vectorizeRootInstruction(nullptr, CI->getOperand(Idx), BB, R, TTI);
  }
  return OpsChanged;
}
bool SLPVectorizerPass::vectorizeSimpleInstructions(
    SmallVectorImpl<WeakVH> &Instructions, BasicBlock *BB, BoUpSLP &R) {
  bool OpsChanged = false;
  for (auto &VH : reverse(Instructions)) {
    auto *I = dyn_cast_or_null<Instruction>(VH);
    if (!I)
      continue;
    if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I))
      OpsChanged |= vectorizeInsertValueInst(LastInsertValue, BB, R);
    else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I))
      OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R);
    else if (auto *CI = dyn_cast<CmpInst>(I))
      OpsChanged |= vectorizeCmpInst(CI, BB, R);
  }
  Instructions.clear();
  return OpsChanged;
}
bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
  bool Changed = false;
  SmallVector<Value *, 4> Incoming;
  SmallPtrSet<Value *, 16> VisitedInstrs;

  bool HaveVectorizedPhiNodes = true;
  while (HaveVectorizedPhiNodes) {
    HaveVectorizedPhiNodes = false;

    // Collect the incoming values from the PHIs.
    Incoming.clear();
    for (Instruction &I : *BB) {
      PHINode *P = dyn_cast<PHINode>(&I);
      if (!P)
        break;

      if (!VisitedInstrs.count(P))
        Incoming.push_back(P);
    }

    // Sort by type.
    llvm::stable_sort(Incoming, PhiTypeSorterFunc);

    // Try to vectorize elements based on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
                                           E = Incoming.end();
         IncIt != E;) {

      // Look for the next elements with the same type.
      SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
      while (SameTypeIt != E &&
             (*SameTypeIt)->getType() == (*IncIt)->getType()) {
        VisitedInstrs.insert(*SameTypeIt);
        ++SameTypeIt;
      }

      // Try to vectorize them.
      unsigned NumElts = (SameTypeIt - IncIt);
      LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at PHIs ("
                        << NumElts << ")\n");
      // The order in which the phi nodes appear in the program does not matter.
      // So allow tryToVectorizeList to reorder them if it is beneficial. This
      // is done when there are exactly two elements since tryToVectorizeList
      // asserts that there are only two values when AllowReorder is true.
      bool AllowReorder = NumElts == 2;
      if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R,
                                            /*UserCost=*/0, AllowReorder)) {
        // Success: start over because instructions might have been changed.
        HaveVectorizedPhiNodes = true;
        Changed = true;
        break;
      }

      // Start over at the next instruction of a different type (or the end).
      IncIt = SameTypeIt;
    }
  }

  VisitedInstrs.clear();

  SmallVector<WeakVH, 8> PostProcessInstructions;
  SmallDenseSet<Instruction *, 4> KeyNodes;
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
    // We may go through BB multiple times so skip the one we have checked.
    if (!VisitedInstrs.insert(&*it).second) {
      if (it->use_empty() && KeyNodes.count(&*it) > 0 &&
          vectorizeSimpleInstructions(PostProcessInstructions, BB, R)) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid value.
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    if (isa<DbgInfoIntrinsic>(it))
      continue;

    // Try to vectorize reductions that use PHINodes.
    if (PHINode *P = dyn_cast<PHINode>(it)) {
      // Check that the PHI is a reduction PHI.
      if (P->getNumIncomingValues() != 2)
        return Changed;

      // Try to match and vectorize a horizontal reduction.
      if (vectorizeRootInstruction(P, getReductionValue(DT, P, BB, LI), BB, R,
                                   TTI)) {
        Changed = true;
        it = BB->begin();
        e = BB->end();
      }
      continue;
    }

    // Ran into an instruction without users, like terminator, or function call
    // with ignored return value, store. Ignore unused instructions (basing on
    // instruction type, except for CallInst and InvokeInst).
    if (it->use_empty() && (it->getType()->isVoidTy() || isa<CallInst>(it) ||
                            isa<InvokeInst>(it))) {
      KeyNodes.insert(&*it);
      bool OpsChanged = false;
      if (ShouldStartVectorizeHorAtStore || !isa<StoreInst>(it)) {
        for (auto *V : it->operand_values()) {
          // Try to match and vectorize a horizontal reduction.
          OpsChanged |= vectorizeRootInstruction(nullptr, V, BB, R, TTI);
        }
      }
      // Start vectorization of post-process list of instructions from the
      // top-tree instructions to try to vectorize as many instructions as
      // possible.
      OpsChanged |= vectorizeSimpleInstructions(PostProcessInstructions, BB, R);
      if (OpsChanged) {
        // We would like to start over since some instructions are deleted
        // and the iterator may become invalid value.
        Changed = true;
        it = BB->begin();
        e = BB->end();
        continue;
      }
    }

    if (isa<InsertElementInst>(it) || isa<CmpInst>(it) ||
        isa<InsertValueInst>(it))
      PostProcessInstructions.push_back(&*it);
  }

  return Changed;
}
bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
  auto Changed = false;
  for (auto &Entry : GEPs) {
    // If the getelementptr list has fewer than two elements, there's nothing
    // to do.
    if (Entry.second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
                      << Entry.second.size() << ".\n");

    // We process the getelementptr list in chunks of 16 (like we do for
    // stores) to minimize compile-time.
    for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += 16) {
      auto Len = std::min<unsigned>(BE - BI, 16);
      auto GEPList = makeArrayRef(&Entry.second[BI], Len);

      // Initialize a set of candidate getelementptrs. Note that we use a
      // SetVector here to preserve program order. If the index computations
      // are vectorizable and begin with loads, we want to minimize the chance
      // of having to reorder them later.
      SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());

      // Some of the candidates may have already been vectorized after we
      // initially collected them. If so, the WeakTrackingVHs will have
      // nullified the values, so remove them from the set of candidates.
      Candidates.remove(nullptr);
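      // A hypothetical example (illustrative IR) of the pruning below: given
      //
      //   %g0 = getelementptr inbounds i32, i32* %p, i64 %i
      //   %g1 = getelementptr inbounds i32, i32* %p, i64 %j
      //
      // if SCEV proves %j == %i + 1, the two pointers differ by a constant
      // (4 bytes here), so one is trivially computable from the other and
      // both are dropped from the candidate set.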
      // Remove from the set of candidates all pairs of getelementptrs with
      // constant differences. Such getelementptrs are likely not good
      // candidates for vectorization in a bottom-up phase since one can be
      // computed from the other. We also ensure all candidate getelementptr
      // indices are unique.
      for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
        auto *GEPI = cast<GetElementPtrInst>(GEPList[I]);
        if (!Candidates.count(GEPI))
          continue;
        auto *SCEVI = SE->getSCEV(GEPList[I]);
        for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
          auto *GEPJ = cast<GetElementPtrInst>(GEPList[J]);
          auto *SCEVJ = SE->getSCEV(GEPList[J]);
          if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
            Candidates.remove(GEPList[I]);
            Candidates.remove(GEPList[J]);
          } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
            Candidates.remove(GEPList[J]);
          }
        }
      }

      // We break out of the above computation as soon as we know there are
      // fewer than two candidates remaining.
      if (Candidates.size() < 2)
        continue;
      // Add the single, non-constant index of each candidate to the bundle. We
      // ensured the indices met these constraints when we originally collected
      // the getelementptrs.
      SmallVector<Value *, 16> Bundle(Candidates.size());
      auto BundleIndex = 0u;
      for (auto *V : Candidates) {
        auto *GEP = cast<GetElementPtrInst>(V);
        auto *GEPIdx = GEP->idx_begin()->get();
        assert(GEP->getNumIndices() == 1 || !isa<Constant>(GEPIdx));
        Bundle[BundleIndex++] = GEPIdx;
      }

      // Try and vectorize the indices. We are currently only interested in
      // gather-like cases of the form:
      //
      // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
      //
      // where the loads of "a", the loads of "b", and the subtractions can be
      // performed in parallel. It's likely that detecting this pattern in a
      // bottom-up phase will be simpler and less costly than building a
      // full-blown top-down phase beginning at the consecutive loads.
      Changed |= tryToVectorizeList(Bundle, R);
    }
  }
  return Changed;
}
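// A worked example of the chunking in vectorizeStoreChains below (hypothetical
// sizes): a store group of 40 consecutive stores is processed as three slices
// of 16, 16, and 8 stores (Len = min(CE - CI, 16)), each handed to
// vectorizeStores separately.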
bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
  bool Changed = false;
  // Attempt to sort and vectorize each of the store-groups.
  for (StoreListMap::iterator it = Stores.begin(), e = Stores.end(); it != e;
       ++it) {
    if (it->second.size() < 2)
      continue;

    LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
                      << it->second.size() << ".\n");

    // Process the stores in chunks of 16.
    // TODO: The limit of 16 inhibits greater vectorization factors.
    //       For example, AVX2 supports v32i8. Increasing this limit, however,
    //       may cause a significant compile-time increase.
    for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI += 16) {
      unsigned Len = std::min<unsigned>(CE - CI, 16);
      Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len), R);
    }
  }
  return Changed;
}
char SLPVectorizer::ID = 0;

static const char lv_name[] = "SLP Vectorizer";

INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(DemandedBitsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)

Pass *llvm::createSLPVectorizerPass() { return new SLPVectorizer(); }
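// A minimal usage sketch (assuming a legacy PassManager and a Module M set up
// elsewhere):
//
//   legacy::PassManager PM;
//   PM.add(createSLPVectorizerPass());
//   PM.run(M);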