[ControlHeightReduction] Add assert to avoid underflow (#116339)
llvm-project.git: llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
14 // The pass is inspired by the work described in the paper:
15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/DenseSet.h"
22 #include "llvm/ADT/PriorityQueue.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/ScopeExit.h"
25 #include "llvm/ADT/SetOperations.h"
26 #include "llvm/ADT/SetVector.h"
27 #include "llvm/ADT/SmallBitVector.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/SmallString.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/iterator.h"
33 #include "llvm/ADT/iterator_range.h"
34 #include "llvm/Analysis/AliasAnalysis.h"
35 #include "llvm/Analysis/AssumptionCache.h"
36 #include "llvm/Analysis/CodeMetrics.h"
37 #include "llvm/Analysis/ConstantFolding.h"
38 #include "llvm/Analysis/DemandedBits.h"
39 #include "llvm/Analysis/GlobalsModRef.h"
40 #include "llvm/Analysis/IVDescriptors.h"
41 #include "llvm/Analysis/LoopAccessAnalysis.h"
42 #include "llvm/Analysis/LoopInfo.h"
43 #include "llvm/Analysis/MemoryLocation.h"
44 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
45 #include "llvm/Analysis/ScalarEvolution.h"
46 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
47 #include "llvm/Analysis/TargetLibraryInfo.h"
48 #include "llvm/Analysis/TargetTransformInfo.h"
49 #include "llvm/Analysis/ValueTracking.h"
50 #include "llvm/Analysis/VectorUtils.h"
51 #include "llvm/IR/Attributes.h"
52 #include "llvm/IR/BasicBlock.h"
53 #include "llvm/IR/Constant.h"
54 #include "llvm/IR/Constants.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DerivedTypes.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #ifdef EXPENSIVE_CHECKS
74 #include "llvm/IR/Verifier.h"
75 #endif
76 #include "llvm/Pass.h"
77 #include "llvm/Support/Casting.h"
78 #include "llvm/Support/CommandLine.h"
79 #include "llvm/Support/Compiler.h"
80 #include "llvm/Support/DOTGraphTraits.h"
81 #include "llvm/Support/Debug.h"
82 #include "llvm/Support/DebugCounter.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/GraphWriter.h"
85 #include "llvm/Support/InstructionCost.h"
86 #include "llvm/Support/KnownBits.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
90 #include "llvm/Transforms/Utils/Local.h"
91 #include "llvm/Transforms/Utils/LoopUtils.h"
92 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
93 #include <algorithm>
94 #include <cassert>
95 #include <cstdint>
96 #include <iterator>
97 #include <memory>
98 #include <optional>
99 #include <set>
100 #include <string>
101 #include <tuple>
102 #include <utility>
104 using namespace llvm;
105 using namespace llvm::PatternMatch;
106 using namespace slpvectorizer;
108 #define SV_NAME "slp-vectorizer"
109 #define DEBUG_TYPE "SLP"
111 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
113 DEBUG_COUNTER(VectorizedGraphs, "slp-vectorized",
114 "Controls which SLP graphs should be vectorized.");
116 static cl::opt<bool>
117 RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
118 cl::desc("Run the SLP vectorization passes"));
120 static cl::opt<bool>
121 SLPReVec("slp-revec", cl::init(false), cl::Hidden,
122 cl::desc("Enable vectorization for wider vector utilization"));
124 static cl::opt<int>
125 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
126 cl::desc("Only vectorize if you gain more than this "
127 "number "));
129 static cl::opt<bool> SLPSkipEarlyProfitabilityCheck(
130 "slp-skip-early-profitability-check", cl::init(false), cl::Hidden,
131 cl::desc("When true, SLP vectorizer bypasses profitability checks based on "
132 "heuristics and makes vectorization decision via cost modeling."));
134 static cl::opt<bool>
135 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
136 cl::desc("Attempt to vectorize horizontal reductions"));
138 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
139 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
140 cl::desc(
141 "Attempt to vectorize horizontal reductions feeding into a store"));
143 static cl::opt<int>
144 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
145 cl::desc("Attempt to vectorize for this register size in bits"));
147 static cl::opt<unsigned>
148 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
149 cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
151 /// Limits the size of scheduling regions in a block.
152 /// It avoids long compile times for _very_ large blocks where vector
153 /// instructions are spread over a wide range.
154 /// This limit is way higher than needed by real-world functions.
155 static cl::opt<int>
156 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
157 cl::desc("Limit the size of the SLP scheduling region per block"));
159 static cl::opt<int> MinVectorRegSizeOption(
160 "slp-min-reg-size", cl::init(128), cl::Hidden,
161 cl::desc("Attempt to vectorize for this register size in bits"));
163 static cl::opt<unsigned> RecursionMaxDepth(
164 "slp-recursion-max-depth", cl::init(12), cl::Hidden,
165 cl::desc("Limit the recursion depth when building a vectorizable tree"));
167 static cl::opt<unsigned> MinTreeSize(
168 "slp-min-tree-size", cl::init(3), cl::Hidden,
169 cl::desc("Only vectorize small trees if they are fully vectorizable"));
171 // The maximum depth that the look-ahead score heuristic will explore.
172 // The higher this value, the higher the compilation time overhead.
173 static cl::opt<int> LookAheadMaxDepth(
174 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
175 cl::desc("The maximum look-ahead depth for operand reordering scores"));
177 // The maximum depth that the look-ahead score heuristic will explore
177 // The maximum depth that the look-ahead score heuristic will explore
178 // when it is probing among candidates for vectorization tree roots.
179 // The higher this value, the higher the compilation time overhead, but unlike
180 // the similar limit for operand reordering, this one is used less frequently,
181 // so the impact of a higher value is less noticeable.
182 static cl::opt<int> RootLookAheadMaxDepth(
183 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
184 cl::desc("The maximum look-ahead depth for searching best rooting option"));
186 static cl::opt<unsigned> MinProfitableStridedLoads(
187 "slp-min-strided-loads", cl::init(2), cl::Hidden,
188 cl::desc("The minimum number of loads, which should be considered strided, "
189 "if the stride is > 1 or is runtime value"));
191 static cl::opt<unsigned> MaxProfitableLoadStride(
192 "slp-max-stride", cl::init(8), cl::Hidden,
193 cl::desc("The maximum stride, considered to be profitable."));
195 static cl::opt<bool>
196 ViewSLPTree("view-slp-tree", cl::Hidden,
197 cl::desc("Display the SLP trees with Graphviz"));
199 static cl::opt<bool> VectorizeNonPowerOf2(
200 "slp-vectorize-non-power-of-2", cl::init(false), cl::Hidden,
201 cl::desc("Try to vectorize with non-power-of-2 number of elements."));
203 // Limit the number of alias checks. The limit is chosen so that
204 // it has no negative effect on the llvm benchmarks.
205 static const unsigned AliasedCheckLimit = 10;
207 // Limit on the number of uses for potentially transformed instructions/values,
208 // used in checks to avoid compile-time explosion.
209 static constexpr int UsesLimit = 64;
211 // Another limit for the alias checks: The maximum distance between load/store
212 // instructions where alias checks are done.
213 // This limit is useful for very large basic blocks.
214 static const unsigned MaxMemDepDistance = 160;
216 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
217 /// regions to be handled.
218 static const int MinScheduleRegionSize = 16;
220 /// Maximum allowed number of operands in the PHI nodes.
221 static const unsigned MaxPHINumOperands = 128;
223 /// Predicate for the element types that the SLP vectorizer supports.
225 /// The most important things to filter here are types which are invalid in LLVM
226 /// vectors. We also filter target specific types which have absolutely no
227 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
228 /// avoids spending time checking the cost model and realizing that they will
229 /// be inevitably scalarized.
230 static bool isValidElementType(Type *Ty) {
231 // TODO: Support ScalableVectorType.
232 if (SLPReVec && isa<FixedVectorType>(Ty))
233 Ty = Ty->getScalarType();
234 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
235 !Ty->isPPC_FP128Ty();
238 /// Returns the type of the given value/instruction \p V. If it is a store,
239 /// returns the type of its value operand; for Cmp - the type of the compare
240 /// operands; and for insertelement - the type of the inserted operand.
241 /// Otherwise, just the type of the value is returned.
242 static Type *getValueType(Value *V) {
243 if (auto *SI = dyn_cast<StoreInst>(V))
244 return SI->getValueOperand()->getType();
245 if (auto *CI = dyn_cast<CmpInst>(V))
246 return CI->getOperand(0)->getType();
247 if (auto *IE = dyn_cast<InsertElementInst>(V))
248 return IE->getOperand(1)->getType();
249 return V->getType();
252 /// \returns the number of elements for Ty.
253 static unsigned getNumElements(Type *Ty) {
254 assert(!isa<ScalableVectorType>(Ty) &&
255 "ScalableVectorType is not supported.");
256 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty))
257 return VecTy->getNumElements();
258 return 1;
261 /// \returns the vector type of ScalarTy based on vectorization factor.
262 static FixedVectorType *getWidenedType(Type *ScalarTy, unsigned VF) {
263 return FixedVectorType::get(ScalarTy->getScalarType(),
264 VF * getNumElements(ScalarTy));
267 /// Returns the number of elements of the given type \p Ty, not less than \p Sz,
268 /// which forms a type that \p TTI splits into whole vector types during
269 /// legalization.
270 static unsigned getFullVectorNumberOfElements(const TargetTransformInfo &TTI,
271 Type *Ty, unsigned Sz) {
272 if (!isValidElementType(Ty))
273 return bit_ceil(Sz);
274 // Find the number of elements, which forms full vectors.
275 const unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
276 if (NumParts == 0 || NumParts >= Sz)
277 return bit_ceil(Sz);
278 return bit_ceil(divideCeil(Sz, NumParts)) * NumParts;
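// A small worked example (illustrative only), assuming a target where
// TTI.getNumberOfParts(<6 x i32>) == 2, e.g. 128-bit vector registers:
//   getFullVectorNumberOfElements(TTI, i32, /*Sz=*/6)
//     NumParts = 2; bit_ceil(divideCeil(6, 2)) * NumParts = 4 * 2 = 8
// i.e. 6 elements are rounded up to 8, so each part is a whole <4 x i32>.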
281 /// Returns the number of elements of the given type \p Ty, not greater than \p
282 /// Sz, which forms a type that \p TTI splits into whole vector types during
283 /// legalization.
284 static unsigned
285 getFloorFullVectorNumberOfElements(const TargetTransformInfo &TTI, Type *Ty,
286 unsigned Sz) {
287 if (!isValidElementType(Ty))
288 return bit_floor(Sz);
289 // Find the number of elements, which forms full vectors.
290 unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
291 if (NumParts == 0 || NumParts >= Sz)
292 return bit_floor(Sz);
293 unsigned RegVF = bit_ceil(divideCeil(Sz, NumParts));
294 if (RegVF > Sz)
295 return bit_floor(Sz);
296 return (Sz / RegVF) * RegVF;
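// Illustrative counterpart of the example above, again assuming
// TTI.getNumberOfParts(<6 x i32>) == 2:
//   getFloorFullVectorNumberOfElements(TTI, i32, /*Sz=*/6)
//     RegVF = bit_ceil(divideCeil(6, 2)) = 4; (6 / 4) * 4 = 4
// i.e. the largest element count not exceeding 6 that still forms whole
// <4 x i32> parts is 4.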
299 static void transformScalarShuffleIndiciesToVector(unsigned VecTyNumElements,
300 SmallVectorImpl<int> &Mask) {
301 // The ShuffleBuilder implementation uses shufflevector to splat an "element".
302 // But the element has a different meaning for SLP (scalar) and REVEC
303 // (vector). We need to expand Mask into masks which shufflevector can use
304 // directly.
305 SmallVector<int> NewMask(Mask.size() * VecTyNumElements);
306 for (unsigned I : seq<unsigned>(Mask.size()))
307 for (auto [J, MaskV] : enumerate(MutableArrayRef(NewMask).slice(
308 I * VecTyNumElements, VecTyNumElements)))
309 MaskV = Mask[I] == PoisonMaskElem ? PoisonMaskElem
310 : Mask[I] * VecTyNumElements + J;
311 Mask.swap(NewMask);
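// Illustrative example of the expansion above: with VecTyNumElements == 2 and
// Mask == {1, 0}, the scalar-level mask is rewritten to the element-level mask
// {2, 3, 0, 1}; PoisonMaskElem entries stay PoisonMaskElem.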
314 /// \returns the number of groups of shufflevectors.
315 /// A group has the following features:
316 /// 1. All values in a group are shufflevectors.
317 /// 2. The mask of each shufflevector is an extract-subvector mask.
318 /// 3. The masks of all shufflevectors together use all elements of the source.
319 /// e.g., this is 1 group (%0):
320 /// %1 = shufflevector <16 x i8> %0, <16 x i8> poison,
321 /// <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
322 /// %2 = shufflevector <16 x i8> %0, <16 x i8> poison,
323 /// <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
324 /// this is 2 groups (%3 and %4):
325 /// %5 = shufflevector <8 x i16> %3, <8 x i16> poison,
326 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
327 /// %6 = shufflevector <8 x i16> %3, <8 x i16> poison,
328 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
329 /// %7 = shufflevector <8 x i16> %4, <8 x i16> poison,
330 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
331 /// %8 = shufflevector <8 x i16> %4, <8 x i16> poison,
332 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
333 /// this is 0 groups:
334 /// %12 = shufflevector <8 x i16> %10, <8 x i16> poison,
335 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
336 /// %13 = shufflevector <8 x i16> %11, <8 x i16> poison,
337 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
338 static unsigned getShufflevectorNumGroups(ArrayRef<Value *> VL) {
339 if (VL.empty())
340 return 0;
341 if (!all_of(VL, IsaPred<ShuffleVectorInst>))
342 return 0;
343 auto *SV = cast<ShuffleVectorInst>(VL.front());
344 unsigned SVNumElements =
345 cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
346 unsigned ShuffleMaskSize = SV->getShuffleMask().size();
347 unsigned GroupSize = SVNumElements / ShuffleMaskSize;
348 if (GroupSize == 0 || (VL.size() % GroupSize) != 0)
349 return 0;
350 unsigned NumGroup = 0;
351 for (size_t I = 0, E = VL.size(); I != E; I += GroupSize) {
352 auto *SV = cast<ShuffleVectorInst>(VL[I]);
353 Value *Src = SV->getOperand(0);
354 ArrayRef<Value *> Group = VL.slice(I, GroupSize);
355 SmallBitVector ExpectedIndex(GroupSize);
356 if (!all_of(Group, [&](Value *V) {
357 auto *SV = cast<ShuffleVectorInst>(V);
358 // From the same source.
359 if (SV->getOperand(0) != Src)
360 return false;
361 int Index;
362 if (!SV->isExtractSubvectorMask(Index))
363 return false;
364 ExpectedIndex.set(Index / ShuffleMaskSize);
365 return true;
367 return 0;
368 if (!ExpectedIndex.all())
369 return 0;
370 ++NumGroup;
372 assert(NumGroup == (VL.size() / GroupSize) && "Unexpected number of groups");
373 return NumGroup;
376 /// \returns a shufflevector mask which is used to vectorize shufflevectors
377 /// e.g.,
378 /// %5 = shufflevector <8 x i16> %3, <8 x i16> poison,
379 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
380 /// %6 = shufflevector <8 x i16> %3, <8 x i16> poison,
381 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
382 /// %7 = shufflevector <8 x i16> %4, <8 x i16> poison,
383 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
384 /// %8 = shufflevector <8 x i16> %4, <8 x i16> poison,
385 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
386 /// the result is
387 /// <0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 28, 29, 30, 31>
388 static SmallVector<int> calculateShufflevectorMask(ArrayRef<Value *> VL) {
389 assert(getShufflevectorNumGroups(VL) && "Not supported shufflevector usage.");
390 auto *SV = cast<ShuffleVectorInst>(VL.front());
391 unsigned SVNumElements =
392 cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
393 SmallVector<int> Mask;
394 unsigned AccumulateLength = 0;
395 for (Value *V : VL) {
396 auto *SV = cast<ShuffleVectorInst>(V);
397 for (int M : SV->getShuffleMask())
398 Mask.push_back(M == PoisonMaskElem ? PoisonMaskElem
399 : AccumulateLength + M);
400 AccumulateLength += SVNumElements;
402 return Mask;
405 /// \returns True if the value is a constant (but not globals/constant
406 /// expressions).
407 static bool isConstant(Value *V) {
408 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
411 /// Checks if \p V is one of vector-like instructions, i.e. undef,
412 /// insertelement/extractelement with constant indices for fixed vector type or
413 /// extractvalue instruction.
414 static bool isVectorLikeInstWithConstOps(Value *V) {
415 if (!isa<InsertElementInst, ExtractElementInst>(V) &&
416 !isa<ExtractValueInst, UndefValue>(V))
417 return false;
418 auto *I = dyn_cast<Instruction>(V);
419 if (!I || isa<ExtractValueInst>(I))
420 return true;
421 if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
422 return false;
423 if (isa<ExtractElementInst>(I))
424 return isConstant(I->getOperand(1));
425 assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
426 return isConstant(I->getOperand(2));
429 /// Returns the power-of-2 number of elements in a single register (part), given
430 /// the total number of elements \p Size and the number of registers (parts) \p
431 /// NumParts.
432 static unsigned getPartNumElems(unsigned Size, unsigned NumParts) {
433 return std::min<unsigned>(Size, bit_ceil(divideCeil(Size, NumParts)));
436 /// Returns the correct remaining number of elements, given the total amount \p
437 /// Size, the (power-of-2) number of elements in a single register \p PartNumElems,
438 /// and the current register (part) \p Part.
439 static unsigned getNumElems(unsigned Size, unsigned PartNumElems,
440 unsigned Part) {
441 return std::min<unsigned>(PartNumElems, Size - Part * PartNumElems);
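// Illustrative example for the two helpers above: for Size == 7 and
// NumParts == 2, getPartNumElems(7, 2) == bit_ceil(divideCeil(7, 2)) == 4,
// and the per-part element counts are getNumElems(7, 4, 0) == 4 and
// getNumElems(7, 4, 1) == 3.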
444 #if !defined(NDEBUG)
445 /// Print a short descriptor of the instruction bundle suitable for debug output.
446 static std::string shortBundleName(ArrayRef<Value *> VL, int Idx = -1) {
447 std::string Result;
448 raw_string_ostream OS(Result);
449 if (Idx >= 0)
450 OS << "Idx: " << Idx << ", ";
451 OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]";
452 return Result;
454 #endif
456 /// \returns true if all of the instructions in \p VL are in the same block or
457 /// false otherwise.
458 static bool allSameBlock(ArrayRef<Value *> VL) {
459 Instruction *I0 = dyn_cast<Instruction>(VL[0]);
460 if (!I0)
461 return false;
462 if (all_of(VL, isVectorLikeInstWithConstOps))
463 return true;
465 BasicBlock *BB = I0->getParent();
466 for (int I = 1, E = VL.size(); I < E; I++) {
467 auto *II = dyn_cast<Instruction>(VL[I]);
468 if (!II)
469 return false;
471 if (BB != II->getParent())
472 return false;
474 return true;
477 /// \returns True if all of the values in \p VL are constants (but not
478 /// globals/constant expressions).
479 static bool allConstant(ArrayRef<Value *> VL) {
480 // Constant expressions and globals can't be vectorized like normal integer/FP
481 // constants.
482 return all_of(VL, isConstant);
485 /// \returns True if all of the values in \p VL are identical or some of them
486 /// are UndefValue.
487 static bool isSplat(ArrayRef<Value *> VL) {
488 Value *FirstNonUndef = nullptr;
489 for (Value *V : VL) {
490 if (isa<UndefValue>(V))
491 continue;
492 if (!FirstNonUndef) {
493 FirstNonUndef = V;
494 continue;
496 if (V != FirstNonUndef)
497 return false;
499 return FirstNonUndef != nullptr;
502 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
503 static bool isCommutative(Instruction *I) {
504 if (auto *Cmp = dyn_cast<CmpInst>(I))
505 return Cmp->isCommutative();
506 if (auto *BO = dyn_cast<BinaryOperator>(I))
507 return BO->isCommutative() ||
508 (BO->getOpcode() == Instruction::Sub &&
509 !BO->hasNUsesOrMore(UsesLimit) &&
510 all_of(
511 BO->uses(),
512 [](const Use &U) {
513 // Commutative, if icmp eq/ne sub, 0
514 ICmpInst::Predicate Pred;
515 if (match(U.getUser(),
516 m_ICmp(Pred, m_Specific(U.get()), m_Zero())) &&
517 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE))
518 return true;
519 // Commutative, if abs(sub nsw, true) or abs(sub, false).
520 ConstantInt *Flag;
521 return match(U.getUser(),
522 m_Intrinsic<Intrinsic::abs>(
523 m_Specific(U.get()), m_ConstantInt(Flag))) &&
524 (!cast<Instruction>(U.get())->hasNoSignedWrap() ||
525 Flag->isOne());
526 })) ||
527 (BO->getOpcode() == Instruction::FSub &&
528 !BO->hasNUsesOrMore(UsesLimit) &&
529 all_of(BO->uses(), [](const Use &U) {
530 return match(U.getUser(),
531 m_Intrinsic<Intrinsic::fabs>(m_Specific(U.get())));
532 }));
533 return I->isCommutative();
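// Illustrative example of the special cases above: a 'sub' such as
//   %d = sub i32 %a, %b
// whose only user is 'icmp eq i32 %d, 0' (or 'icmp ne') is treated as
// commutative, because x - y == 0 exactly when y - x == 0; similarly for a sub
// used only by llvm.abs and an fsub used only by llvm.fabs, subject to the
// flag checks in the code above.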
536 template <typename T>
537 static std::optional<unsigned> getInsertExtractIndex(const Value *Inst,
538 unsigned Offset) {
539 static_assert(std::is_same_v<T, InsertElementInst> ||
540 std::is_same_v<T, ExtractElementInst>,
541 "unsupported T");
542 int Index = Offset;
543 if (const auto *IE = dyn_cast<T>(Inst)) {
544 const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
545 if (!VT)
546 return std::nullopt;
547 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
548 if (!CI)
549 return std::nullopt;
550 if (CI->getValue().uge(VT->getNumElements()))
551 return std::nullopt;
552 Index *= VT->getNumElements();
553 Index += CI->getZExtValue();
554 return Index;
556 return std::nullopt;
559 /// \returns the insertion or extraction index of an InsertElement,
560 /// ExtractElement or InsertValue instruction, using \p Offset as the base
561 /// offset for the index.
561 /// \returns std::nullopt if the index is not an immediate.
562 static std::optional<unsigned> getElementIndex(const Value *Inst,
563 unsigned Offset = 0) {
564 if (auto Index = getInsertExtractIndex<InsertElementInst>(Inst, Offset))
565 return Index;
566 if (auto Index = getInsertExtractIndex<ExtractElementInst>(Inst, Offset))
567 return Index;
569 int Index = Offset;
571 const auto *IV = dyn_cast<InsertValueInst>(Inst);
572 if (!IV)
573 return std::nullopt;
575 Type *CurrentType = IV->getType();
576 for (unsigned I : IV->indices()) {
577 if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
578 Index *= ST->getNumElements();
579 CurrentType = ST->getElementType(I);
580 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
581 Index *= AT->getNumElements();
582 CurrentType = AT->getElementType();
583 } else {
584 return std::nullopt;
586 Index += I;
588 return Index;
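// Illustrative example of the flattened index computed above:
//   %r = insertvalue {[2 x i32], [2 x i32]} %agg, i32 %v, 1, 0
// walks the struct (Index = 0 * 2 + 1 = 1) and then the array
// (Index = 1 * 2 + 0 = 2), so getElementIndex returns 2.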
591 namespace {
592 /// Specifies the way the mask should be analyzed for undefs/poisonous elements
593 /// in the shuffle mask.
594 enum class UseMask {
595 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors,
596 ///< check for the mask elements for the first argument (mask
597 ///< indices are in range [0:VF)).
598 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check
599 ///< for the mask elements for the second argument (mask indices
600 ///< are in range [VF:2*VF))
601 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for
602 ///< future shuffle elements and mark them as ones as being used
603 ///< in future. Non-undef elements are considered as unused since
604 ///< they're already marked as used in the mask.
606 } // namespace
608 /// Prepares a use bitset for the given mask either for the first argument or
609 /// for the second.
610 static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask,
611 UseMask MaskArg) {
612 SmallBitVector UseMask(VF, true);
613 for (auto [Idx, Value] : enumerate(Mask)) {
614 if (Value == PoisonMaskElem) {
615 if (MaskArg == UseMask::UndefsAsMask)
616 UseMask.reset(Idx);
617 continue;
619 if (MaskArg == UseMask::FirstArg && Value < VF)
620 UseMask.reset(Value);
621 else if (MaskArg == UseMask::SecondArg && Value >= VF)
622 UseMask.reset(Value - VF);
624 return UseMask;
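// Illustrative example: buildUseMask(/*VF=*/4, {0, 2, 5, PoisonMaskElem},
// UseMask::FirstArg) clears bits 0 and 2, leaving bits 1 and 3 set, i.e.
// elements 1 and 3 of the first argument are not consumed by the mask.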
627 /// Checks if the given value is actually an undefined constant vector.
628 /// Also, if the \p UseMask is not empty, tries to check if the non-masked
629 /// elements actually mask the insertelement buildvector, if any.
630 template <bool IsPoisonOnly = false>
631 static SmallBitVector isUndefVector(const Value *V,
632 const SmallBitVector &UseMask = {}) {
633 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true);
634 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>;
635 if (isa<T>(V))
636 return Res;
637 auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
638 if (!VecTy)
639 return Res.reset();
640 auto *C = dyn_cast<Constant>(V);
641 if (!C) {
642 if (!UseMask.empty()) {
643 const Value *Base = V;
644 while (auto *II = dyn_cast<InsertElementInst>(Base)) {
645 Base = II->getOperand(0);
646 if (isa<T>(II->getOperand(1)))
647 continue;
648 std::optional<unsigned> Idx = getElementIndex(II);
649 if (!Idx) {
650 Res.reset();
651 return Res;
653 if (*Idx < UseMask.size() && !UseMask.test(*Idx))
654 Res.reset(*Idx);
656 // TODO: Add analysis for shuffles here too.
657 if (V == Base) {
658 Res.reset();
659 } else {
660 SmallBitVector SubMask(UseMask.size(), false);
661 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask);
663 } else {
664 Res.reset();
666 return Res;
668 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
669 if (Constant *Elem = C->getAggregateElement(I))
670 if (!isa<T>(Elem) &&
671 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I))))
672 Res.reset(I);
674 return Res;
677 /// Checks if the vector of instructions can be represented as a shuffle, like:
678 /// %x0 = extractelement <4 x i8> %x, i32 0
679 /// %x3 = extractelement <4 x i8> %x, i32 3
680 /// %y1 = extractelement <4 x i8> %y, i32 1
681 /// %y2 = extractelement <4 x i8> %y, i32 2
682 /// %x0x0 = mul i8 %x0, %x0
683 /// %x3x3 = mul i8 %x3, %x3
684 /// %y1y1 = mul i8 %y1, %y1
685 /// %y2y2 = mul i8 %y2, %y2
686 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
687 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
688 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
689 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
690 /// ret <4 x i8> %ins4
691 /// can be transformed into:
692 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
693 /// i32 6>
694 /// %2 = mul <4 x i8> %1, %1
695 /// ret <4 x i8> %2
696 /// Mask will return the Shuffle Mask equivalent to the extracted elements.
697 /// TODO: Can we split off and reuse the shuffle mask detection from
698 /// ShuffleVectorInst/getShuffleCost?
699 static std::optional<TargetTransformInfo::ShuffleKind>
700 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
701 const auto *It = find_if(VL, IsaPred<ExtractElementInst>);
702 if (It == VL.end())
703 return std::nullopt;
704 unsigned Size =
705 std::accumulate(VL.begin(), VL.end(), 0u, [](unsigned S, Value *V) {
706 auto *EI = dyn_cast<ExtractElementInst>(V);
707 if (!EI)
708 return S;
709 auto *VTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType());
710 if (!VTy)
711 return S;
712 return std::max(S, VTy->getNumElements());
715 Value *Vec1 = nullptr;
716 Value *Vec2 = nullptr;
717 bool HasNonUndefVec = any_of(VL, [](Value *V) {
718 auto *EE = dyn_cast<ExtractElementInst>(V);
719 if (!EE)
720 return false;
721 Value *Vec = EE->getVectorOperand();
722 if (isa<UndefValue>(Vec))
723 return false;
724 return isGuaranteedNotToBePoison(Vec);
726 enum ShuffleMode { Unknown, Select, Permute };
727 ShuffleMode CommonShuffleMode = Unknown;
728 Mask.assign(VL.size(), PoisonMaskElem);
729 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
730 // Undef can be represented as an undef element in a vector.
731 if (isa<UndefValue>(VL[I]))
732 continue;
733 auto *EI = cast<ExtractElementInst>(VL[I]);
734 if (isa<ScalableVectorType>(EI->getVectorOperandType()))
735 return std::nullopt;
736 auto *Vec = EI->getVectorOperand();
737 // We can extractelement from undef or poison vector.
738 if (isUndefVector</*isPoisonOnly=*/true>(Vec).all())
739 continue;
740 // All vector operands must have the same number of vector elements.
741 if (isa<UndefValue>(Vec)) {
742 Mask[I] = I;
743 } else {
744 if (isa<UndefValue>(EI->getIndexOperand()))
745 continue;
746 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
747 if (!Idx)
748 return std::nullopt;
749 // Undefined behavior if Idx is negative or >= Size.
750 if (Idx->getValue().uge(Size))
751 continue;
752 unsigned IntIdx = Idx->getValue().getZExtValue();
753 Mask[I] = IntIdx;
755 if (isUndefVector(Vec).all() && HasNonUndefVec)
756 continue;
757 // For correct shuffling we have to have at most 2 different vector operands
758 // in all extractelement instructions.
759 if (!Vec1 || Vec1 == Vec) {
760 Vec1 = Vec;
761 } else if (!Vec2 || Vec2 == Vec) {
762 Vec2 = Vec;
763 Mask[I] += Size;
764 } else {
765 return std::nullopt;
767 if (CommonShuffleMode == Permute)
768 continue;
769 // If the extract index is not the same as the operation number, it is a
770 // permutation.
771 if (Mask[I] % Size != I) {
772 CommonShuffleMode = Permute;
773 continue;
775 CommonShuffleMode = Select;
777 // If we're not crossing lanes in different vectors, consider it as blending.
778 if (CommonShuffleMode == Select && Vec2)
779 return TargetTransformInfo::SK_Select;
780 // If Vec2 was never used, we have a permutation of a single vector, otherwise
781 // we have permutation of 2 vectors.
782 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
783 : TargetTransformInfo::SK_PermuteSingleSrc;
786 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
787 static std::optional<unsigned> getExtractIndex(Instruction *E) {
788 unsigned Opcode = E->getOpcode();
789 assert((Opcode == Instruction::ExtractElement ||
790 Opcode == Instruction::ExtractValue) &&
791 "Expected extractelement or extractvalue instruction.");
792 if (Opcode == Instruction::ExtractElement) {
793 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
794 if (!CI)
795 return std::nullopt;
796 return CI->getZExtValue();
798 auto *EI = cast<ExtractValueInst>(E);
799 if (EI->getNumIndices() != 1)
800 return std::nullopt;
801 return *EI->idx_begin();
804 namespace {
806 /// Main data required for vectorization of instructions.
807 struct InstructionsState {
808 /// The very first instruction in the list with the main opcode.
809 Value *OpValue = nullptr;
811 /// The main/alternate instruction.
812 Instruction *MainOp = nullptr;
813 Instruction *AltOp = nullptr;
815 /// The main/alternate opcodes for the list of instructions.
816 unsigned getOpcode() const {
817 return MainOp ? MainOp->getOpcode() : 0;
820 unsigned getAltOpcode() const {
821 return AltOp ? AltOp->getOpcode() : 0;
824 /// Some of the instructions in the list have alternate opcodes.
825 bool isAltShuffle() const { return AltOp != MainOp; }
827 bool isOpcodeOrAlt(Instruction *I) const {
828 unsigned CheckedOpcode = I->getOpcode();
829 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
832 InstructionsState() = delete;
833 InstructionsState(Value *OpValue, Instruction *MainOp, Instruction *AltOp)
834 : OpValue(OpValue), MainOp(MainOp), AltOp(AltOp) {}
835 static InstructionsState invalid() { return {nullptr, nullptr, nullptr}; }
838 } // end anonymous namespace
840 /// \returns true if \p Opcode is allowed as part of the main/alternate
841 /// instruction for SLP vectorization.
843 /// An example of an unsupported opcode is SDIV, which can potentially cause UB
844 /// if the "shuffled out" lane would result in division by zero.
845 static bool isValidForAlternation(unsigned Opcode) {
846 if (Instruction::isIntDivRem(Opcode))
847 return false;
849 return true;
852 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
853 const TargetLibraryInfo &TLI);
855 /// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
856 /// compatible instructions or constants, or just some other regular values.
857 static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
858 Value *Op1, const TargetLibraryInfo &TLI) {
859 return (isConstant(BaseOp0) && isConstant(Op0)) ||
860 (isConstant(BaseOp1) && isConstant(Op1)) ||
861 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
862 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
863 BaseOp0 == Op0 || BaseOp1 == Op1 ||
864 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() ||
865 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode();
868 /// \returns true if a compare instruction \p CI has similar "look" and
869 /// same predicate as \p BaseCI, "as is" or with its operands and predicate
870 /// swapped, false otherwise.
871 static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI,
872 const TargetLibraryInfo &TLI) {
873 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
874 "Assessing comparisons of different types?");
875 CmpInst::Predicate BasePred = BaseCI->getPredicate();
876 CmpInst::Predicate Pred = CI->getPredicate();
877 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);
879 Value *BaseOp0 = BaseCI->getOperand(0);
880 Value *BaseOp1 = BaseCI->getOperand(1);
881 Value *Op0 = CI->getOperand(0);
882 Value *Op1 = CI->getOperand(1);
884 return (BasePred == Pred &&
885 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) ||
886 (BasePred == SwappedPred &&
887 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI));
890 /// \returns an analysis of the instructions in \p VL, described in
891 /// InstructionsState: the opcode with which we suppose the whole list
892 /// could be vectorized, even if its structure is diverse.
893 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
894 const TargetLibraryInfo &TLI) {
895 // Make sure these are all Instructions.
896 if (!all_of(VL, IsaPred<Instruction>))
897 return InstructionsState::invalid();
899 Value *V = VL.front();
900 bool IsCastOp = isa<CastInst>(V);
901 bool IsBinOp = isa<BinaryOperator>(V);
902 bool IsCmpOp = isa<CmpInst>(V);
903 CmpInst::Predicate BasePred =
904 IsCmpOp ? cast<CmpInst>(V)->getPredicate() : CmpInst::BAD_ICMP_PREDICATE;
905 unsigned Opcode = cast<Instruction>(V)->getOpcode();
906 unsigned AltOpcode = Opcode;
907 unsigned AltIndex = 0;
909 bool SwappedPredsCompatible = [&]() {
910 if (!IsCmpOp)
911 return false;
912 SetVector<unsigned> UniquePreds, UniqueNonSwappedPreds;
913 UniquePreds.insert(BasePred);
914 UniqueNonSwappedPreds.insert(BasePred);
915 for (Value *V : VL) {
916 auto *I = dyn_cast<CmpInst>(V);
917 if (!I)
918 return false;
919 CmpInst::Predicate CurrentPred = I->getPredicate();
920 CmpInst::Predicate SwappedCurrentPred =
921 CmpInst::getSwappedPredicate(CurrentPred);
922 UniqueNonSwappedPreds.insert(CurrentPred);
923 if (!UniquePreds.contains(CurrentPred) &&
924 !UniquePreds.contains(SwappedCurrentPred))
925 UniquePreds.insert(CurrentPred);
927 // If the total number of predicates is > 2, but only 2 remain once swapped
928 // predicates are treated as compatible, consider swappable predicates as
929 // compatible opcodes, not as alternates.
930 return UniqueNonSwappedPreds.size() > 2 && UniquePreds.size() == 2;
931 }();
932 // Check for one alternate opcode from another BinaryOperator.
933 // TODO - generalize to support all operators (types, calls etc.).
934 auto *IBase = cast<Instruction>(V);
935 Intrinsic::ID BaseID = 0;
936 SmallVector<VFInfo> BaseMappings;
937 if (auto *CallBase = dyn_cast<CallInst>(IBase)) {
938 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI);
939 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase);
940 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty())
941 return InstructionsState::invalid();
943 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
944 auto *I = cast<Instruction>(VL[Cnt]);
945 unsigned InstOpcode = I->getOpcode();
946 if (IsBinOp && isa<BinaryOperator>(I)) {
947 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
948 continue;
949 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
950 isValidForAlternation(Opcode)) {
951 AltOpcode = InstOpcode;
952 AltIndex = Cnt;
953 continue;
955 } else if (IsCastOp && isa<CastInst>(I)) {
956 Value *Op0 = IBase->getOperand(0);
957 Type *Ty0 = Op0->getType();
958 Value *Op1 = I->getOperand(0);
959 Type *Ty1 = Op1->getType();
960 if (Ty0 == Ty1) {
961 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
962 continue;
963 if (Opcode == AltOpcode) {
964 assert(isValidForAlternation(Opcode) &&
965 isValidForAlternation(InstOpcode) &&
966 "Cast isn't safe for alternation, logic needs to be updated!");
967 AltOpcode = InstOpcode;
968 AltIndex = Cnt;
969 continue;
972 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
973 auto *BaseInst = cast<CmpInst>(V);
974 Type *Ty0 = BaseInst->getOperand(0)->getType();
975 Type *Ty1 = Inst->getOperand(0)->getType();
976 if (Ty0 == Ty1) {
977 assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
978 // Check for compatible operands. If the corresponding operands are not
979 // compatible - need to perform alternate vectorization.
980 CmpInst::Predicate CurrentPred = Inst->getPredicate();
981 CmpInst::Predicate SwappedCurrentPred =
982 CmpInst::getSwappedPredicate(CurrentPred);
984 if ((E == 2 || SwappedPredsCompatible) &&
985 (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
986 continue;
988 if (isCmpSameOrSwapped(BaseInst, Inst, TLI))
989 continue;
990 auto *AltInst = cast<CmpInst>(VL[AltIndex]);
991 if (AltIndex) {
992 if (isCmpSameOrSwapped(AltInst, Inst, TLI))
993 continue;
994 } else if (BasePred != CurrentPred) {
995 assert(
996 isValidForAlternation(InstOpcode) &&
997 "CmpInst isn't safe for alternation, logic needs to be updated!");
998 AltIndex = Cnt;
999 continue;
1001 CmpInst::Predicate AltPred = AltInst->getPredicate();
1002 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
1003 AltPred == CurrentPred || AltPred == SwappedCurrentPred)
1004 continue;
1006 } else if (InstOpcode == Opcode || InstOpcode == AltOpcode) {
1007 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
1008 if (Gep->getNumOperands() != 2 ||
1009 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType())
1010 return InstructionsState::invalid();
1011 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) {
1012 if (!isVectorLikeInstWithConstOps(EI))
1013 return InstructionsState::invalid();
1014 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1015 auto *BaseLI = cast<LoadInst>(IBase);
1016 if (!LI->isSimple() || !BaseLI->isSimple())
1017 return InstructionsState::invalid();
1018 } else if (auto *Call = dyn_cast<CallInst>(I)) {
1019 auto *CallBase = cast<CallInst>(IBase);
1020 if (Call->getCalledFunction() != CallBase->getCalledFunction())
1021 return InstructionsState::invalid();
1022 if (Call->hasOperandBundles() &&
1023 (!CallBase->hasOperandBundles() ||
1024 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(),
1025 Call->op_begin() + Call->getBundleOperandsEndIndex(),
1026 CallBase->op_begin() +
1027 CallBase->getBundleOperandsStartIndex())))
1028 return InstructionsState::invalid();
1029 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI);
1030 if (ID != BaseID)
1031 return InstructionsState::invalid();
1032 if (!ID) {
1033 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call);
1034 if (Mappings.size() != BaseMappings.size() ||
1035 Mappings.front().ISA != BaseMappings.front().ISA ||
1036 Mappings.front().ScalarName != BaseMappings.front().ScalarName ||
1037 Mappings.front().VectorName != BaseMappings.front().VectorName ||
1038 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF ||
1039 Mappings.front().Shape.Parameters !=
1040 BaseMappings.front().Shape.Parameters)
1041 return InstructionsState::invalid();
1044 continue;
1046 return InstructionsState::invalid();
1049 return InstructionsState(V, cast<Instruction>(V),
1050 cast<Instruction>(VL[AltIndex]));
1053 /// \returns true if all of the values in \p VL have the same type or false
1054 /// otherwise.
1055 static bool allSameType(ArrayRef<Value *> VL) {
1056 Type *Ty = VL.front()->getType();
1057 return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; });
1060 /// \returns True if an in-tree use also needs an extract. This refers to a
1061 /// possible scalar operand in a vectorized instruction.
1062 static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
1063 TargetLibraryInfo *TLI) {
1064 if (!UserInst)
1065 return false;
1066 unsigned Opcode = UserInst->getOpcode();
1067 switch (Opcode) {
1068 case Instruction::Load: {
1069 LoadInst *LI = cast<LoadInst>(UserInst);
1070 return (LI->getPointerOperand() == Scalar);
1072 case Instruction::Store: {
1073 StoreInst *SI = cast<StoreInst>(UserInst);
1074 return (SI->getPointerOperand() == Scalar);
1076 case Instruction::Call: {
1077 CallInst *CI = cast<CallInst>(UserInst);
1078 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1079 return any_of(enumerate(CI->args()), [&](auto &&Arg) {
1080 return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
1081 Arg.value().get() == Scalar;
1084 default:
1085 return false;
1089 /// \returns the AA location that is being accessed by the instruction.
1090 static MemoryLocation getLocation(Instruction *I) {
1091 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1092 return MemoryLocation::get(SI);
1093 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1094 return MemoryLocation::get(LI);
1095 return MemoryLocation();
1098 /// \returns True if the instruction is not a volatile or atomic load/store.
1099 static bool isSimple(Instruction *I) {
1100 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1101 return LI->isSimple();
1102 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1103 return SI->isSimple();
1104 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
1105 return !MI->isVolatile();
1106 return true;
1109 /// Shuffles \p Mask in accordance with the given \p SubMask.
1110 /// \param ExtendingManyInputs Supports reshuffling of the mask with not only
1111 /// one but two input vectors.
1112 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask,
1113 bool ExtendingManyInputs = false) {
1114 if (SubMask.empty())
1115 return;
1116 assert(
1117 (!ExtendingManyInputs || SubMask.size() > Mask.size() ||
1118 // Check if input scalars were extended to match the size of other node.
1119 (SubMask.size() == Mask.size() &&
1120 std::all_of(std::next(Mask.begin(), Mask.size() / 2), Mask.end(),
1121 [](int Idx) { return Idx == PoisonMaskElem; }))) &&
1122 "SubMask with many inputs support must be larger than the mask.");
1123 if (Mask.empty()) {
1124 Mask.append(SubMask.begin(), SubMask.end());
1125 return;
1127 SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem);
1128 int TermValue = std::min(Mask.size(), SubMask.size());
1129 for (int I = 0, E = SubMask.size(); I < E; ++I) {
1130 if (SubMask[I] == PoisonMaskElem ||
1131 (!ExtendingManyInputs &&
1132 (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue)))
1133 continue;
1134 NewMask[I] = Mask[SubMask[I]];
1136 Mask.swap(NewMask);
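// Illustrative example (without ExtendingManyInputs): for Mask == {3, 1, 2, 0}
// and SubMask == {1, 0, 3, 2}, the result is {1, 3, 0, 2}, i.e. the
// composition NewMask[I] = Mask[SubMask[I]].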
1139 /// Order may have elements assigned the special value (size) which is out of
1140 /// bounds. Such indices only appear in places which correspond to undef values
1141 /// (see canReuseExtract for details) and are used to prevent undef values from
1142 /// affecting the operand ordering.
1143 /// The first loop below simply finds all unused indices and then the next loop
1144 /// nest assigns these indices to the undef value positions.
1145 /// As an example below Order has two undef positions and they have assigned
1146 /// values 3 and 7 respectively:
1147 /// before: 6 9 5 4 9 2 1 0
1148 /// after: 6 3 5 4 7 2 1 0
1149 static void fixupOrderingIndices(MutableArrayRef<unsigned> Order) {
1150 const unsigned Sz = Order.size();
1151 SmallBitVector UnusedIndices(Sz, /*t=*/true);
1152 SmallBitVector MaskedIndices(Sz);
1153 for (unsigned I = 0; I < Sz; ++I) {
1154 if (Order[I] < Sz)
1155 UnusedIndices.reset(Order[I]);
1156 else
1157 MaskedIndices.set(I);
1159 if (MaskedIndices.none())
1160 return;
1161 assert(UnusedIndices.count() == MaskedIndices.count() &&
1162 "Non-synced masked/available indices.");
1163 int Idx = UnusedIndices.find_first();
1164 int MIdx = MaskedIndices.find_first();
1165 while (MIdx >= 0) {
1166 assert(Idx >= 0 && "Indices must be synced.");
1167 Order[MIdx] = Idx;
1168 Idx = UnusedIndices.find_next(Idx);
1169 MIdx = MaskedIndices.find_next(MIdx);
1173 /// \returns a bitset for selecting opcodes. false for Opcode0 and true for
1174 /// Opcode1.
1175 static SmallBitVector getAltInstrMask(ArrayRef<Value *> VL, unsigned Opcode0,
1176 unsigned Opcode1) {
1177 Type *ScalarTy = VL[0]->getType();
1178 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
1179 SmallBitVector OpcodeMask(VL.size() * ScalarTyNumElements, false);
1180 for (unsigned Lane : seq<unsigned>(VL.size()))
1181 if (cast<Instruction>(VL[Lane])->getOpcode() == Opcode1)
1182 OpcodeMask.set(Lane * ScalarTyNumElements,
1183 Lane * ScalarTyNumElements + ScalarTyNumElements);
1184 return OpcodeMask;
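// Illustrative example: for a scalar VL == {add, sub, add, sub} with
// Opcode0 == Add and Opcode1 == Sub, bits 1 and 3 of the returned bitset are
// set, selecting the lanes that use Opcode1.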
1187 namespace llvm {
1189 static void inversePermutation(ArrayRef<unsigned> Indices,
1190 SmallVectorImpl<int> &Mask) {
1191 Mask.clear();
1192 const unsigned E = Indices.size();
1193 Mask.resize(E, PoisonMaskElem);
1194 for (unsigned I = 0; I < E; ++I)
1195 Mask[Indices[I]] = I;
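// Illustrative example: the loop above sets Mask[Indices[I]] = I, so
// Indices == {2, 0, 1} yields Mask == {1, 2, 0}.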
1198 /// Reorders the list of scalars in accordance with the given \p Mask.
1199 static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
1200 ArrayRef<int> Mask) {
1201 assert(!Mask.empty() && "Expected non-empty mask.");
1202 SmallVector<Value *> Prev(Scalars.size(),
1203 PoisonValue::get(Scalars.front()->getType()));
1204 Prev.swap(Scalars);
1205 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
1206 if (Mask[I] != PoisonMaskElem)
1207 Scalars[Mask[I]] = Prev[I];
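// Illustrative example: reorderScalars({a, b, c}, {2, 0, 1}) moves element I
// to position Mask[I], giving {b, c, a}.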
1210 /// Checks if the provided value does not require scheduling. It does not
1211 /// require scheduling if this is not an instruction, or it is an instruction
1212 /// that does not read/write memory and all of its operands are either not
1213 /// instructions, or are phi nodes or instructions from different blocks.
1214 static bool areAllOperandsNonInsts(Value *V) {
1215 auto *I = dyn_cast<Instruction>(V);
1216 if (!I)
1217 return true;
1218 return !mayHaveNonDefUseDependency(*I) &&
1219 all_of(I->operands(), [I](Value *V) {
1220 auto *IO = dyn_cast<Instruction>(V);
1221 if (!IO)
1222 return true;
1223 return isa<PHINode>(IO) || IO->getParent() != I->getParent();
1227 /// Checks if the provided value does not require scheduling. It does not
1228 /// require scheduling if this is not an instruction, or it is an instruction
1229 /// that does not read/write memory and all of its users are phi nodes or
1230 /// instructions from different blocks.
1231 static bool isUsedOutsideBlock(Value *V) {
1232 auto *I = dyn_cast<Instruction>(V);
1233 if (!I)
1234 return true;
1235 // Limits the number of uses to save compile time.
1236 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
1237 all_of(I->users(), [I](User *U) {
1238 auto *IU = dyn_cast<Instruction>(U);
1239 if (!IU)
1240 return true;
1241 return IU->getParent() != I->getParent() || isa<PHINode>(IU);
1245 /// Checks if the specified value does not require scheduling. It does not
1246 /// require scheduling if all operands and all users do not need to be scheduled
1247 /// in the current basic block.
1248 static bool doesNotNeedToBeScheduled(Value *V) {
1249 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
1252 /// Checks if the specified array of instructions does not require scheduling.
1253 /// This is the case if either all of the instructions have operands that do not
1254 /// require scheduling, or all of their users do not require scheduling since
1255 /// they are phis or live in other basic blocks.
1256 static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
1257 return !VL.empty() &&
1258 (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
1261 /// Returns true if the widened type of \p Ty elements with size \p Sz represents
1262 /// a full vector type, i.e. adding an extra element results in extra parts upon
1263 /// type legalization.
1264 static bool hasFullVectorsOrPowerOf2(const TargetTransformInfo &TTI, Type *Ty,
1265 unsigned Sz) {
1266 if (Sz <= 1)
1267 return false;
1268 if (!isValidElementType(Ty) && !isa<FixedVectorType>(Ty))
1269 return false;
1270 if (has_single_bit(Sz))
1271 return true;
1272 const unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
1273 return NumParts > 0 && NumParts < Sz && has_single_bit(Sz / NumParts) &&
1274 Sz % NumParts == 0;
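// Illustrative example, assuming a target where <12 x i32> legalizes into 3
// parts and <6 x i32> into 2 (e.g. 128-bit vector registers):
//   hasFullVectorsOrPowerOf2(TTI, i32, 12) == true  (3 whole <4 x i32> parts)
//   hasFullVectorsOrPowerOf2(TTI, i32, 6)  == false (6 / 2 == 3 is not a
//                                                    power of 2)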
1277 namespace slpvectorizer {
1279 /// Bottom Up SLP Vectorizer.
1280 class BoUpSLP {
1281 struct TreeEntry;
1282 struct ScheduleData;
1283 class ShuffleCostEstimator;
1284 class ShuffleInstructionBuilder;
1286 public:
1287 /// Tracks the state we can represent the loads in the given sequence.
1288 enum class LoadsState {
1289 Gather,
1290 Vectorize,
1291 ScatterVectorize,
1292 StridedVectorize
1295 using ValueList = SmallVector<Value *, 8>;
1296 using InstrList = SmallVector<Instruction *, 16>;
1297 using ValueSet = SmallPtrSet<Value *, 16>;
1298 using StoreList = SmallVector<StoreInst *, 8>;
1299 using ExtraValueToDebugLocsMap = SmallDenseSet<Value *, 4>;
1300 using OrdersType = SmallVector<unsigned, 4>;
1302 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
1303 TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
1304 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
1305 const DataLayout *DL, OptimizationRemarkEmitter *ORE)
1306 : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li), DT(Dt),
1307 AC(AC), DB(DB), DL(DL), ORE(ORE),
1308 Builder(Se->getContext(), TargetFolder(*DL)) {
1309 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
1310 // Use the vector register size specified by the target unless overridden
1311 // by a command-line option.
1312 // TODO: It would be better to limit the vectorization factor based on
1313 // data type rather than just register size. For example, x86 AVX has
1314 // 256-bit registers, but it does not support integer operations
1315 // at that width (that requires AVX2).
1316 if (MaxVectorRegSizeOption.getNumOccurrences())
1317 MaxVecRegSize = MaxVectorRegSizeOption;
1318 else
1319 MaxVecRegSize =
1320 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
1321 .getFixedValue();
1323 if (MinVectorRegSizeOption.getNumOccurrences())
1324 MinVecRegSize = MinVectorRegSizeOption;
1325 else
1326 MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
1329 /// Vectorize the tree that starts with the elements in \p VL.
1330 /// Returns the vectorized root.
1331 Value *vectorizeTree();
1333 /// Vectorize the tree but with the list of externally used values \p
1334 /// ExternallyUsedValues. Values in this set can be replaced by the
1335 /// generated extractvalue instructions.
1336 Value *
1337 vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
1338 Instruction *ReductionRoot = nullptr);
1340 /// \returns the cost incurred by unwanted spills and fills, caused by
1341 /// holding live values over call sites.
1342 InstructionCost getSpillCost() const;
1344 /// \returns the vectorization cost of the subtree that starts at \p VL.
1345 /// A negative number means that this is profitable.
1346 InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = {});
1348 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
1349 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
1350 void buildTree(ArrayRef<Value *> Roots,
1351 const SmallDenseSet<Value *> &UserIgnoreLst);
1353 /// Construct a vectorizable tree that starts at \p Roots.
1354 void buildTree(ArrayRef<Value *> Roots);
1356 /// Returns whether the root node has in-tree uses.
1357 bool doesRootHaveInTreeUses() const {
1358 return !VectorizableTree.empty() &&
1359 !VectorizableTree.front()->UserTreeIndices.empty();
1362 /// Return the scalars of the root node.
1363 ArrayRef<Value *> getRootNodeScalars() const {
1364 assert(!VectorizableTree.empty() && "No graph to get the first node from");
1365 return VectorizableTree.front()->Scalars;
1368 /// Checks if the root graph node can be emitted with narrower bitwidth at
1369 /// codegen and returns its signedness, if so.
1370 bool isSignedMinBitwidthRootNode() const {
1371 return MinBWs.at(VectorizableTree.front().get()).second;
1374 /// Builds external uses of the vectorized scalars, i.e. the list of
1375 /// vectorized scalars to be extracted, their lanes and their scalar users. \p
1376 /// ExternallyUsedValues contains an additional list of external uses to handle
1377 /// vectorization of reductions.
1378 void
1379 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
1381 /// Transforms graph nodes to target specific representations, if profitable.
1382 void transformNodes();
1384 /// Clear the internal data structures that are created by 'buildTree'.
1385 void deleteTree() {
1386 VectorizableTree.clear();
1387 ScalarToTreeEntry.clear();
1388 MultiNodeScalars.clear();
1389 MustGather.clear();
1390 NonScheduledFirst.clear();
1391 EntryToLastInstruction.clear();
1392 LoadEntriesToVectorize.clear();
1393 IsGraphTransformMode = false;
1394 GatheredLoadsEntriesFirst.reset();
1395 ExternalUses.clear();
1396 ExternalUsesAsOriginalScalar.clear();
1397 for (auto &Iter : BlocksSchedules) {
1398 BlockScheduling *BS = Iter.second.get();
1399 BS->clear();
1401 MinBWs.clear();
1402 ReductionBitWidth = 0;
1403 BaseGraphSize = 1;
1404 CastMaxMinBWSizes.reset();
1405 ExtraBitWidthNodes.clear();
1406 InstrElementSize.clear();
1407 UserIgnoreList = nullptr;
1408 PostponedGathers.clear();
1409 ValueToGatherNodes.clear();
1412 unsigned getTreeSize() const { return VectorizableTree.size(); }
1414 /// Returns the base graph size, before any transformations.
1415 unsigned getCanonicalGraphSize() const { return BaseGraphSize; }
1417 /// Perform LICM and CSE on the newly generated gather sequences.
1418 void optimizeGatherSequence();
1420 /// Does this non-empty order represent an identity order? Identity
1421 /// should be represented as an empty order, so this is used to
1422 /// decide if we can canonicalize a computed order. Undef elements
1423 /// (represented as size) are ignored.
1424 bool isIdentityOrder(ArrayRef<unsigned> Order) const {
1425 assert(!Order.empty() && "expected non-empty order");
1426 const unsigned Sz = Order.size();
1427 return all_of(enumerate(Order), [&](const auto &P) {
1428 return P.value() == P.index() || P.value() == Sz;
1432 /// Checks if the specified gather tree entry \p TE can be represented as a
1433 /// shuffled vector entry + (possibly) permutation with other gathers. It
1434 /// implements the checks only for possibly ordered scalars (Loads,
1435 /// ExtractElement, ExtractValue), which can be part of the graph.
1436 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
1438 /// Sort loads by increasing pointer offsets to allow greater clustering.
1439 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
1441 /// Gets reordering data for the given tree entry. If the entry is vectorized
1442 /// - just return ReorderIndices, otherwise check if the scalars can be
1443 /// reordered and return the most optimal order.
1444 /// \return std::nullopt if ordering is not important, empty order, if
1445 /// identity order is important, or the actual order.
1446 /// \param TopToBottom If true, include the order of vectorized stores and
1447 /// insertelement nodes, otherwise skip them.
1448 std::optional<OrdersType> getReorderingData(const TreeEntry &TE,
1449 bool TopToBottom);
1451 /// Reorders the current graph to the most profitable order starting from the
1452 /// root node to the leaf nodes. The best order is chosen only from the nodes
1453 /// of the same size (vectorization factor). Smaller nodes are considered
1454 /// parts of a subgraph with a smaller VF and they are reordered independently.
1455 /// We can do this because we still need to extend smaller nodes to the wider VF
1456 /// and we can merge the reordering shuffles with the widening shuffles.
1457 void reorderTopToBottom();
1459 /// Reorders the current graph to the most profitable order starting from
1460 /// leaves to the root. It allows rotating small subgraphs and reduces the
1461 /// number of reshuffles if the leaf nodes use the same order. In this case we
1462 /// can merge the orders and just shuffle the user node instead of shuffling its
1463 /// operands. Plus, even if the leaf nodes have different orders, it allows
1464 /// sinking the reordering in the graph closer to the root node and merging it later
1465 /// during analysis.
1466 void reorderBottomToTop(bool IgnoreReorder = false);
1468 /// \return The vector element size in bits to use when vectorizing the
1469 /// expression tree ending at \p V. If V is a store, the size is the width of
1470 /// the stored value. Otherwise, the size is the width of the largest loaded
1471 /// value reaching V. This method is used by the vectorizer to calculate
1472 /// vectorization factors.
1473 unsigned getVectorElementSize(Value *V);
1475 /// Compute the minimum type sizes required to represent the entries in a
1476 /// vectorizable tree.
1477 void computeMinimumValueSizes();
1479 // \returns maximum vector register size as set by TTI or overridden by cl::opt.
1480 unsigned getMaxVecRegSize() const {
1481 return MaxVecRegSize;
1484 // \returns minimum vector register size as set by cl::opt.
1485 unsigned getMinVecRegSize() const {
1486 return MinVecRegSize;
1489 unsigned getMinVF(unsigned Sz) const {
1490 return std::max(2U, getMinVecRegSize() / Sz);
1493 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
1494 unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
1495 MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
1496 return MaxVF ? MaxVF : UINT_MAX;
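// A worked example (hypothetical numbers, not taken from this file): with
// MinVecRegSize == 128 bits, getMinVF(32) returns std::max(2U, 128 / 32) == 4
// for 32-bit scalars. In getMaximumVF, if the cl::opt override is unset and
// TTI->getMaximumVF(ElemWidth, Opcode) returns 0, the result falls back to
// UINT_MAX, i.e. the maximum VF is effectively unbounded here.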
1499 /// Check if a homogeneous aggregate is isomorphic to some VectorType.
1500 /// Accepts a homogeneous multidimensional aggregate of scalars/vectors like
1501 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
1502 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
1504 /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
1505 unsigned canMapToVector(Type *T) const;
1507 /// \returns True if the VectorizableTree is both tiny and not fully
1508 /// vectorizable. We do not vectorize such trees.
1509 bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;
1511 /// Checks if the graph and all its subgraphs cannot be better vectorized.
1512 /// It may happen if all gather nodes are loads and they cannot be
1513 /// "clusterized". In this case even subgraphs cannot be vectorized more
1514 /// effectively than the base graph.
1515 bool isTreeNotExtendable() const;
1517 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
1518 /// can be load combined in the backend. Load combining may not be allowed in
1519 /// the IR optimizer, so we do not want to alter the pattern. For example,
1520 /// partially transforming a scalar bswap() pattern into vector code is
1521 /// effectively impossible for the backend to undo.
1522 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1523 /// may not be necessary.
1524 bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
1526 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
1527 /// can be load combined in the backend. Load combining may not be allowed in
1528 /// the IR optimizer, so we do not want to alter the pattern. For example,
1529 /// partially transforming a scalar bswap() pattern into vector code is
1530 /// effectively impossible for the backend to undo.
1531 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1532 /// may not be necessary.
1533 bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const;
1535 /// Checks if the given array of loads can be represented as a vectorized
1536 /// load, a scatter, or just a simple gather.
1537 /// \param VL list of loads.
1538 /// \param VL0 main load value.
1539 /// \param Order returned order of load instructions.
1540 /// \param PointerOps returned list of pointer operands.
1541 /// \param BestVF returns the best vector factor, if the recursive check found
1542 /// better vectorization sequences than a masked gather.
1543 /// \param TryRecursiveCheck used to check if a long masked gather can be
1544 /// represented as a series of loads/insert subvectors, if profitable.
1545 LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
1546 SmallVectorImpl<unsigned> &Order,
1547 SmallVectorImpl<Value *> &PointerOps,
1548 unsigned *BestVF = nullptr,
1549 bool TryRecursiveCheck = true) const;
1551 /// Registers a non-vectorizable sequence of loads.
1552 template <typename T> void registerNonVectorizableLoads(ArrayRef<T *> VL) {
1553 ListOfKnonwnNonVectorizableLoads.insert(hash_value(VL));
1556 /// Checks if the given sequence of loads is known to be non-vectorizable.
1557 template <typename T>
1558 bool areKnownNonVectorizableLoads(ArrayRef<T *> VL) const {
1559 return ListOfKnonwnNonVectorizableLoads.contains(hash_value(VL));
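// Usage sketch (hypothetical, for illustration): after a failed attempt,
// calling registerNonVectorizableLoads(ArrayRef<LoadInst *>(Loads)) records
// hash_value(Loads); a later areKnownNonVectorizableLoads(Loads) with the
// same sequence then returns true, so the expensive analysis is not repeated.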
1562 OptimizationRemarkEmitter *getORE() { return ORE; }
1564 /// This structure holds any data we need about the edges being traversed
1565 /// during buildTree_rec(). We keep track of:
1566 /// (i) the user TreeEntry index, and
1567 /// (ii) the index of the edge.
1568 struct EdgeInfo {
1569 EdgeInfo() = default;
1570 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
1571 : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
1572 /// The user TreeEntry.
1573 TreeEntry *UserTE = nullptr;
1574 /// The operand index of the use.
1575 unsigned EdgeIdx = UINT_MAX;
1576 #ifndef NDEBUG
1577 friend inline raw_ostream &operator<<(raw_ostream &OS,
1578 const BoUpSLP::EdgeInfo &EI) {
1579 EI.dump(OS);
1580 return OS;
1582 /// Debug print.
1583 void dump(raw_ostream &OS) const {
1584 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
1585 << " EdgeIdx:" << EdgeIdx << "}";
1587 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
1588 #endif
1589 bool operator == (const EdgeInfo &Other) const {
1590 return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
1594 /// A helper class used for scoring candidates for two consecutive lanes.
1595 class LookAheadHeuristics {
1596 const TargetLibraryInfo &TLI;
1597 const DataLayout &DL;
1598 ScalarEvolution &SE;
1599 const BoUpSLP &R;
1600 int NumLanes; // Total number of lanes (aka vectorization factor).
1601 int MaxLevel; // The maximum recursion depth for accumulating score.
1603 public:
1604 LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
1605 ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
1606 int MaxLevel)
1607 : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
1608 MaxLevel(MaxLevel) {}
1610 // The hard-coded scores listed here are not very important, though they should
1611 // be higher for better matches to improve the resulting cost. When
1612 // computing the scores of matching one sub-tree with another, we are
1613 // basically counting the number of values that are matching. So even if all
1614 // scores are set to 1, we would still get a decent matching result.
1615 // However, sometimes we have to break ties. For example we may have to
1616 // choose between matching loads vs matching opcodes. This is what these
1617 // scores are helping us with: they provide the order of preference. Also,
1618 // this is important if the scalar is externally used or used in another
1619 // tree entry node in a different lane.
1621 /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
1622 static const int ScoreConsecutiveLoads = 4;
1623 /// The same load multiple times. This should have a better score than
1624 /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
1625 /// with `movddup (%reg), xmm0` which has a throughput of 0.5 versus 0.5 for
1626 /// a vector load and 1.0 for a broadcast.
1627 static const int ScoreSplatLoads = 3;
1628 /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
1629 static const int ScoreReversedLoads = 3;
1630 /// A load candidate for masked gather.
1631 static const int ScoreMaskedGatherCandidate = 1;
1632 /// ExtractElementInst from same vector and consecutive indexes.
1633 static const int ScoreConsecutiveExtracts = 4;
1634 /// ExtractElementInst from same vector and reversed indices.
1635 static const int ScoreReversedExtracts = 3;
1636 /// Constants.
1637 static const int ScoreConstants = 2;
1638 /// Instructions with the same opcode.
1639 static const int ScoreSameOpcode = 2;
1641 /// Instructions with alt opcodes (e.g., add + sub).
1641 static const int ScoreAltOpcodes = 1;
1642 /// Identical instructions (a.k.a. splat or broadcast).
1643 static const int ScoreSplat = 1;
1644 /// Matching with an undef is preferable to failing.
1645 static const int ScoreUndef = 1;
1646 /// Score for failing to find a decent match.
1647 static const int ScoreFail = 0;
1648 /// Score if all users are vectorized.
1649 static const int ScoreAllUserVectorized = 1;
1651 /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
1652 /// \p U1 and \p U2 are the users of \p V1 and \p V2.
1653 /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
1654 /// MainAltOps.
1655 int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
1656 ArrayRef<Value *> MainAltOps) const {
1657 if (!isValidElementType(V1->getType()) ||
1658 !isValidElementType(V2->getType()))
1659 return LookAheadHeuristics::ScoreFail;
1661 if (V1 == V2) {
1662 if (isa<LoadInst>(V1)) {
1663 // Returns true if the users of V1 and V2 won't need to be extracted.
1664 auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
1665 // Bail out if we have too many uses to save compilation time.
1666 if (V1->hasNUsesOrMore(UsesLimit) || V2->hasNUsesOrMore(UsesLimit))
1667 return false;
1669 auto AllUsersVectorized = [U1, U2, this](Value *V) {
1670 return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
1671 return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
1674 return AllUsersVectorized(V1) && AllUsersVectorized(V2);
1676 // A broadcast of a load can be cheaper on some targets.
1677 if (R.TTI->isLegalBroadcastLoad(V1->getType(),
1678 ElementCount::getFixed(NumLanes)) &&
1679 ((int)V1->getNumUses() == NumLanes ||
1680 AllUsersAreInternal(V1, V2)))
1681 return LookAheadHeuristics::ScoreSplatLoads;
1683 return LookAheadHeuristics::ScoreSplat;
1686 auto CheckSameEntryOrFail = [&]() {
1687 if (const TreeEntry *TE1 = R.getTreeEntry(V1);
1688 TE1 && TE1 == R.getTreeEntry(V2))
1689 return LookAheadHeuristics::ScoreSplatLoads;
1690 return LookAheadHeuristics::ScoreFail;
1693 auto *LI1 = dyn_cast<LoadInst>(V1);
1694 auto *LI2 = dyn_cast<LoadInst>(V2);
1695 if (LI1 && LI2) {
1696 if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
1697 !LI2->isSimple())
1698 return CheckSameEntryOrFail();
1700 std::optional<int> Dist = getPointersDiff(
1701 LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
1702 LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
1703 if (!Dist || *Dist == 0) {
1704 if (getUnderlyingObject(LI1->getPointerOperand()) ==
1705 getUnderlyingObject(LI2->getPointerOperand()) &&
1706 R.TTI->isLegalMaskedGather(
1707 getWidenedType(LI1->getType(), NumLanes), LI1->getAlign()))
1708 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1709 return CheckSameEntryOrFail();
1711 // The distance is too large - it still may be profitable to use masked
1712 // loads/gathers.
1713 if (std::abs(*Dist) > NumLanes / 2)
1714 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1715 // This will still detect consecutive loads, but we might have "holes"
1716 // in some cases. It is ok for non-power-of-2 vectorization and may produce
1717 // better results. It should not affect current vectorization.
1718 return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
1719 : LookAheadHeuristics::ScoreReversedLoads;
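// For illustration (a hypothetical IR snippet, not part of this file):
//   %p1 = getelementptr inbounds i32, ptr %p, i64 1
//   %a  = load i32, ptr %p          ; element 0
//   %b  = load i32, ptr %p1         ; element 1
// Here getPointersDiff returns 1, so the pair (%a, %b) scores
// ScoreConsecutiveLoads, while the swapped pair (%b, %a) has Dist == -1 and
// scores ScoreReversedLoads (assuming |Dist| <= NumLanes / 2).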
1722 auto *C1 = dyn_cast<Constant>(V1);
1723 auto *C2 = dyn_cast<Constant>(V2);
1724 if (C1 && C2)
1725 return LookAheadHeuristics::ScoreConstants;
1727 // Extracts from consecutive indices of the same vector get a better score as
1728 // the extracts could be optimized away.
1729 Value *EV1;
1730 ConstantInt *Ex1Idx;
1731 if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
1732 // Undefs are always profitable for extractelements.
1733 // Compiler can easily combine poison and extractelement <non-poison> or
1734 // undef and extractelement <poison>. But combining undef +
1735 // extractelement <non-poison-but-may-produce-poison> requires some
1736 // extra operations.
1737 if (isa<UndefValue>(V2))
1738 return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
1739 ? LookAheadHeuristics::ScoreConsecutiveExtracts
1740 : LookAheadHeuristics::ScoreSameOpcode;
1741 Value *EV2 = nullptr;
1742 ConstantInt *Ex2Idx = nullptr;
1743 if (match(V2,
1744 m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
1745 m_Undef())))) {
1746 // Undefs are always profitable for extractelements.
1747 if (!Ex2Idx)
1748 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1749 if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
1750 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1751 if (EV2 == EV1) {
1752 int Idx1 = Ex1Idx->getZExtValue();
1753 int Idx2 = Ex2Idx->getZExtValue();
1754 int Dist = Idx2 - Idx1;
1755 // The distance is too large - it still may be profitable to use
1756 // shuffles.
1757 if (std::abs(Dist) == 0)
1758 return LookAheadHeuristics::ScoreSplat;
1759 if (std::abs(Dist) > NumLanes / 2)
1760 return LookAheadHeuristics::ScoreSameOpcode;
1761 return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
1762 : LookAheadHeuristics::ScoreReversedExtracts;
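// For illustration (hypothetical values): extracting from the same source
// vector at indices 2 and 3 gives Dist == 1 and ScoreConsecutiveExtracts;
// indices 3 and 2 give ScoreReversedExtracts; equal indices give ScoreSplat;
// and a distance larger than NumLanes / 2 falls back to ScoreSameOpcode.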
1764 return LookAheadHeuristics::ScoreAltOpcodes;
1766 return CheckSameEntryOrFail();
1769 auto *I1 = dyn_cast<Instruction>(V1);
1770 auto *I2 = dyn_cast<Instruction>(V2);
1771 if (I1 && I2) {
1772 if (I1->getParent() != I2->getParent())
1773 return CheckSameEntryOrFail();
1774 SmallVector<Value *, 4> Ops(MainAltOps);
1775 Ops.push_back(I1);
1776 Ops.push_back(I2);
1777 InstructionsState S = getSameOpcode(Ops, TLI);
1778 // Note: Only consider instructions with <= 2 operands to avoid
1779 // complexity explosion.
1780 if (S.getOpcode() &&
1781 (S.MainOp->getNumOperands() <= 2 || !MainAltOps.empty() ||
1782 !S.isAltShuffle()) &&
1783 all_of(Ops, [&S](Value *V) {
1784 return cast<Instruction>(V)->getNumOperands() ==
1785 S.MainOp->getNumOperands();
1787 return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
1788 : LookAheadHeuristics::ScoreSameOpcode;
1791 if (isa<UndefValue>(V2))
1792 return LookAheadHeuristics::ScoreUndef;
1794 return CheckSameEntryOrFail();
1797 /// Go through the operands of \p LHS and \p RHS recursively until
1798 /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
1799 /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
1800 /// of \p U1 and \p U2), except at the beginning of the recursion where
1801 /// these are set to nullptr.
1803 /// For example:
1804 /// \verbatim
1805 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1]
1806 /// \ / \ / \ / \ /
1807 /// + + + +
1808 /// G1 G2 G3 G4
1809 /// \endverbatim
1810 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1811 /// each level recursively, accumulating the score. It starts from matching
1812 /// the additions at level 0, then moves on to the loads (level 1). The
1813 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1814 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
1815 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
1816 /// Please note that the order of the operands does not matter, as we
1817 /// evaluate the score of all profitable combinations of operands. In
1818 /// other words the score of G1 and G4 is the same as G1 and G2. This
1819 /// heuristic is based on ideas described in:
1820 /// Look-ahead SLP: Auto-vectorization in the presence of commutative
1821 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1822 /// Luís F. W. Góes
1823 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
1824 Instruction *U2, int CurrLevel,
1825 ArrayRef<Value *> MainAltOps) const {
1827 // Get the shallow score of V1 and V2.
1828 int ShallowScoreAtThisLevel =
1829 getShallowScore(LHS, RHS, U1, U2, MainAltOps);
1831 // If reached MaxLevel,
1832 // or if V1 and V2 are not instructions,
1833 // or if they are SPLAT,
1834 // or if they are not consecutive,
1835 // or if it is profitable to vectorize loads or extractelements, return
1836 // the current cost early.
1837 auto *I1 = dyn_cast<Instruction>(LHS);
1838 auto *I2 = dyn_cast<Instruction>(RHS);
1839 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1840 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
1841 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
1842 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
1843 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
1844 ShallowScoreAtThisLevel))
1845 return ShallowScoreAtThisLevel;
1846 assert(I1 && I2 && "Should have early exited.");
1848 // Contains the I2 operand indexes that got matched with I1 operands.
1849 SmallSet<unsigned, 4> Op2Used;
1851 // Recursion towards the operands of I1 and I2. We are trying all possible
1852 // operand pairs, and keeping track of the best score.
1853 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1854 OpIdx1 != NumOperands1; ++OpIdx1) {
1855 // Try to pair op1I with the best operand of I2.
1856 int MaxTmpScore = 0;
1857 unsigned MaxOpIdx2 = 0;
1858 bool FoundBest = false;
1859 // If I2 is commutative try all combinations.
1860 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1861 unsigned ToIdx = isCommutative(I2)
1862 ? I2->getNumOperands()
1863 : std::min(I2->getNumOperands(), OpIdx1 + 1);
1864 assert(FromIdx <= ToIdx && "Bad index");
1865 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1866 // Skip operands already paired with OpIdx1.
1867 if (Op2Used.count(OpIdx2))
1868 continue;
1869 // Recursively calculate the cost at each level
1870 int TmpScore =
1871 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
1872 I1, I2, CurrLevel + 1, {});
1873 // Look for the best score.
1874 if (TmpScore > LookAheadHeuristics::ScoreFail &&
1875 TmpScore > MaxTmpScore) {
1876 MaxTmpScore = TmpScore;
1877 MaxOpIdx2 = OpIdx2;
1878 FoundBest = true;
1881 if (FoundBest) {
1882 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1883 Op2Used.insert(MaxOpIdx2);
1884 ShallowScoreAtThisLevel += MaxTmpScore;
1887 return ShallowScoreAtThisLevel;
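// A worked example (hypothetical, a sketch under simple assumptions: both
// additions from the G1/G2 diagram above are in the same basic block, the
// loads are simple, and MaxLevel >= 2): the shallow score of (G1, G2) is
// ScoreSameOpcode (2); recursing into the operand pairs {A[0], A[1]} and
// {B[0], B[1]}, each consecutive-load pair adds ScoreConsecutiveLoads (4),
// for a total of 2 + 4 + 4 = 10.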
1890 /// A helper data structure to hold the operands of a vector of instructions.
1891 /// This supports a fixed vector length for all operand vectors.
1892 class VLOperands {
1893 /// For each operand we need (i) the value, and (ii) the opcode that it
1894 /// would be attached to if the expression was in a left-linearized form.
1895 /// This is required to avoid illegal operand reordering.
1896 /// For example:
1897 /// \verbatim
1898 /// 0 Op1
1899 /// |/
1900 /// Op1 Op2 Linearized + Op2
1901 /// \ / ----------> |/
1902 /// - -
1904 /// Op1 - Op2 (0 + Op1) - Op2
1905 /// \endverbatim
1907 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
1909 /// Another way to think of this is to track all the operations across the
1910 /// path from the operand all the way to the root of the tree and to
1911 /// calculate the operation that corresponds to this path. For example, the
1912 /// path from Op2 to the root crosses the RHS of the '-', therefore the
1913 /// corresponding operation is a '-' (which matches the one in the
1914 /// linearized tree, as shown above).
1916 /// For lack of a better term, we refer to this operation as Accumulated
1917 /// Path Operation (APO).
1918 struct OperandData {
1919 OperandData() = default;
1920 OperandData(Value *V, bool APO, bool IsUsed)
1921 : V(V), APO(APO), IsUsed(IsUsed) {}
1922 /// The operand value.
1923 Value *V = nullptr;
1924 /// TreeEntries only allow a single opcode, or an alternate sequence of
1925 /// them (e.g., +, -). Therefore, we can safely use a boolean value for the
1926 /// APO. It is set to 'true' if 'V' is attached to an inverse operation
1927 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
1928 /// (e.g., Add/Mul)
1929 bool APO = false;
1930 /// Helper data for the reordering function.
1931 bool IsUsed = false;
1934 /// During operand reordering, we are trying to select the operand at the lane
1935 /// that best matches the operand at the neighboring lane. Our
1936 /// selection is based on the type of value we are looking for. For example,
1937 /// if the neighboring lane has a load, we need to look for a load that is
1938 /// accessing a consecutive address. These strategies are summarized in the
1939 /// 'ReorderingMode' enumerator.
1940 enum class ReorderingMode {
1941 Load, ///< Matching loads to consecutive memory addresses
1942 Opcode, ///< Matching instructions based on opcode (same or alternate)
1943 Constant, ///< Matching constants
1944 Splat, ///< Matching the same instruction multiple times (broadcast)
1945 Failed, ///< We failed to create a vectorizable group
1948 using OperandDataVec = SmallVector<OperandData, 2>;
1950 /// A vector of operand vectors.
1951 SmallVector<OperandDataVec, 4> OpsVec;
1953 const TargetLibraryInfo &TLI;
1954 const DataLayout &DL;
1955 ScalarEvolution &SE;
1956 const BoUpSLP &R;
1957 const Loop *L = nullptr;
1959 /// \returns the operand data at \p OpIdx and \p Lane.
1960 OperandData &getData(unsigned OpIdx, unsigned Lane) {
1961 return OpsVec[OpIdx][Lane];
1964 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
1965 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
1966 return OpsVec[OpIdx][Lane];
1969 /// Clears the used flag for all entries.
1970 void clearUsed() {
1971 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
1972 OpIdx != NumOperands; ++OpIdx)
1973 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
1974 ++Lane)
1975 OpsVec[OpIdx][Lane].IsUsed = false;
1978 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
1979 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
1980 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
1983 /// \param Lane lane of the operands under analysis.
1984 /// \param OpIdx operand index in lane \p Lane for which we're looking for the
1985 /// best candidate.
1986 /// \param Idx operand index of the current candidate value.
1987 /// \returns The additional score due to possible broadcasting of the
1988 /// elements in the lane. It is more profitable to have a power-of-2 number of
1989 /// unique elements in the lane, as they will be vectorized with higher probability
1990 /// after removing duplicates. Currently the SLP vectorizer supports only
1991 /// vectorization of the power-of-2 number of unique scalars.
1992 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx,
1993 const SmallBitVector &UsedLanes) const {
1994 Value *IdxLaneV = getData(Idx, Lane).V;
1995 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V ||
1996 isa<ExtractElementInst>(IdxLaneV))
1997 return 0;
1998 SmallDenseMap<Value *, unsigned, 4> Uniques;
1999 for (unsigned Ln : seq<unsigned>(getNumLanes())) {
2000 if (Ln == Lane)
2001 continue;
2002 Value *OpIdxLnV = getData(OpIdx, Ln).V;
2003 if (!isa<Instruction>(OpIdxLnV))
2004 return 0;
2005 Uniques.try_emplace(OpIdxLnV, Ln);
2007 unsigned UniquesCount = Uniques.size();
2008 auto IdxIt = Uniques.find(IdxLaneV);
2009 unsigned UniquesCntWithIdxLaneV =
2010 IdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
2011 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
2012 auto OpIdxIt = Uniques.find(OpIdxLaneV);
2013 unsigned UniquesCntWithOpIdxLaneV =
2014 OpIdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
2015 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
2016 return 0;
2017 return std::min(bit_ceil(UniquesCntWithOpIdxLaneV) -
2018 UniquesCntWithOpIdxLaneV,
2019 UniquesCntWithOpIdxLaneV -
2020 bit_floor(UniquesCntWithOpIdxLaneV)) -
2021 ((IdxIt != Uniques.end() && UsedLanes.test(IdxIt->second))
2022 ? UniquesCntWithIdxLaneV - bit_floor(UniquesCntWithIdxLaneV)
2023 : bit_ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
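// A worked example (hypothetical, for illustration): with 4 lanes, suppose the
// other lanes at OpIdx hold the instructions {%a, %b, %a} (two unique values),
// the current value at (OpIdx, Lane) is %c (not among them), and the candidate
// IdxLaneV is %b (both %b and %c are instructions). Then
// UniquesCntWithOpIdxLaneV == 3 and UniquesCntWithIdxLaneV == 2, so the bonus
// is min(4 - 3, 3 - 2) - 0 == 1 (the final term is 0 since 2 is already a
// power of two): swapping in %b keeps the number of unique scalars at a
// power of 2.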
2026 /// \param Lane lane of the operands under analysis.
2027 /// \param OpIdx operand index in lane \p Lane for which we're looking for the
2028 /// best candidate.
2029 /// \param Idx operand index of the current candidate value.
2030 /// \returns The additional score for the scalar whose users are all
2031 /// vectorized.
2032 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
2033 Value *IdxLaneV = getData(Idx, Lane).V;
2034 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
2035 // Do not care about number of uses for vector-like instructions
2036 // (extractelement/extractvalue with constant indices), they are extracts
2037 // themselves and already externally used. Vectorization of such
2038 // instructions does not add an extra extractelement instruction, it just
2039 // may remove it.
2040 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
2041 isVectorLikeInstWithConstOps(OpIdxLaneV))
2042 return LookAheadHeuristics::ScoreAllUserVectorized;
2043 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
2044 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
2045 return 0;
2046 return R.areAllUsersVectorized(IdxLaneI)
2047 ? LookAheadHeuristics::ScoreAllUserVectorized
2048 : 0;
2051 /// Score scaling factor for fully compatible instructions but with
2052 /// different number of external uses. Allows better selection of the
2053 /// instructions with fewer external uses.
2054 static const int ScoreScaleFactor = 10;
2056 /// \Returns the look-ahead score, which tells us how much the sub-trees
2057 /// rooted at \p LHS and \p RHS match, the more they match the higher the
2058 /// score. This helps break ties in an informed way when we cannot decide on
2059 /// the order of the operands by just considering the immediate
2060 /// predecessors.
2061 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
2062 int Lane, unsigned OpIdx, unsigned Idx,
2063 bool &IsUsed, const SmallBitVector &UsedLanes) {
2064 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
2065 LookAheadMaxDepth);
2066 // Keep track of the instruction stack as we recurse into the operands
2067 // during the look-ahead score exploration.
2068 int Score =
2069 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
2070 /*CurrLevel=*/1, MainAltOps);
2071 if (Score) {
2072 int SplatScore = getSplatScore(Lane, OpIdx, Idx, UsedLanes);
2073 if (Score <= -SplatScore) {
2074 // Failed score.
2075 Score = 0;
2076 } else {
2077 Score += SplatScore;
2078 // Scale score to see the difference between different operands
2079 // and similar operands but all vectorized/not all vectorized
2080 // uses. It does not affect actual selection of the best
2081 // compatible operand in general, just allows to select the
2082 // operand with all vectorized uses.
2083 Score *= ScoreScaleFactor;
2084 Score += getExternalUseScore(Lane, OpIdx, Idx);
2085 IsUsed = true;
2088 return Score;
2091 /// Best defined scores per lane between the passes. Used to choose the
2092 /// best operand (with the highest score) between the passes.
2093 /// The key - {Operand Index, Lane}.
2094 /// The value - the best score between the passes for the lane and the
2095 /// operand.
2096 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
2097 BestScoresPerLanes;
2099 // Search all operands in Ops[*][Lane] for the one that best matches
2100 // Ops[OpIdx][LastLane] and return its operand index.
2101 // If no good match can be found, return std::nullopt.
2102 std::optional<unsigned>
2103 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
2104 ArrayRef<ReorderingMode> ReorderingModes,
2105 ArrayRef<Value *> MainAltOps,
2106 const SmallBitVector &UsedLanes) {
2107 unsigned NumOperands = getNumOperands();
2109 // The operand of the previous lane at OpIdx.
2110 Value *OpLastLane = getData(OpIdx, LastLane).V;
2112 // Our strategy mode for OpIdx.
2113 ReorderingMode RMode = ReorderingModes[OpIdx];
2114 if (RMode == ReorderingMode::Failed)
2115 return std::nullopt;
2117 // The linearized opcode of the operand at OpIdx, Lane.
2118 bool OpIdxAPO = getData(OpIdx, Lane).APO;
2120 // The best operand index and its score.
2121 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
2122 // are using the score to differentiate between the two.
2123 struct BestOpData {
2124 std::optional<unsigned> Idx;
2125 unsigned Score = 0;
2126 } BestOp;
2127 BestOp.Score =
2128 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
2129 .first->second;
2131 // Track if the operand must be marked as used. If the operand is set to
2132 // Score 1 explicitly (because of a non-power-of-2 number of unique scalars),
2133 // we may want to re-estimate the operands again on the following iterations.
2134 bool IsUsed = RMode == ReorderingMode::Splat ||
2135 RMode == ReorderingMode::Constant ||
2136 RMode == ReorderingMode::Load;
2137 // Iterate through all unused operands and look for the best.
2138 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
2139 // Get the operand at Idx and Lane.
2140 OperandData &OpData = getData(Idx, Lane);
2141 Value *Op = OpData.V;
2142 bool OpAPO = OpData.APO;
2144 // Skip already selected operands.
2145 if (OpData.IsUsed)
2146 continue;
2148 // Skip if we are trying to move the operand to a position with a
2149 // different opcode in the linearized tree form. This would break the
2150 // semantics.
2151 if (OpAPO != OpIdxAPO)
2152 continue;
2154 // Look for an operand that matches the current mode.
2155 switch (RMode) {
2156 case ReorderingMode::Load:
2157 case ReorderingMode::Opcode: {
2158 bool LeftToRight = Lane > LastLane;
2159 Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
2160 Value *OpRight = (LeftToRight) ? Op : OpLastLane;
2161 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
2162 OpIdx, Idx, IsUsed, UsedLanes);
2163 if (Score > static_cast<int>(BestOp.Score) ||
2164 (Score > 0 && Score == static_cast<int>(BestOp.Score) &&
2165 Idx == OpIdx)) {
2166 BestOp.Idx = Idx;
2167 BestOp.Score = Score;
2168 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
2170 break;
2172 case ReorderingMode::Constant:
2173 if (isa<Constant>(Op) ||
2174 (!BestOp.Score && L && L->isLoopInvariant(Op))) {
2175 BestOp.Idx = Idx;
2176 if (isa<Constant>(Op)) {
2177 BestOp.Score = LookAheadHeuristics::ScoreConstants;
2178 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] =
2179 LookAheadHeuristics::ScoreConstants;
2181 if (isa<UndefValue>(Op) || !isa<Constant>(Op))
2182 IsUsed = false;
2184 break;
2185 case ReorderingMode::Splat:
2186 if (Op == OpLastLane || (!BestOp.Score && isa<Constant>(Op))) {
2187 IsUsed = Op == OpLastLane;
2188 if (Op == OpLastLane) {
2189 BestOp.Score = LookAheadHeuristics::ScoreSplat;
2190 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] =
2191 LookAheadHeuristics::ScoreSplat;
2193 BestOp.Idx = Idx;
2195 break;
2196 case ReorderingMode::Failed:
2197 llvm_unreachable("Not expected Failed reordering mode.");
2201 if (BestOp.Idx) {
2202 getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
2203 return BestOp.Idx;
2205 // If we could not find a good match return std::nullopt.
2206 return std::nullopt;
2209 /// Helper for reorderOperandVecs.
2210 /// \returns the lane that we should start reordering from. This is the one
2211 /// which has the least number of operands that can freely move about, or is
2212 /// less profitable because it already has the most optimal set of operands.
2213 unsigned getBestLaneToStartReordering() const {
2214 unsigned Min = UINT_MAX;
2215 unsigned SameOpNumber = 0;
2216 // std::pair<unsigned, unsigned> is used to implement a simple voting
2217 // algorithm and choose the lane with the least number of operands that
2218 // can freely move about, or is less profitable because it already has the
2219 // most optimal set of operands. The first unsigned is a counter for
2220 // voting, the second unsigned is the counter of lanes with instructions
2221 // with same/alternate opcodes and same parent basic block.
2222 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
2223 // Try to be closer to the original results, if we have multiple lanes
2224 // with the same cost. If 2 lanes have the same cost, use the one with the
2225 // highest index.
2226 for (int I = getNumLanes(); I > 0; --I) {
2227 unsigned Lane = I - 1;
2228 OperandsOrderData NumFreeOpsHash =
2229 getMaxNumOperandsThatCanBeReordered(Lane);
2230 // Compare the number of operands that can move and choose the one with
2231 // the least number.
2232 if (NumFreeOpsHash.NumOfAPOs < Min) {
2233 Min = NumFreeOpsHash.NumOfAPOs;
2234 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
2235 HashMap.clear();
2236 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
2237 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
2238 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
2239 // Select the most optimal lane in terms of number of operands that
2240 // should be moved around.
2241 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
2242 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
2243 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
2244 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
2245 auto [It, Inserted] =
2246 HashMap.try_emplace(NumFreeOpsHash.Hash, 1, Lane);
2247 if (!Inserted)
2248 ++It->second.first;
2251 // Select the lane with the minimum counter.
2252 unsigned BestLane = 0;
2253 unsigned CntMin = UINT_MAX;
2254 for (const auto &Data : reverse(HashMap)) {
2255 if (Data.second.first < CntMin) {
2256 CntMin = Data.second.first;
2257 BestLane = Data.second.second;
2260 return BestLane;
2263 /// Data structure that helps to reorder operands.
2264 struct OperandsOrderData {
2265 /// The best number of operands with the same APOs, which can be
2266 /// reordered.
2267 unsigned NumOfAPOs = UINT_MAX;
2268 /// Number of operands with the same/alternate instruction opcode and
2269 /// parent.
2270 unsigned NumOpsWithSameOpcodeParent = 0;
2271 /// Hash for the actual operands ordering.
2272 /// Used to encode the operands, actually their position ids and value
2273 /// ids. It is used in the voting mechanism to find the lane with the
2274 /// least number of operands that can freely move about, or that is less
2275 /// profitable because it already has the most optimal set of operands. Can be
2276 /// replaced with a SmallVector<unsigned> instead, but the hash code is faster
2277 /// and requires less memory.
2278 unsigned Hash = 0;
2280 /// \returns the maximum number of operands that are allowed to be reordered
2281 /// for \p Lane and the number of compatible instructions (with the same
2282 /// parent/opcode). This is used as a heuristic for selecting the first lane
2283 /// to start operand reordering.
2284 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
2285 unsigned CntTrue = 0;
2286 unsigned NumOperands = getNumOperands();
2287 // Operands with the same APO can be reordered. We therefore need to count
2288 // how many of them we have for each APO, like this: Cnt[APO] = x.
2289 // Since we only have two APOs, namely true and false, we can avoid using
2290 // a map. Instead we can simply count the number of operands that
2291 // correspond to one of them (in this case the 'true' APO), and calculate
2292 // the other by subtracting it from the total number of operands.
2293 // Operands with the same instruction opcode and parent are more
2294 // profitable since we don't need to move them in many cases, with a high
2295 // probability such a lane can already be vectorized effectively.
2296 bool AllUndefs = true;
2297 unsigned NumOpsWithSameOpcodeParent = 0;
2298 Instruction *OpcodeI = nullptr;
2299 BasicBlock *Parent = nullptr;
2300 unsigned Hash = 0;
2301 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2302 const OperandData &OpData = getData(OpIdx, Lane);
2303 if (OpData.APO)
2304 ++CntTrue;
2305 // Use Boyer-Moore majority voting for finding the majority opcode and
2306 // the number of times it occurs.
2307 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
2308 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
2309 I->getParent() != Parent) {
2310 if (NumOpsWithSameOpcodeParent == 0) {
2311 NumOpsWithSameOpcodeParent = 1;
2312 OpcodeI = I;
2313 Parent = I->getParent();
2314 } else {
2315 --NumOpsWithSameOpcodeParent;
2317 } else {
2318 ++NumOpsWithSameOpcodeParent;
2321 Hash = hash_combine(
2322 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
2323 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
2325 if (AllUndefs)
2326 return {};
2327 OperandsOrderData Data;
2328 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
2329 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
2330 Data.Hash = Hash;
2331 return Data;
2334 /// Go through the instructions in VL and append their operands.
2335 void appendOperandsOfVL(ArrayRef<Value *> VL) {
2336 assert(!VL.empty() && "Bad VL");
2337 assert((empty() || VL.size() == getNumLanes()) &&
2338 "Expected same number of lanes");
2339 assert(isa<Instruction>(VL[0]) && "Expected instruction");
2340 constexpr unsigned IntrinsicNumOperands = 2;
2341 unsigned NumOperands = isa<IntrinsicInst>(VL[0])
2342 ? IntrinsicNumOperands
2343 : cast<Instruction>(VL[0])->getNumOperands();
2344 OpsVec.resize(NumOperands);
2345 unsigned NumLanes = VL.size();
2346 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2347 OpsVec[OpIdx].resize(NumLanes);
2348 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2349 assert(isa<Instruction>(VL[Lane]) && "Expected instruction");
2350 // Our tree has just 3 nodes: the root and two operands.
2351 // It is therefore trivial to get the APO. We only need to check the
2352 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
2353 // RHS operand. The LHS operand of both add and sub is never attached
2354 // to an inverse operation in the linearized form, therefore its APO
2355 // is false. The RHS is true only if VL[Lane] is an inverse operation.
2357 // Since operand reordering is performed on groups of commutative
2358 // operations or alternating sequences (e.g., +, -), we can safely
2359 // tell the inverse operations by checking commutativity.
2360 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
2361 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
2362 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
2363 APO, false};
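// For illustration (hypothetical IR, not from this file): for
//   VL = { %x = sub i32 %a, %b, %y = sub i32 %c, %d }
// the operand-0 column {%a, %c} gets APO == false, while the operand-1 column
// {%b, %d} gets APO == true, because sub is non-commutative and its RHS sits
// on an inverse path in the left-linearized form.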
2368 /// \returns the number of operands.
2369 unsigned getNumOperands() const { return OpsVec.size(); }
2371 /// \returns the number of lanes.
2372 unsigned getNumLanes() const { return OpsVec[0].size(); }
2374 /// \returns the operand value at \p OpIdx and \p Lane.
2375 Value *getValue(unsigned OpIdx, unsigned Lane) const {
2376 return getData(OpIdx, Lane).V;
2379 /// \returns true if the data structure is empty.
2380 bool empty() const { return OpsVec.empty(); }
2382 /// Clears the data.
2383 void clear() { OpsVec.clear(); }
2385 /// \Returns true if there are enough operands identical to \p Op to fill
2386 /// the whole vector (possibly mixed with constants or loop-invariant values).
2387 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() call must follow.
2388 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
2389 // Small number of loads - try load matching.
2390 if (isa<LoadInst>(Op) && getNumLanes() == 2 && getNumOperands() == 2)
2391 return false;
2392 bool OpAPO = getData(OpIdx, Lane).APO;
2393 bool IsInvariant = L && L->isLoopInvariant(Op);
2394 unsigned Cnt = 0;
2395 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
2396 if (Ln == Lane)
2397 continue;
2398 // This is set to true if we found a candidate for broadcast at Lane.
2399 bool FoundCandidate = false;
2400 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
2401 OperandData &Data = getData(OpI, Ln);
2402 if (Data.APO != OpAPO || Data.IsUsed)
2403 continue;
2404 Value *OpILane = getValue(OpI, Lane);
2405 bool IsConstantOp = isa<Constant>(OpILane);
2406 // Consider the broadcast candidate if:
2407 // 1. Same value is found in one of the operands.
2408 if (Data.V == Op ||
2409 // 2. The operand in the given lane is not constant but there is a
2410 // constant operand in another lane (which can be moved to the
2411 // given lane). In this case we can represent it as a simple
2412 // permutation of constant and broadcast.
2413 (!IsConstantOp &&
2414 ((Lns > 2 && isa<Constant>(Data.V)) ||
2415 // 2.1. If we have only 2 lanes, we need to check that the value in
2416 // the next lane does not build the same opcode sequence.
2417 (Lns == 2 &&
2418 !getSameOpcode({Op, getValue((OpI + 1) % OpE, Ln)}, TLI)
2419 .getOpcode() &&
2420 isa<Constant>(Data.V)))) ||
2421 // 3. The operand in the current lane is loop invariant (can be
2422 // hoisted out) and another operand is also a loop invariant
2423 // (though not a constant). In this case the whole vector can be
2424 // hoisted out.
2425 // FIXME: need to teach the cost model about this case for better
2426 // estimation.
2427 (IsInvariant && !isa<Constant>(Data.V) &&
2428 !getSameOpcode({Op, Data.V}, TLI).getOpcode() &&
2429 L->isLoopInvariant(Data.V))) {
2430 FoundCandidate = true;
2431 Data.IsUsed = Data.V == Op;
2432 if (Data.V == Op)
2433 ++Cnt;
2434 break;
2437 if (!FoundCandidate)
2438 return false;
2440 return getNumLanes() == 2 || Cnt > 1;
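// For illustration (hypothetical, assuming matching APOs and unused entries):
// with 4 lanes and an operand column {%v, %v, 7, %v}, shouldBroadcast(%v, ...)
// called from the first lane returns true: %v is found again in two other
// lanes (Cnt == 2 > 1) and the constant 7 in the remaining lane can be
// handled as a constant-plus-broadcast permutation.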
2443 /// Checks if there is at least one operand in lanes other
2444 /// than \p Lane, compatible with the operand \p Op.
2445 bool canBeVectorized(Instruction *Op, unsigned OpIdx, unsigned Lane) const {
2446 bool OpAPO = getData(OpIdx, Lane).APO;
2447 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
2448 if (Ln == Lane)
2449 continue;
2450 if (any_of(seq<unsigned>(getNumOperands()), [&](unsigned OpI) {
2451 const OperandData &Data = getData(OpI, Ln);
2452 if (Data.APO != OpAPO || Data.IsUsed)
2453 return true;
2454 Value *OpILn = getValue(OpI, Ln);
2455 return (L && L->isLoopInvariant(OpILn)) ||
2456 (getSameOpcode({Op, OpILn}, TLI).getOpcode() &&
2457 Op->getParent() == cast<Instruction>(OpILn)->getParent());
2459 return true;
2461 return false;
2464 public:
2465 /// Initialize with all the operands of the instruction vector \p RootVL.
2466 VLOperands(ArrayRef<Value *> RootVL, const BoUpSLP &R)
2467 : TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R),
2468 L(R.LI->getLoopFor(
2469 (cast<Instruction>(RootVL.front())->getParent()))) {
2470 // Append all the operands of RootVL.
2471 appendOperandsOfVL(RootVL);
2474 /// \Returns a value vector with the operands across all lanes for the
2475 /// operand at \p OpIdx.
2476 ValueList getVL(unsigned OpIdx) const {
2477 ValueList OpVL(OpsVec[OpIdx].size());
2478 assert(OpsVec[OpIdx].size() == getNumLanes() &&
2479 "Expected same num of lanes across all operands");
2480 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
2481 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
2482 return OpVL;
2485 // Performs operand reordering for 2 or more operands.
2486 // The original operands are in OrigOps[OpIdx][Lane].
2487 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
2488 void reorder() {
2489 unsigned NumOperands = getNumOperands();
2490 unsigned NumLanes = getNumLanes();
2491 // Each operand has its own mode. We are using this mode to help us select
2492 // the instructions for each lane, so that they match best with the ones
2493 // we have selected so far.
2494 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
2496 // This is a greedy single-pass algorithm. We are going over each lane
2497 // once and deciding on the best order right away with no back-tracking.
2498 // However, in order to increase its effectiveness, we start with the lane
2499 // that has operands that can move the least. For example, given the
2500 // following lanes:
2501 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2502 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2503 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2504 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2505 // we will start at Lane 1, since the operands of the subtraction cannot
2506 // be reordered. Then we will visit the rest of the lanes in a circular
2507 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
2509 // Find the first lane that we will start our search from.
2510 unsigned FirstLane = getBestLaneToStartReordering();
2512 // Initialize the modes.
2513 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2514 Value *OpLane0 = getValue(OpIdx, FirstLane);
2515 // Keep track if we have instructions with all the same opcode on one
2516 // side.
2517 if (auto *OpILane0 = dyn_cast<Instruction>(OpLane0)) {
2518 // Check if OpLane0 should be broadcast.
2519 if (shouldBroadcast(OpLane0, OpIdx, FirstLane) ||
2520 !canBeVectorized(OpILane0, OpIdx, FirstLane))
2521 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2522 else if (isa<LoadInst>(OpILane0))
2523 ReorderingModes[OpIdx] = ReorderingMode::Load;
2524 else
2525 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2526 } else if (isa<Constant>(OpLane0)) {
2527 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2528 } else if (isa<Argument>(OpLane0)) {
2529 // Our best hope is a Splat. It may save some cost in some cases.
2530 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2531 } else {
2532 llvm_unreachable("Unexpected value kind.");
2536 // Check that we don't have the same operands. No need to reorder if operands
2537 // are just a perfect diamond or a shuffled diamond match. Do not skip
2538 // reordering only for possible broadcasts or a non-power-of-2 number of
2539 // scalars (just for now).
2540 auto &&SkipReordering = [this]() {
2541 SmallPtrSet<Value *, 4> UniqueValues;
2542 ArrayRef<OperandData> Op0 = OpsVec.front();
2543 for (const OperandData &Data : Op0)
2544 UniqueValues.insert(Data.V);
2545 for (ArrayRef<OperandData> Op : drop_begin(OpsVec, 1)) {
2546 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2547 return !UniqueValues.contains(Data.V);
2549 return false;
2551 // TODO: Check if we can remove a check for a non-power-of-2 number of
2552 // scalars after full support of non-power-of-2 vectorization.
2553 return UniqueValues.size() != 2 && has_single_bit(UniqueValues.size());
2556 // If the initial strategy fails for any of the operand indexes, then we
2557 // perform reordering again in a second pass. This helps avoid assigning
2558 // high priority to the failed strategy, and should improve reordering for
2559 // the non-failed operand indexes.
2560 for (int Pass = 0; Pass != 2; ++Pass) {
2561 // Check if there is no need to reorder operands since they are a perfect
2562 // or shuffled diamond match.
2563 // Need to do it to avoid extra external use cost counting for
2564 // shuffled matches, which may cause regressions.
2565 if (SkipReordering())
2566 break;
2567 // Skip the second pass if the first pass did not fail.
2568 bool StrategyFailed = false;
2569 // Mark all operand data as free to use.
2570 clearUsed();
2571 // We keep the original operand order for the FirstLane, so reorder the
2572 // rest of the lanes. We are visiting the nodes in a circular fashion,
2573 // using FirstLane as the center point and increasing the radius
2574 // distance.
2575 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2576 for (unsigned I = 0; I < NumOperands; ++I)
2577 MainAltOps[I].push_back(getData(I, FirstLane).V);
2579 SmallBitVector UsedLanes(NumLanes);
2580 UsedLanes.set(FirstLane);
2581 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2582 // Visit the lane on the right and then the lane on the left.
2583 for (int Direction : {+1, -1}) {
2584 int Lane = FirstLane + Direction * Distance;
2585 if (Lane < 0 || Lane >= (int)NumLanes)
2586 continue;
2587 UsedLanes.set(Lane);
2588 int LastLane = Lane - Direction;
2589 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2590 "Out of bounds");
2591 // Look for a good match for each operand.
2592 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2593 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
2594 std::optional<unsigned> BestIdx =
2595 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes,
2596 MainAltOps[OpIdx], UsedLanes);
2597 // By not selecting a value, we allow the operands that follow to
2598 // select a better matching value. We will get a non-null value in
2599 // the next run of getBestOperand().
2600 if (BestIdx) {
2601 // Swap the current operand with the one returned by
2602 // getBestOperand().
2603 swap(OpIdx, *BestIdx, Lane);
2604 } else {
2605 // Enable the second pass.
2606 StrategyFailed = true;
2608 // Try to get the alternate opcode and follow it during analysis.
2609 if (MainAltOps[OpIdx].size() != 2) {
2610 OperandData &AltOp = getData(OpIdx, Lane);
2611 InstructionsState OpS =
2612 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI);
2613 if (OpS.getOpcode() && OpS.isAltShuffle())
2614 MainAltOps[OpIdx].push_back(AltOp.V);
2619 // Skip second pass if the strategy did not fail.
2620 if (!StrategyFailed)
2621 break;
2625 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2626 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
2627 switch (RMode) {
2628 case ReorderingMode::Load:
2629 return "Load";
2630 case ReorderingMode::Opcode:
2631 return "Opcode";
2632 case ReorderingMode::Constant:
2633 return "Constant";
2634 case ReorderingMode::Splat:
2635 return "Splat";
2636 case ReorderingMode::Failed:
2637 return "Failed";
2639 llvm_unreachable("Unimplemented Reordering Type");
2642 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
2643 raw_ostream &OS) {
2644 return OS << getModeStr(RMode);
2647 /// Debug print.
2648 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
2649 printMode(RMode, dbgs());
2652 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
2653 return printMode(RMode, OS);
2656 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
2657 const unsigned Indent = 2;
2658 unsigned Cnt = 0;
2659 for (const OperandDataVec &OpDataVec : OpsVec) {
2660 OS << "Operand " << Cnt++ << "\n";
2661 for (const OperandData &OpData : OpDataVec) {
2662 OS.indent(Indent) << "{";
2663 if (Value *V = OpData.V)
2664 OS << *V;
2665 else
2666 OS << "null";
2667 OS << ", APO:" << OpData.APO << "}\n";
2669 OS << "\n";
2671 return OS;
2674 /// Debug print.
2675 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
2676 #endif
2679 /// Evaluate each pair in \p Candidates and return the index into \p Candidates
2680 /// of the pair with the highest score, deemed to have the best chance to form
2681 /// the root of a profitable tree to vectorize. Return std::nullopt if no
2682 /// candidate scored above LookAheadHeuristics::ScoreFail. \param Limit Lower
2683 /// limit of the cost, considered to be a good enough score.
2684 std::optional<int>
2685 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
2686 int Limit = LookAheadHeuristics::ScoreFail) const {
2687 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2,
2688 RootLookAheadMaxDepth);
2689 int BestScore = Limit;
2690 std::optional<int> Index;
2691 for (int I : seq<int>(0, Candidates.size())) {
2692 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
2693 Candidates[I].second,
2694 /*U1=*/nullptr, /*U2=*/nullptr,
2695 /*CurrLevel=*/1, {});
2696 if (Score > BestScore) {
2697 BestScore = Score;
2698 Index = I;
2701 return Index;
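// Usage sketch (hypothetical, assuming the two values of each pair are not in
// the same tree entry): given Candidates = {(load A[0], load A[1]),
// (load A[0], %cst)}, the first pair scores ScoreConsecutiveLoads (4) while
// the second scores ScoreFail, so index 0 is returned; if every pair scored
// at or below Limit, the function would return std::nullopt.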
2704 /// Checks if the instruction is marked for deletion.
2705 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
2707 /// Removes an instruction from its block and eventually deletes it.
2708 /// It's like Instruction::eraseFromParent() except that the actual deletion
2709 /// is delayed until BoUpSLP is destructed.
2710 void eraseInstruction(Instruction *I) {
2711 DeletedInstructions.insert(I);
2714 /// Remove instructions from the parent function and clear the operands of \p
2715 /// DeadVals instructions, marking trivially dead operands for deletion.
2716 template <typename T>
2717 void removeInstructionsAndOperands(ArrayRef<T *> DeadVals) {
2718 SmallVector<WeakTrackingVH> DeadInsts;
2719 for (T *V : DeadVals) {
2720 auto *I = cast<Instruction>(V);
2721 DeletedInstructions.insert(I);
2723 DenseSet<Value *> Processed;
2724 for (T *V : DeadVals) {
2725 if (!V || !Processed.insert(V).second)
2726 continue;
2727 auto *I = cast<Instruction>(V);
2728 salvageDebugInfo(*I);
2729 SmallVector<const TreeEntry *> Entries;
2730 if (const TreeEntry *Entry = getTreeEntry(I)) {
2731 Entries.push_back(Entry);
2732 auto It = MultiNodeScalars.find(I);
2733 if (It != MultiNodeScalars.end())
2734 Entries.append(It->second.begin(), It->second.end());
2736 for (Use &U : I->operands()) {
2737 if (auto *OpI = dyn_cast_if_present<Instruction>(U.get());
2738 OpI && !DeletedInstructions.contains(OpI) && OpI->hasOneUser() &&
2739 wouldInstructionBeTriviallyDead(OpI, TLI) &&
2740 (Entries.empty() || none_of(Entries, [&](const TreeEntry *Entry) {
2741 return Entry->VectorizedValue == OpI;
2742 })))
2743 DeadInsts.push_back(OpI);
2745 I->dropAllReferences();
2747 for (T *V : DeadVals) {
2748 auto *I = cast<Instruction>(V);
2749 if (!I->getParent())
2750 continue;
2751 assert((I->use_empty() || all_of(I->uses(),
2752 [&](Use &U) {
2753 return isDeleted(
2754 cast<Instruction>(U.getUser()));
2755 })) &&
2756 "trying to erase instruction with users.");
2757 I->removeFromParent();
2758 SE->forgetValue(I);
2760 // Process the dead instruction list until empty.
2761 while (!DeadInsts.empty()) {
2762 Value *V = DeadInsts.pop_back_val();
2763 Instruction *VI = cast_or_null<Instruction>(V);
2764 if (!VI || !VI->getParent())
2765 continue;
2766 assert(isInstructionTriviallyDead(VI, TLI) &&
2767 "Live instruction found in dead worklist!");
2768 assert(VI->use_empty() && "Instructions with uses are not dead.");
2770 // Don't lose the debug info while deleting the instructions.
2771 salvageDebugInfo(*VI);
2773 // Null out all of the instruction's operands to see if any operand
2774 // becomes dead as we go.
2775 for (Use &OpU : VI->operands()) {
2776 Value *OpV = OpU.get();
2777 if (!OpV)
2778 continue;
2779 OpU.set(nullptr);
2781 if (!OpV->use_empty())
2782 continue;
2784 // If the operand is an instruction that became dead as we nulled out
2785 // the operand, and if it is 'trivially' dead, delete it in a future
2786 // loop iteration.
2787 if (auto *OpI = dyn_cast<Instruction>(OpV))
2788 if (!DeletedInstructions.contains(OpI) &&
2789 isInstructionTriviallyDead(OpI, TLI))
2790 DeadInsts.push_back(OpI);
2793 VI->removeFromParent();
2794 DeletedInstructions.insert(VI);
2795 SE->forgetValue(VI);
2799 /// Checks if the instruction was already analyzed for being a possible
2800 /// reduction root.
2801 bool isAnalyzedReductionRoot(Instruction *I) const {
2802 return AnalyzedReductionsRoots.count(I);
2804 /// Register the given instruction as already analyzed for being a possible
2805 /// reduction root.
2806 void analyzedReductionRoot(Instruction *I) {
2807 AnalyzedReductionsRoots.insert(I);
2809 /// Checks if the provided list of reduced values was checked already for
2810 /// vectorization.
2811 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2812 return AnalyzedReductionVals.contains(hash_value(VL));
2814 /// Adds the list of reduced values to the list of values already checked for
2815 /// vectorization.
2816 void analyzedReductionVals(ArrayRef<Value *> VL) {
2817 AnalyzedReductionVals.insert(hash_value(VL));
2819 /// Clear the list of the analyzed reduction root instructions.
2820 void clearReductionData() {
2821 AnalyzedReductionsRoots.clear();
2822 AnalyzedReductionVals.clear();
2823 AnalyzedMinBWVals.clear();
2825 /// Checks if the given value is gathered in one of the nodes.
2826 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2827 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2829 /// Checks if the given value is gathered in one of the nodes.
2830 bool isGathered(const Value *V) const {
2831 return MustGather.contains(V);
2833 /// Checks if the specified value was not scheduled.
2834 bool isNotScheduled(const Value *V) const {
2835 return NonScheduledFirst.contains(V);
2838 /// Check if the value is vectorized in the tree.
2839 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2841 ~BoUpSLP();
2843 private:
2844 /// Determine if a node \p E can be demoted to a smaller type with a
2845 /// truncation. We collect the entries that will be demoted in ToDemote.
2846 /// \param E Node for analysis
2847 /// \param ToDemote indices of the nodes to be demoted.
2848 bool collectValuesToDemote(const TreeEntry &E, bool IsProfitableToDemoteRoot,
2849 unsigned &BitWidth,
2850 SmallVectorImpl<unsigned> &ToDemote,
2851 DenseSet<const TreeEntry *> &Visited,
2852 unsigned &MaxDepthLevel,
2853 bool &IsProfitableToDemote,
2854 bool IsTruncRoot) const;
2856 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2857 /// reordering (i.e. the operands can be reordered because they have only one
2858 /// user and are reorderable).
2859 /// \param ReorderableGathers List of all gather nodes that require reordering
2860 /// (e.g., gather of extractelements or partially vectorizable loads).
2861 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2862 /// reordering, subset of \p NonVectorized.
2863 bool
2864 canReorderOperands(TreeEntry *UserTE,
2865 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2866 ArrayRef<TreeEntry *> ReorderableGathers,
2867 SmallVectorImpl<TreeEntry *> &GatherOps);
2869 /// Checks if the given \p TE is a gather node with clustered reused scalars
2870 /// and reorders it per given \p Mask.
2871 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2873 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2874 /// if any. If it is not vectorized (gather node), returns nullptr.
2875 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2876 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2877 TreeEntry *TE = nullptr;
2878 const auto *It = find_if(VL, [&](Value *V) {
2879 TE = getTreeEntry(V);
2880 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx)))
2881 return true;
2882 auto It = MultiNodeScalars.find(V);
2883 if (It != MultiNodeScalars.end()) {
2884 for (TreeEntry *E : It->second) {
2885 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) {
2886 TE = E;
2887 return true;
2891 return false;
2893 if (It != VL.end()) {
2894 assert(TE->isSame(VL) && "Expected same scalars.");
2895 return TE;
2897 return nullptr;
2900 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2901 /// if any. If it is not vectorized (gather node), returns nullptr.
2902 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2903 unsigned OpIdx) const {
2904 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2905 const_cast<TreeEntry *>(UserTE), OpIdx);
2908 /// Checks if all users of \p I are part of the vectorization tree.
2909 bool areAllUsersVectorized(
2910 Instruction *I,
2911 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const;
2913 /// Return information about the vector formed for the specified index
2914 /// of a vector of (the same) instructions.
2915 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops);
2917 /// \returns the graph entry for the \p Idx operand of the \p E entry.
2918 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const;
2920 /// Gets the root instruction for the given node. If the node is a strided
2921 /// load/store node with the reverse order, the root instruction is the last
2922 /// one.
2923 Instruction *getRootEntryInstruction(const TreeEntry &Entry) const;
2925 /// \returns Cast context for the given graph node.
2926 TargetTransformInfo::CastContextHint
2927 getCastContextHint(const TreeEntry &TE) const;
2929 /// \returns the cost of the vectorizable entry.
2930 InstructionCost getEntryCost(const TreeEntry *E,
2931 ArrayRef<Value *> VectorizedVals,
2932 SmallPtrSetImpl<Value *> &CheckedExtracts);
2934 /// This is the recursive part of buildTree.
2935 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
2936 const EdgeInfo &EI, unsigned InterleaveFactor = 0);
2938 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
2939 /// be vectorized to use the original vector (or aggregate "bitcast" to a
2940 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
2941 /// returns false, setting \p CurrentOrder to either an empty vector or a
2942 /// non-identity permutation that allows extract instructions to be reused.
2943 /// \param ResizeAllowed indicates whether it is allowed to handle subvector
2944 /// extract order.
2945 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
2946 SmallVectorImpl<unsigned> &CurrentOrder,
2947 bool ResizeAllowed = false) const;
2949 /// Vectorize a single entry in the tree.
2950 /// \param PostponedPHIs true if emission of phi nodes needs to be postponed
2951 /// to avoid issues with def-use order.
2952 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs);
2954 /// Returns the vectorized operand node that matches the order of the scalars
2955 /// in operand number \p NodeIdx of entry \p E.
2956 TreeEntry *getMatchedVectorizedOperand(const TreeEntry *E, unsigned NodeIdx);
2957 const TreeEntry *getMatchedVectorizedOperand(const TreeEntry *E,
2958 unsigned NodeIdx) const {
2959 return const_cast<BoUpSLP *>(this)->getMatchedVectorizedOperand(E, NodeIdx);
2962 /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry
2963 /// \p E.
2964 /// \param PostponedPHIs true if emission of phi nodes needs to be postponed
2965 /// to avoid issues with def-use order.
2966 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs);
2968 /// Create a new vector from a list of scalar values. Produces a sequence
2969 /// which exploits values reused across lanes, and arranges the inserts
2970 /// for ease of later optimization.
2971 template <typename BVTy, typename ResTy, typename... Args>
2972 ResTy processBuildVector(const TreeEntry *E, Type *ScalarTy, Args &...Params);
2974 /// Create a new vector from a list of scalar values. Produces a sequence
2975 /// which exploits values reused across lanes, and arranges the inserts
2976 /// for ease of later optimization.
2977 Value *createBuildVector(const TreeEntry *E, Type *ScalarTy,
2978 bool PostponedPHIs);
2980 /// Returns the instruction in the bundle, which can be used as a base point
2981 /// for scheduling. Usually it is the last instruction in the bundle, except
2982 /// for the case when all operands are external (in this case, it is the first
2983 /// instruction in the list).
2984 Instruction &getLastInstructionInBundle(const TreeEntry *E);
2986 /// Tries to find extractelement instructions with constant indices from a fixed
2987 /// vector type and gather such instructions into a group, which will likely
2988 /// be detected as a shuffle of 1 or 2 input vectors. If this attempt
2989 /// was successful, the matched scalars are replaced by poison values in \p VL
2990 /// for future analysis.
2991 std::optional<TargetTransformInfo::ShuffleKind>
2992 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL,
2993 SmallVectorImpl<int> &Mask) const;
2995 /// Tries to find extractelement instructions with constant indices from a fixed
2996 /// vector type and gather such instructions into a group, which will likely
2997 /// be detected as a shuffle of 1 or 2 input vectors. If this attempt
2998 /// was successful, the matched scalars are replaced by poison values in \p VL
2999 /// for future analysis.
3000 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
3001 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
3002 SmallVectorImpl<int> &Mask,
3003 unsigned NumParts) const;
3005 /// Checks if the gathered \p VL can be represented as a single register
3006 /// shuffle(s) of previous tree entries.
3007 /// \param TE Tree entry checked for permutation.
3008 /// \param VL List of scalars (a subset of the TE scalars), checked for
3009 /// permutations. Must form a single-register vector.
3010 /// \param ForOrder Tries to fetch the best candidates for ordering info. Also
3011 /// directs building the mask from the original vector values, without
3012 /// relying on the potential reordering.
3013 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
3014 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask.
3015 std::optional<TargetTransformInfo::ShuffleKind>
3016 isGatherShuffledSingleRegisterEntry(
3017 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
3018 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part,
3019 bool ForOrder);
3021 /// Checks if the gathered \p VL can be represented as multi-register
3022 /// shuffle(s) of previous tree entries.
3023 /// \param TE Tree entry checked for permutation.
3024 /// \param VL List of scalars (a subset of the TE scalars), checked for
3025 /// permutations.
3026 /// \param ForOrder Tries to fetch the best candidates for ordering info. Also
3027 /// directs building the mask from the original vector values, without
3028 /// relying on the potential reordering.
3029 /// \returns per-register series of ShuffleKind, if gathered values can be
3030 /// represented as shuffles of previous tree entries. \p Mask is filled with
3031 /// the shuffle mask (also on a per-register basis).
3032 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
3033 isGatherShuffledEntry(
3034 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
3035 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries,
3036 unsigned NumParts, bool ForOrder = false);
3038 /// \returns the scalarization cost for this list of values. Assuming that
3039 /// this subtree gets vectorized, we may need to extract the values from the
3040 /// roots. This method calculates the cost of extracting the values.
3041 /// \param ForPoisonSrc true if initial vector is poison, false otherwise.
3042 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
3043 Type *ScalarTy) const;
3045 /// Set the Builder insert point to one after the last instruction in
3046 /// the bundle
3047 void setInsertPointAfterBundle(const TreeEntry *E);
3049 /// \returns a vector from a collection of scalars in \p VL. If \p Root is not
3050 /// specified, the starting vector value is poison.
3051 Value *
3052 gather(ArrayRef<Value *> VL, Value *Root, Type *ScalarTy,
3053 function_ref<Value *(Value *, Value *, ArrayRef<int>)> CreateShuffle);
3055 /// \returns whether the VectorizableTree is fully vectorizable and will
3056 /// be beneficial even if the tree height is tiny.
3057 bool isFullyVectorizableTinyTree(bool ForReduction) const;
3059 /// Run through the list of all gathered loads in the graph and try to find
3060 /// vector loads/masked gathers instead of regular gathers. Later these loads
3061 /// are reshuffled to build the final gathered nodes.
3062 void tryToVectorizeGatheredLoads(
3063 const SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
3064 SmallVector<SmallVector<std::pair<LoadInst *, int>>>,
3065 8> &GatheredLoads);
3067 /// Reorder commutative or alt operands to get better probability of
3068 /// generating vectorized code.
3069 static void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
3070 SmallVectorImpl<Value *> &Left,
3071 SmallVectorImpl<Value *> &Right,
3072 const BoUpSLP &R);
3074 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
3075 /// users of \p TE and collects the stores. It returns the map from the store
3076 /// pointers to the collected stores.
3077 SmallVector<SmallVector<StoreInst *>>
3078 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
3080 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
3081 /// stores in \p StoresVec can form a vector instruction. If so it returns
3082 /// true and populates \p ReorderIndices with the shuffle indices of the
3083 /// stores when compared to the sorted vector.
3084 bool canFormVector(ArrayRef<StoreInst *> StoresVec,
3085 OrdersType &ReorderIndices) const;
3087 /// Iterates through the users of \p TE, looking for scalar stores that can be
3088 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
3089 /// their order and builds an order index vector for each store bundle. It
3090 /// returns all these order vectors found.
3091 /// We run this after the tree has formed, otherwise we may come across user
3092 /// instructions that are not yet in the tree.
3093 SmallVector<OrdersType, 1>
3094 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
3096 /// Tries to reorder the gathering node for better vectorization
3097 /// opportunities.
3098 void reorderGatherNode(TreeEntry &TE);
3100 struct TreeEntry {
3101 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
3102 TreeEntry(VecTreeTy &Container) : Container(Container) {}
3104 /// \returns Common mask for reorder indices and reused scalars.
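/// For illustration, assuming ::addMask composes the reuse mask on top of the
/// inverted reorder mask: ReorderIndices = {2, 0, 1} and
/// ReuseShuffleIndices = {0, 1, 2, 0} would combine into {1, 2, 0, 1}.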
3105 SmallVector<int> getCommonMask() const {
3106 SmallVector<int> Mask;
3107 inversePermutation(ReorderIndices, Mask);
3108 ::addMask(Mask, ReuseShuffleIndices);
3109 return Mask;
3112 /// \returns true if the scalars in VL are equal to this entry.
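/// For illustration: an entry with Scalars = {a, b} and
/// ReuseShuffleIndices = {0, 1, 0, 1} is considered the same as VL = {a, b, a, b}.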
3113 bool isSame(ArrayRef<Value *> VL) const {
3114 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
3115 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
3116 return std::equal(VL.begin(), VL.end(), Scalars.begin());
3117 return VL.size() == Mask.size() &&
3118 std::equal(VL.begin(), VL.end(), Mask.begin(),
3119 [Scalars](Value *V, int Idx) {
3120 return (isa<UndefValue>(V) &&
3121 Idx == PoisonMaskElem) ||
3122 (Idx != PoisonMaskElem && V == Scalars[Idx]);
3125 if (!ReorderIndices.empty()) {
3126 // TODO: implement matching if the nodes are just reordered, still can
3127 // treat the vector as the same if the list of scalars matches VL
3128 // directly, without reordering.
3129 SmallVector<int> Mask;
3130 inversePermutation(ReorderIndices, Mask);
3131 if (VL.size() == Scalars.size())
3132 return IsSame(Scalars, Mask);
3133 if (VL.size() == ReuseShuffleIndices.size()) {
3134 ::addMask(Mask, ReuseShuffleIndices);
3135 return IsSame(Scalars, Mask);
3137 return false;
3139 return IsSame(Scalars, ReuseShuffleIndices);
3142 bool isOperandGatherNode(const EdgeInfo &UserEI) const {
3143 return isGather() && !UserTreeIndices.empty() &&
3144 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx &&
3145 UserTreeIndices.front().UserTE == UserEI.UserTE;
3148 /// \returns true if current entry has same operands as \p TE.
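/// For illustration: an entry with operands ({a0, a1}, {b0, b1}) has equal
/// operands to one with operands ({b0, b1}, {a0, a1}); each operand of \p TE
/// only needs to match some not-yet-matched operand of this entry, regardless
/// of position.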
3149 bool hasEqualOperands(const TreeEntry &TE) const {
3150 if (TE.getNumOperands() != getNumOperands())
3151 return false;
3152 SmallBitVector Used(getNumOperands());
3153 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
3154 unsigned PrevCount = Used.count();
3155 for (unsigned K = 0; K < E; ++K) {
3156 if (Used.test(K))
3157 continue;
3158 if (getOperand(K) == TE.getOperand(I)) {
3159 Used.set(K);
3160 break;
3163 // Check if we actually found the matching operand.
3164 if (PrevCount == Used.count())
3165 return false;
3167 return true;
3170 /// \return Final vectorization factor for the node. Defined by the total
3171 /// number of vectorized scalars, including those used several times in the
3172 /// entry and counted in \a ReuseShuffleIndices, if any.
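/// For illustration: a node with Scalars = {a, b} and
/// ReuseShuffleIndices = {0, 1, 0, 1} has a vector factor of 4, even though
/// only two distinct scalars are vectorized.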
3173 unsigned getVectorFactor() const {
3174 if (!ReuseShuffleIndices.empty())
3175 return ReuseShuffleIndices.size();
3176 return Scalars.size();
3179 /// Checks if the current node is a gather node.
3180 bool isGather() const { return State == NeedToGather; }
3182 /// A vector of scalars.
3183 ValueList Scalars;
3185 /// The Scalars are vectorized into this value. It is initialized to Null.
3186 WeakTrackingVH VectorizedValue = nullptr;
3188 /// New vector phi instructions emitted for the vectorized phi nodes.
3189 PHINode *PHI = nullptr;
3191 /// Do we need to gather this sequence or vectorize it
3192 /// (either with vector instruction or with scatter/gather
3193 /// intrinsics for store/load)?
3194 enum EntryState {
3195 Vectorize, ///< The node is regularly vectorized.
3196 ScatterVectorize, ///< Masked scatter/gather node.
3197 StridedVectorize, ///< Strided loads (and stores)
3198 NeedToGather, ///< Gather/buildvector node.
3199 CombinedVectorize, ///< Vectorized node, combined with its user into more
3200 ///< complex node like select/cmp to minmax, mul/add to
3201 ///< fma, etc. Must be used for the following nodes in
3202 ///< the pattern, not the very first one.
3204 EntryState State;
3206 /// List of combined opcodes supported by the vectorizer.
3207 enum CombinedOpcode {
3208 NotCombinedOp = -1,
3209 MinMax = Instruction::OtherOpsEnd + 1,
3211 CombinedOpcode CombinedOp = NotCombinedOp;
3213 /// Does this sequence require some shuffling?
3214 SmallVector<int, 4> ReuseShuffleIndices;
3216 /// Does this entry require reordering?
3217 SmallVector<unsigned, 4> ReorderIndices;
3219 /// Points back to the VectorizableTree.
3221 /// Only used for Graphviz right now. Unfortunately GraphTraits::NodeRef has
3222 /// to be a pointer and needs to be able to initialize the child iterator.
3223 /// Thus we need a reference back to the container to translate the indices
3224 /// to entries.
3225 VecTreeTy &Container;
3227 /// The TreeEntry index containing the user of this entry. We can actually
3228 /// have multiple users so the data structure is not truly a tree.
3229 SmallVector<EdgeInfo, 1> UserTreeIndices;
3231 /// The index of this treeEntry in VectorizableTree.
3232 unsigned Idx = 0;
3234 /// For gather/buildvector/alt opcode (TODO) nodes, which are combined from
3235 /// other nodes as a series of insertvector instructions.
3236 SmallVector<std::pair<unsigned, unsigned>, 0> CombinedEntriesWithIndices;
3238 private:
3239 /// The operands of each instruction in each lane Operands[op_index][lane].
3240 /// Note: This helps avoid the replication of the code that performs the
3241 /// reordering of operands during buildTree_rec() and vectorizeTree().
3242 SmallVector<ValueList, 2> Operands;
3244 /// The main/alternate instruction.
3245 Instruction *MainOp = nullptr;
3246 Instruction *AltOp = nullptr;
3248 /// Interleaving factor for interleaved loads Vectorize nodes.
3249 unsigned InterleaveFactor = 0;
3251 public:
3252 /// Returns interleave factor for interleave nodes.
3253 unsigned getInterleaveFactor() const { return InterleaveFactor; }
3254 /// Sets interleaving factor for the interleaving nodes.
3255 void setInterleave(unsigned Factor) { InterleaveFactor = Factor; }
3257 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
3258 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
3259 if (Operands.size() < OpIdx + 1)
3260 Operands.resize(OpIdx + 1);
3261 assert(Operands[OpIdx].empty() && "Already resized?");
3262 assert(OpVL.size() <= Scalars.size() &&
3263 "Number of operands is greater than the number of scalars.");
3264 Operands[OpIdx].resize(OpVL.size());
3265 copy(OpVL, Operands[OpIdx].begin());
3268 /// Set the operands of this bundle in their original order.
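/// For illustration: for Scalars = {add a0, b0; add a1, b1} this produces
/// Operands[0] = {a0, a1} and Operands[1] = {b0, b1}.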
3269 void setOperandsInOrder() {
3270 assert(Operands.empty() && "Already initialized?");
3271 auto *I0 = cast<Instruction>(Scalars[0]);
3272 Operands.resize(I0->getNumOperands());
3273 unsigned NumLanes = Scalars.size();
3274 for (unsigned OpIdx = 0, NumOperands = I0->getNumOperands();
3275 OpIdx != NumOperands; ++OpIdx) {
3276 Operands[OpIdx].resize(NumLanes);
3277 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
3278 auto *I = cast<Instruction>(Scalars[Lane]);
3279 assert(I->getNumOperands() == NumOperands &&
3280 "Expected same number of operands");
3281 Operands[OpIdx][Lane] = I->getOperand(OpIdx);
3286 /// Reorders operands of the node to the given mask \p Mask.
3287 void reorderOperands(ArrayRef<int> Mask) {
3288 for (ValueList &Operand : Operands)
3289 reorderScalars(Operand, Mask);
3292 /// \returns the \p OpIdx operand of this TreeEntry.
3293 ValueList &getOperand(unsigned OpIdx) {
3294 assert(OpIdx < Operands.size() && "Off bounds");
3295 return Operands[OpIdx];
3298 /// \returns the \p OpIdx operand of this TreeEntry.
3299 ArrayRef<Value *> getOperand(unsigned OpIdx) const {
3300 assert(OpIdx < Operands.size() && "Off bounds");
3301 return Operands[OpIdx];
3304 /// \returns the number of operands.
3305 unsigned getNumOperands() const { return Operands.size(); }
3307 /// \return the single \p OpIdx operand.
3308 Value *getSingleOperand(unsigned OpIdx) const {
3309 assert(OpIdx < Operands.size() && "Off bounds");
3310 assert(!Operands[OpIdx].empty() && "No operand available");
3311 return Operands[OpIdx][0];
3314 /// Some of the instructions in the list have alternate opcodes.
3315 bool isAltShuffle() const { return MainOp != AltOp; }
3317 bool isOpcodeOrAlt(Instruction *I) const {
3318 unsigned CheckedOpcode = I->getOpcode();
3319 return (getOpcode() == CheckedOpcode ||
3320 getAltOpcode() == CheckedOpcode);
3323 /// Chooses the correct key for scheduling data. If \p Op has the same (or
3324 /// alternate) opcode as the main operation of this entry, the key is \p Op.
3325 /// Otherwise the key is the main operation.
3326 Value *isOneOf(Value *Op) const {
3327 auto *I = dyn_cast<Instruction>(Op);
3328 if (I && isOpcodeOrAlt(I))
3329 return Op;
3330 return MainOp;
3333 void setOperations(const InstructionsState &S) {
3334 MainOp = S.MainOp;
3335 AltOp = S.AltOp;
3338 Instruction *getMainOp() const {
3339 return MainOp;
3342 Instruction *getAltOp() const {
3343 return AltOp;
3346 /// The main/alternate opcodes for the list of instructions.
3347 unsigned getOpcode() const {
3348 return MainOp ? MainOp->getOpcode() : 0;
3351 unsigned getAltOpcode() const {
3352 return AltOp ? AltOp->getOpcode() : 0;
3355 /// When ReuseShuffleIndices is empty it just returns the position of \p V
3356 /// within the vector of Scalars. Otherwise, tries to remap it via its reuse index.
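/// For illustration: with Scalars = {a, b}, ReorderIndices = {1, 0} and
/// ReuseShuffleIndices = {0, 1, 0, 1}, findLaneForValue(b) finds b at position
/// 1 in Scalars, remaps it to 0 through ReorderIndices, and returns 0, the
/// first reuse lane referring to it.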
3357 int findLaneForValue(Value *V) const {
3358 unsigned FoundLane = getVectorFactor();
3359 for (auto *It = find(Scalars, V), *End = Scalars.end(); It != End;
3360 std::advance(It, 1)) {
3361 if (*It != V)
3362 continue;
3363 FoundLane = std::distance(Scalars.begin(), It);
3364 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
3365 if (!ReorderIndices.empty())
3366 FoundLane = ReorderIndices[FoundLane];
3367 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
3368 if (ReuseShuffleIndices.empty())
3369 break;
3370 if (auto *RIt = find(ReuseShuffleIndices, FoundLane);
3371 RIt != ReuseShuffleIndices.end()) {
3372 FoundLane = std::distance(ReuseShuffleIndices.begin(), RIt);
3373 break;
3376 assert(FoundLane < getVectorFactor() && "Unable to find given value.");
3377 return FoundLane;
3380 /// Build a shuffle mask for graph entry which represents a merge of main
3381 /// and alternate operations.
3382 void
3383 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp,
3384 SmallVectorImpl<int> &Mask,
3385 SmallVectorImpl<Value *> *OpScalars = nullptr,
3386 SmallVectorImpl<Value *> *AltScalars = nullptr) const;
3388 /// Return true if this is a non-power-of-2 node.
3389 bool isNonPowOf2Vec() const {
3390 bool IsNonPowerOf2 = !has_single_bit(Scalars.size());
3391 return IsNonPowerOf2;
3394 /// Return true if this is a node which tries to vectorize a number of
3395 /// elements that forms whole vectors.
3396 bool
3397 hasNonWholeRegisterOrNonPowerOf2Vec(const TargetTransformInfo &TTI) const {
3398 bool IsNonPowerOf2 = !hasFullVectorsOrPowerOf2(
3399 TTI, getValueType(Scalars.front()), Scalars.size());
3400 assert((!IsNonPowerOf2 || ReuseShuffleIndices.empty()) &&
3401 "Reshuffling not supported with non-power-of-2 vectors yet.");
3402 return IsNonPowerOf2;
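/// \returns the scalar at the ordered position \p Idx, taking \p ReorderIndices
/// into account, if present. Must be used only for buildvector/gather nodes.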
3405 Value *getOrdered(unsigned Idx) const {
3406 assert(isGather() && "Must be used only for buildvectors/gathers.");
3407 if (ReorderIndices.empty())
3408 return Scalars[Idx];
3409 SmallVector<int> Mask;
3410 inversePermutation(ReorderIndices, Mask);
3411 return Scalars[Mask[Idx]];
3414 #ifndef NDEBUG
3415 /// Debug printer.
3416 LLVM_DUMP_METHOD void dump() const {
3417 dbgs() << Idx << ".\n";
3418 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
3419 dbgs() << "Operand " << OpI << ":\n";
3420 for (const Value *V : Operands[OpI])
3421 dbgs().indent(2) << *V << "\n";
3423 dbgs() << "Scalars: \n";
3424 for (Value *V : Scalars)
3425 dbgs().indent(2) << *V << "\n";
3426 dbgs() << "State: ";
3427 switch (State) {
3428 case Vectorize:
3429 if (InterleaveFactor > 0) {
3430 dbgs() << "Vectorize with interleave factor " << InterleaveFactor
3431 << "\n";
3432 } else {
3433 dbgs() << "Vectorize\n";
3435 break;
3436 case ScatterVectorize:
3437 dbgs() << "ScatterVectorize\n";
3438 break;
3439 case StridedVectorize:
3440 dbgs() << "StridedVectorize\n";
3441 break;
3442 case NeedToGather:
3443 dbgs() << "NeedToGather\n";
3444 break;
3445 case CombinedVectorize:
3446 dbgs() << "CombinedVectorize\n";
3447 break;
3449 dbgs() << "MainOp: ";
3450 if (MainOp)
3451 dbgs() << *MainOp << "\n";
3452 else
3453 dbgs() << "NULL\n";
3454 dbgs() << "AltOp: ";
3455 if (AltOp)
3456 dbgs() << *AltOp << "\n";
3457 else
3458 dbgs() << "NULL\n";
3459 dbgs() << "VectorizedValue: ";
3460 if (VectorizedValue)
3461 dbgs() << *VectorizedValue << "\n";
3462 else
3463 dbgs() << "NULL\n";
3464 dbgs() << "ReuseShuffleIndices: ";
3465 if (ReuseShuffleIndices.empty())
3466 dbgs() << "Empty";
3467 else
3468 for (int ReuseIdx : ReuseShuffleIndices)
3469 dbgs() << ReuseIdx << ", ";
3470 dbgs() << "\n";
3471 dbgs() << "ReorderIndices: ";
3472 for (unsigned ReorderIdx : ReorderIndices)
3473 dbgs() << ReorderIdx << ", ";
3474 dbgs() << "\n";
3475 dbgs() << "UserTreeIndices: ";
3476 for (const auto &EInfo : UserTreeIndices)
3477 dbgs() << EInfo << ", ";
3478 dbgs() << "\n";
3480 #endif
3483 #ifndef NDEBUG
3484 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
3485 InstructionCost VecCost, InstructionCost ScalarCost,
3486 StringRef Banner) const {
3487 dbgs() << "SLP: " << Banner << ":\n";
3488 E->dump();
3489 dbgs() << "SLP: Costs:\n";
3490 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
3491 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
3492 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
3493 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = "
3494 << ReuseShuffleCost + VecCost - ScalarCost << "\n";
3496 #endif
3498 /// Create a new VectorizableTree entry.
3499 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
3500 std::optional<ScheduleData *> Bundle,
3501 const InstructionsState &S,
3502 const EdgeInfo &UserTreeIdx,
3503 ArrayRef<int> ReuseShuffleIndices = {},
3504 ArrayRef<unsigned> ReorderIndices = {},
3505 unsigned InterleaveFactor = 0) {
3506 TreeEntry::EntryState EntryState =
3507 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
3508 TreeEntry *E = newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
3509 ReuseShuffleIndices, ReorderIndices);
3510 if (E && InterleaveFactor > 0)
3511 E->setInterleave(InterleaveFactor);
3512 return E;
3515 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
3516 TreeEntry::EntryState EntryState,
3517 std::optional<ScheduleData *> Bundle,
3518 const InstructionsState &S,
3519 const EdgeInfo &UserTreeIdx,
3520 ArrayRef<int> ReuseShuffleIndices = {},
3521 ArrayRef<unsigned> ReorderIndices = {}) {
3522 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
3523 (Bundle && EntryState != TreeEntry::NeedToGather)) &&
3524 "Need to vectorize gather entry?");
3525 // Gathered loads still gathered? Do not create entry, use the original one.
3526 if (GatheredLoadsEntriesFirst.has_value() &&
3527 EntryState == TreeEntry::NeedToGather &&
3528 S.getOpcode() == Instruction::Load && UserTreeIdx.EdgeIdx == UINT_MAX &&
3529 !UserTreeIdx.UserTE)
3530 return nullptr;
3531 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
3532 TreeEntry *Last = VectorizableTree.back().get();
3533 Last->Idx = VectorizableTree.size() - 1;
3534 Last->State = EntryState;
3535 // FIXME: Remove once support for ReuseShuffleIndices has been implemented
3536 // for non-power-of-two vectors.
3537 assert(
3538 (hasFullVectorsOrPowerOf2(*TTI, getValueType(VL.front()), VL.size()) ||
3539 ReuseShuffleIndices.empty()) &&
3540 "Reshuffling scalars not yet supported for nodes with padding");
3541 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
3542 ReuseShuffleIndices.end());
3543 if (ReorderIndices.empty()) {
3544 Last->Scalars.assign(VL.begin(), VL.end());
3545 Last->setOperations(S);
3546 } else {
3547 // Reorder scalars and build final mask.
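// For illustration: VL = {a, b, c, d} with ReorderIndices = {1, 0, 3, 2}
// yields Scalars = {b, a, d, c}.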
3548 Last->Scalars.assign(VL.size(), nullptr);
3549 transform(ReorderIndices, Last->Scalars.begin(),
3550 [VL](unsigned Idx) -> Value * {
3551 if (Idx >= VL.size())
3552 return UndefValue::get(VL.front()->getType());
3553 return VL[Idx];
3555 InstructionsState S = getSameOpcode(Last->Scalars, *TLI);
3556 Last->setOperations(S);
3557 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
3559 if (!Last->isGather()) {
3560 for (Value *V : VL) {
3561 const TreeEntry *TE = getTreeEntry(V);
3562 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) &&
3563 "Scalar already in tree!");
3564 if (TE) {
3565 if (TE != Last)
3566 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last);
3567 continue;
3569 ScalarToTreeEntry[V] = Last;
3571 // Update the scheduler bundle to point to this TreeEntry.
3572 ScheduleData *BundleMember = *Bundle;
3573 assert((BundleMember || isa<PHINode>(S.MainOp) ||
3574 isVectorLikeInstWithConstOps(S.MainOp) ||
3575 doesNotNeedToSchedule(VL)) &&
3576 "Bundle and VL out of sync");
3577 if (BundleMember) {
3578 for (Value *V : VL) {
3579 if (doesNotNeedToBeScheduled(V))
3580 continue;
3581 if (!BundleMember)
3582 continue;
3583 BundleMember->TE = Last;
3584 BundleMember = BundleMember->NextInBundle;
3587 assert(!BundleMember && "Bundle and VL out of sync");
3588 } else {
3589 // Build a map from gathered scalars to the nodes where they are used.
3590 bool AllConstsOrCasts = true;
3591 for (Value *V : VL)
3592 if (!isConstant(V)) {
3593 auto *I = dyn_cast<CastInst>(V);
3594 AllConstsOrCasts &= I && I->getType()->isIntegerTy();
3595 if (UserTreeIdx.EdgeIdx != UINT_MAX || !UserTreeIdx.UserTE ||
3596 !UserTreeIdx.UserTE->isGather())
3597 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last);
3599 if (AllConstsOrCasts)
3600 CastMaxMinBWSizes =
3601 std::make_pair(std::numeric_limits<unsigned>::max(), 1);
3602 MustGather.insert(VL.begin(), VL.end());
3605 if (UserTreeIdx.UserTE)
3606 Last->UserTreeIndices.push_back(UserTreeIdx);
3607 return Last;
3610 /// -- Vectorization State --
3611 /// Holds all of the tree entries.
3612 TreeEntry::VecTreeTy VectorizableTree;
3614 #ifndef NDEBUG
3615 /// Debug printer.
3616 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
3617 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
3618 VectorizableTree[Id]->dump();
3619 dbgs() << "\n";
3622 #endif
3624 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
3626 const TreeEntry *getTreeEntry(Value *V) const {
3627 return ScalarToTreeEntry.lookup(V);
3630 /// Check that the operand node of an alternate node does not generate a
3631 /// buildvector sequence. If it does, it is probably not worth building an
3632 /// alternate shuffle, if the number of buildvector operands plus the alternate
3633 /// instruction exceeds the number of buildvector instructions.
3634 /// \param S the instructions state of the analyzed values.
3635 /// \param VL list of the instructions with alternate opcodes.
3636 bool areAltOperandsProfitable(const InstructionsState &S,
3637 ArrayRef<Value *> VL) const;
3639 /// Checks if the specified list of the instructions/values can be vectorized
3640 /// and fills required data before actual scheduling of the instructions.
3641 TreeEntry::EntryState getScalarsVectorizationState(
3642 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE,
3643 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps);
3645 /// Maps a specific scalar to its tree entry.
3646 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry;
3648 /// Maps scalars that are used in several vectorized nodes to the list of
3649 /// those nodes.
3650 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars;
3652 /// Maps a value to the proposed vectorizable size.
3653 SmallDenseMap<Value *, unsigned> InstrElementSize;
3656 /// A list of scalars that we found we need to keep as scalars.
3656 ValueSet MustGather;
3658 /// A set of first non-schedulable values.
3659 ValueSet NonScheduledFirst;
3661 /// A map between the vectorized entries and the last instructions in the
3662 /// bundles. The bundles are built in use order, not in the def order of the
3663 /// instructions. So, we cannot rely directly on the last instruction in the
3664 /// bundle being the last instruction in the program order during the
3665 /// vectorization process, since the basic blocks are modified; these
3666 /// instructions need to be pre-gathered beforehand.
3667 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction;
3669 /// List of gather nodes that depend on other gather/vector nodes and should
3670 /// be emitted after the vector instruction emission process to correctly
3671 /// handle the order of the vector instructions and shuffles.
3672 SetVector<const TreeEntry *> PostponedGathers;
3674 using ValueToGatherNodesMap =
3675 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>;
3676 ValueToGatherNodesMap ValueToGatherNodes;
3678 /// A list of the load entries (node indices) which can be vectorized using a
3679 /// strided or masked-gather approach, but which were attempted to be
3680 /// represented as contiguous loads.
3681 SetVector<unsigned> LoadEntriesToVectorize;
3683 /// True if the graph-node transforming mode is on.
3684 bool IsGraphTransformMode = false;
3686 /// The index of the first gathered load entry in the VectorizableTree.
3687 std::optional<unsigned> GatheredLoadsEntriesFirst;
3689 /// This POD struct describes one external user in the vectorized tree.
3690 struct ExternalUser {
3691 ExternalUser(Value *S, llvm::User *U, int L)
3692 : Scalar(S), User(U), Lane(L) {}
3694 // Which scalar in our function.
3695 Value *Scalar;
3697 // The user that uses the scalar.
3698 llvm::User *User;
3700 // Which lane does the scalar belong to.
3701 int Lane;
3703 using UserList = SmallVector<ExternalUser, 16>;
3705 /// Checks if two instructions may access the same memory.
3707 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
3708 /// is invariant in the calling loop.
3709 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
3710 Instruction *Inst2) {
3711 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2))
3712 return true;
3713 // First check if the result is already in the cache.
3714 AliasCacheKey Key = std::make_pair(Inst1, Inst2);
3715 auto It = AliasCache.find(Key);
3716 if (It != AliasCache.end())
3717 return It->second;
3718 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
3719 // Store the result in the cache.
3720 AliasCache.try_emplace(Key, Aliased);
3721 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased);
3722 return Aliased;
3725 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
3727 /// Cache for alias results.
3728 /// TODO: consider moving this to the AliasAnalysis itself.
3729 DenseMap<AliasCacheKey, bool> AliasCache;
3731 // Cache for pointerMayBeCaptured calls inside AA. This is preserved
3732 // globally through SLP because we don't perform any action which
3733 // invalidates capture results.
3734 BatchAAResults BatchAA;
3736 /// Temporary store for deleted instructions. Instructions will be deleted
3737 /// eventually when the BoUpSLP is destructed. The deferral is required to
3738 /// ensure that there are no incorrect collisions in the AliasCache, which
3739 /// can happen if a new instruction is allocated at the same address as a
3740 /// previously deleted instruction.
3741 DenseSet<Instruction *> DeletedInstructions;
3743 /// Set of the instructions already analyzed as possible reduction roots.
3744 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
3746 /// Set of hashes for the lists of reduction values already analyzed.
3747 DenseSet<size_t> AnalyzedReductionVals;
3749 /// Values already analyzed for minimal bitwidth and found to be
3750 /// non-profitable.
3751 DenseSet<Value *> AnalyzedMinBWVals;
3753 /// A list of values that need to be extracted out of the tree.
3754 /// This list holds pairs of (Internal Scalar : External User). External User
3755 /// can be nullptr, which means that this internal scalar will be used later,
3756 /// after vectorization.
3757 UserList ExternalUses;
3759 /// A list of GEPs which can be replaced by scalar GEPs instead of
3760 /// extractelement instructions.
3761 SmallPtrSet<Value *, 4> ExternalUsesAsOriginalScalar;
3763 /// Values used only by @llvm.assume calls.
3764 SmallPtrSet<const Value *, 32> EphValues;
3766 /// Holds all of the instructions that we gathered, shuffle instructions and
3767 /// extractelements.
3768 SetVector<Instruction *> GatherShuffleExtractSeq;
3770 /// A list of blocks that we are going to CSE.
3771 DenseSet<BasicBlock *> CSEBlocks;
3773 /// List of hashes of vectors of loads which are known to be non-vectorizable.
3774 DenseSet<size_t> ListOfKnonwnNonVectorizableLoads;
3776 /// Contains all scheduling relevant data for an instruction.
3777 /// A ScheduleData either represents a single instruction or a member of an
3778 /// instruction bundle (= a group of instructions which is combined into a
3779 /// vector instruction).
3780 struct ScheduleData {
3781 // The initial value for the dependency counters. It means that the
3782 // dependencies are not calculated yet.
3783 enum { InvalidDeps = -1 };
3785 ScheduleData() = default;
3787 void init(int BlockSchedulingRegionID, Instruction *I) {
3788 FirstInBundle = this;
3789 NextInBundle = nullptr;
3790 NextLoadStore = nullptr;
3791 IsScheduled = false;
3792 SchedulingRegionID = BlockSchedulingRegionID;
3793 clearDependencies();
3794 Inst = I;
3795 TE = nullptr;
3798 /// Verify basic self-consistency properties
3799 void verify() {
3800 if (hasValidDependencies()) {
3801 assert(UnscheduledDeps <= Dependencies && "invariant");
3802 } else {
3803 assert(UnscheduledDeps == Dependencies && "invariant");
3806 if (IsScheduled) {
3807 assert(isSchedulingEntity() &&
3808 "unexpected scheduled state");
3809 for (const ScheduleData *BundleMember = this; BundleMember;
3810 BundleMember = BundleMember->NextInBundle) {
3811 assert(BundleMember->hasValidDependencies() &&
3812 BundleMember->UnscheduledDeps == 0 &&
3813 "unexpected scheduled state");
3814 assert((BundleMember == this || !BundleMember->IsScheduled) &&
3815 "only bundle is marked scheduled");
3819 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
3820 "all bundle members must be in same basic block");
3823 /// Returns true if the dependency information has been calculated.
3824 /// Note that dependency validity can vary between instructions within
3825 /// a single bundle.
3826 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
3828 /// Returns true for single instructions and for bundle representatives
3829 /// (= the head of a bundle).
3830 bool isSchedulingEntity() const { return FirstInBundle == this; }
3832 /// Returns true if it represents an instruction bundle and not only a
3833 /// single instruction.
3834 bool isPartOfBundle() const {
3835 return NextInBundle != nullptr || FirstInBundle != this || TE;
3838 /// Returns true if it is ready for scheduling, i.e. it has no more
3839 /// unscheduled dependent instructions/bundles.
3840 bool isReady() const {
3841 assert(isSchedulingEntity() &&
3842 "can't consider non-scheduling entity for ready list");
3843 return unscheduledDepsInBundle() == 0 && !IsScheduled;
3846 /// Modifies the number of unscheduled dependencies for this instruction,
3847 /// and returns the number of remaining dependencies for the containing
3848 /// bundle.
3849 int incrementUnscheduledDeps(int Incr) {
3850 assert(hasValidDependencies() &&
3851 "increment of unscheduled deps would be meaningless");
3852 UnscheduledDeps += Incr;
3853 return FirstInBundle->unscheduledDepsInBundle();
3856 /// Sets the number of unscheduled dependencies to the number of
3857 /// dependencies.
3858 void resetUnscheduledDeps() {
3859 UnscheduledDeps = Dependencies;
3862 /// Clears all dependency information.
3863 void clearDependencies() {
3864 Dependencies = InvalidDeps;
3865 resetUnscheduledDeps();
3866 MemoryDependencies.clear();
3867 ControlDependencies.clear();
3870 int unscheduledDepsInBundle() const {
3871 assert(isSchedulingEntity() && "only meaningful on the bundle");
3872 int Sum = 0;
3873 for (const ScheduleData *BundleMember = this; BundleMember;
3874 BundleMember = BundleMember->NextInBundle) {
3875 if (BundleMember->UnscheduledDeps == InvalidDeps)
3876 return InvalidDeps;
3877 Sum += BundleMember->UnscheduledDeps;
3879 return Sum;
3882 void dump(raw_ostream &os) const {
3883 if (!isSchedulingEntity()) {
3884 os << "/ " << *Inst;
3885 } else if (NextInBundle) {
3886 os << '[' << *Inst;
3887 ScheduleData *SD = NextInBundle;
3888 while (SD) {
3889 os << ';' << *SD->Inst;
3890 SD = SD->NextInBundle;
3892 os << ']';
3893 } else {
3894 os << *Inst;
3898 Instruction *Inst = nullptr;
3900 /// The TreeEntry that this instruction corresponds to.
3901 TreeEntry *TE = nullptr;
3903 /// Points to the head in an instruction bundle (and always to this for
3904 /// single instructions).
3905 ScheduleData *FirstInBundle = nullptr;
3907 /// Singly linked list of all instructions in a bundle. Null if it is a
3908 /// single instruction.
3909 ScheduleData *NextInBundle = nullptr;
3911 /// Singly linked list of all memory instructions (e.g. load, store, call)
3912 /// in the block - until the end of the scheduling region.
3913 ScheduleData *NextLoadStore = nullptr;
3915 /// The dependent memory instructions.
3916 /// This list is derived on demand in calculateDependencies().
3917 SmallVector<ScheduleData *, 4> MemoryDependencies;
3919 /// List of instructions which this instruction could be control dependent
3920 /// on. Allowing such nodes to be scheduled below this one could introduce
3921 /// a runtime fault which didn't exist in the original program.
3922 /// E.g., this is a load or udiv following a readonly call which infinitely loops.
3923 SmallVector<ScheduleData *, 4> ControlDependencies;
3925 /// This ScheduleData is in the current scheduling region if this matches
3926 /// the current SchedulingRegionID of BlockScheduling.
3927 int SchedulingRegionID = 0;
3929 /// Used for getting a "good" final ordering of instructions.
3930 int SchedulingPriority = 0;
3932 /// The number of dependencies. Consists of the number of users of the
3933 /// instruction plus the number of dependent memory instructions (if any).
3934 /// This value is calculated on demand.
3935 /// If InvalidDeps, the number of dependencies is not calculated yet.
3936 int Dependencies = InvalidDeps;
3938 /// The number of dependencies minus the number of dependencies of scheduled
3939 /// instructions. As soon as this is zero, the instruction/bundle gets ready
3940 /// for scheduling.
3941 /// Note that this is negative as long as Dependencies is not calculated.
3942 int UnscheduledDeps = InvalidDeps;
3944 /// True if this instruction is scheduled (or considered as scheduled in the
3945 /// dry-run).
3946 bool IsScheduled = false;
3949 #ifndef NDEBUG
3950 friend inline raw_ostream &operator<<(raw_ostream &os,
3951 const BoUpSLP::ScheduleData &SD) {
3952 SD.dump(os);
3953 return os;
3955 #endif
3957 friend struct GraphTraits<BoUpSLP *>;
3958 friend struct DOTGraphTraits<BoUpSLP *>;
3960 /// Contains all scheduling data for a basic block.
3961 /// It does not schedule instructions that are not memory read/write
3962 /// instructions and whose operands are either constants, arguments, phis,
3963 /// or instructions from other blocks, or whose users are phis or belong to
3964 /// other blocks. The resulting vector instructions can be placed at the
3965 /// beginning of the basic block without scheduling (if the operands do not
3966 /// need to be scheduled) or at the end of the block (if the users are outside
3967 /// of the block). This saves some compile time and memory used by the
3968 /// compiler.
3969 /// ScheduleData is assigned to each instruction between the boundaries of
3970 /// the tree entry, even to those that are not part of the graph. It is
3971 /// required to correctly follow the dependencies between the instructions and
3972 /// to schedule them correctly. ScheduleData is not allocated for
3973 /// instructions that do not require scheduling, like phis, nodes with only
3974 /// extractelements/insertelements, or nodes whose instructions have
3975 /// uses/operands outside of the block.
3976 struct BlockScheduling {
3977 BlockScheduling(BasicBlock *BB)
3978 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
3980 void clear() {
3981 ReadyInsts.clear();
3982 ScheduleStart = nullptr;
3983 ScheduleEnd = nullptr;
3984 FirstLoadStoreInRegion = nullptr;
3985 LastLoadStoreInRegion = nullptr;
3986 RegionHasStackSave = false;
3988 // Reduce the maximum schedule region size by the size of the
3989 // previous scheduling run.
3990 ScheduleRegionSizeLimit -= ScheduleRegionSize;
3991 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
3992 ScheduleRegionSizeLimit = MinScheduleRegionSize;
3993 ScheduleRegionSize = 0;
3995 // Make a new scheduling region, i.e. all existing ScheduleData is not
3996 // in the new region yet.
3997 ++SchedulingRegionID;
4000 ScheduleData *getScheduleData(Instruction *I) {
4001 if (BB != I->getParent())
4002 // Avoid lookup if can't possibly be in map.
4003 return nullptr;
4004 ScheduleData *SD = ScheduleDataMap.lookup(I);
4005 if (SD && isInSchedulingRegion(SD))
4006 return SD;
4007 return nullptr;
4010 ScheduleData *getScheduleData(Value *V) {
4011 if (auto *I = dyn_cast<Instruction>(V))
4012 return getScheduleData(I);
4013 return nullptr;
4016 bool isInSchedulingRegion(ScheduleData *SD) const {
4017 return SD->SchedulingRegionID == SchedulingRegionID;
4020 /// Marks an instruction as scheduled and puts all dependent ready
4021 /// instructions into the ready-list.
4022 template <typename ReadyListType>
4023 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
4024 SD->IsScheduled = true;
4025 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
4027 for (ScheduleData *BundleMember = SD; BundleMember;
4028 BundleMember = BundleMember->NextInBundle) {
4030 // Handle the def-use chain dependencies.
4032 // Decrement the unscheduled counter and insert to ready list if ready.
4033 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
4034 ScheduleData *OpDef = getScheduleData(I);
4035 if (OpDef && OpDef->hasValidDependencies() &&
4036 OpDef->incrementUnscheduledDeps(-1) == 0) {
4037 // There are no more unscheduled dependencies after
4038 // decrementing, so we can put the dependent instruction
4039 // into the ready list.
4040 ScheduleData *DepBundle = OpDef->FirstInBundle;
4041 assert(!DepBundle->IsScheduled &&
4042 "already scheduled bundle gets ready");
4043 ReadyList.insert(DepBundle);
4044 LLVM_DEBUG(dbgs()
4045 << "SLP: gets ready (def): " << *DepBundle << "\n");
4049 // If BundleMember is a vector bundle, its operands may have been
4050 // reordered during buildTree(). We therefore need to get its operands
4051 // through the TreeEntry.
4052 if (TreeEntry *TE = BundleMember->TE) {
4053 // Need to search for the lane since the tree entry can be reordered.
4054 int Lane = std::distance(TE->Scalars.begin(),
4055 find(TE->Scalars, BundleMember->Inst));
4056 assert(Lane >= 0 && "Lane not set");
4058 // Since the vectorization tree is built recursively, this assertion
4059 // ensures that the tree entry has all operands set before reaching
4060 // this code. A couple of exceptions known at the moment are extracts
4061 // where their second (immediate) operand is not added. Since
4062 // immediates do not affect scheduler behavior, this is considered
4063 // okay.
4064 auto *In = BundleMember->Inst;
4065 assert(
4066 In &&
4067 (isa<ExtractValueInst, ExtractElementInst, IntrinsicInst>(In) ||
4068 In->getNumOperands() == TE->getNumOperands()) &&
4069 "Missed TreeEntry operands?");
4070 (void)In; // fake use to avoid build failure when assertions disabled
4072 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
4073 OpIdx != NumOperands; ++OpIdx)
4074 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
4075 DecrUnsched(I);
4076 } else {
4077 // If BundleMember is a stand-alone instruction, no operand reordering
4078 // has taken place, so we directly access its operands.
4079 for (Use &U : BundleMember->Inst->operands())
4080 if (auto *I = dyn_cast<Instruction>(U.get()))
4081 DecrUnsched(I);
4083 // Handle the memory dependencies.
4084 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
4085 if (MemoryDepSD->hasValidDependencies() &&
4086 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
4087 // There are no more unscheduled dependencies after decrementing,
4088 // so we can put the dependent instruction into the ready list.
4089 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
4090 assert(!DepBundle->IsScheduled &&
4091 "already scheduled bundle gets ready");
4092 ReadyList.insert(DepBundle);
4093 LLVM_DEBUG(dbgs()
4094 << "SLP: gets ready (mem): " << *DepBundle << "\n");
4097 // Handle the control dependencies.
4098 for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
4099 if (DepSD->incrementUnscheduledDeps(-1) == 0) {
4100 // There are no more unscheduled dependencies after decrementing,
4101 // so we can put the dependent instruction into the ready list.
4102 ScheduleData *DepBundle = DepSD->FirstInBundle;
4103 assert(!DepBundle->IsScheduled &&
4104 "already scheduled bundle gets ready");
4105 ReadyList.insert(DepBundle);
4106 LLVM_DEBUG(dbgs()
4107 << "SLP: gets ready (ctl): " << *DepBundle << "\n");
4113 /// Verify basic self-consistency properties of the data structure.
4114 void verify() {
4115 if (!ScheduleStart)
4116 return;
4118 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
4119 ScheduleStart->comesBefore(ScheduleEnd) &&
4120 "Not a valid scheduling region?");
4122 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4123 auto *SD = getScheduleData(I);
4124 if (!SD)
4125 continue;
4126 assert(isInSchedulingRegion(SD) &&
4127 "primary schedule data not in window?");
4128 assert(isInSchedulingRegion(SD->FirstInBundle) &&
4129 "entire bundle in window!");
4130 SD->verify();
4133 for (auto *SD : ReadyInsts) {
4134 assert(SD->isSchedulingEntity() && SD->isReady() &&
4135 "item in ready list not ready?");
4136 (void)SD;
4140 /// Put all instructions into the ReadyList which are ready for scheduling.
4141 template <typename ReadyListType>
4142 void initialFillReadyList(ReadyListType &ReadyList) {
4143 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4144 ScheduleData *SD = getScheduleData(I);
4145 if (SD && SD->isSchedulingEntity() && SD->hasValidDependencies() &&
4146 SD->isReady()) {
4147 ReadyList.insert(SD);
4148 LLVM_DEBUG(dbgs()
4149 << "SLP: initially in ready list: " << *SD << "\n");
4154 /// Build a bundle from the ScheduleData nodes corresponding to the
4155 /// scalar instruction for each lane.
4156 ScheduleData *buildBundle(ArrayRef<Value *> VL);
4158 /// Checks if a bundle of instructions can be scheduled, i.e. has no
4159 /// cyclic dependencies. This is only a dry-run, no instructions are
4160 /// actually moved at this stage.
4161 /// \returns the scheduling bundle. The returned Optional value is not
4162 /// std::nullopt if \p VL is allowed to be scheduled.
4163 std::optional<ScheduleData *>
4164 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
4165 const InstructionsState &S);
4167 /// Un-bundles a group of instructions.
4168 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
4170 /// Allocates schedule data chunk.
4171 ScheduleData *allocateScheduleDataChunks();
4173 /// Extends the scheduling region so that V is inside the region.
4174 /// \returns true if the region size is within the limit.
4175 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
4177 /// Initialize the ScheduleData structures for new instructions in the
4178 /// scheduling region.
4179 void initScheduleData(Instruction *FromI, Instruction *ToI,
4180 ScheduleData *PrevLoadStore,
4181 ScheduleData *NextLoadStore);
4183 /// Updates the dependency information of a bundle and of all instructions/
4184 /// bundles which depend on the original bundle.
4185 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
4186 BoUpSLP *SLP);
4188 /// Sets all instructions in the scheduling region to un-scheduled.
4189 void resetSchedule();
4191 BasicBlock *BB;
4193 /// Simple memory allocation for ScheduleData.
4194 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
4196 /// The size of a ScheduleData array in ScheduleDataChunks.
4197 int ChunkSize;
4199 /// The allocator position in the current chunk, which is the last entry
4200 /// of ScheduleDataChunks.
4201 int ChunkPos;
4203 /// Attaches ScheduleData to Instruction.
4204 /// Note that the mapping survives during all vectorization iterations, i.e.
4205 /// ScheduleData structures are recycled.
4206 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
4208 /// The ready-list for scheduling (only used for the dry-run).
4209 SetVector<ScheduleData *> ReadyInsts;
4211 /// The first instruction of the scheduling region.
4212 Instruction *ScheduleStart = nullptr;
4214 /// The first instruction _after_ the scheduling region.
4215 Instruction *ScheduleEnd = nullptr;
4217 /// The first memory accessing instruction in the scheduling region
4218 /// (can be null).
4219 ScheduleData *FirstLoadStoreInRegion = nullptr;
4221 /// The last memory accessing instruction in the scheduling region
4222 /// (can be null).
4223 ScheduleData *LastLoadStoreInRegion = nullptr;
4225 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling
4226 /// region? Used to optimize the dependence calculation for the
4227 /// common case where there isn't.
4228 bool RegionHasStackSave = false;
4230 /// The current size of the scheduling region.
4231 int ScheduleRegionSize = 0;
4233 /// The maximum size allowed for the scheduling region.
4234 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
4236 /// The ID of the scheduling region. For a new vectorization iteration this
4237 /// is incremented which "removes" all ScheduleData from the region.
4238 /// Make sure that the initial SchedulingRegionID is greater than the
4239 /// initial SchedulingRegionID in ScheduleData (which is 0).
4240 int SchedulingRegionID = 1;
4243 /// Attaches the BlockScheduling structures to basic blocks.
4244 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
4246 /// Performs the "real" scheduling. Done before vectorization is actually
4247 /// performed in a basic block.
4248 void scheduleBlock(BlockScheduling *BS);
4250 /// List of users to ignore during scheduling and that don't need extracting.
4251 const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
4253 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
4254 /// sorted SmallVectors of unsigned.
4255 struct OrdersTypeDenseMapInfo {
4256 static OrdersType getEmptyKey() {
4257 OrdersType V;
4258 V.push_back(~1U);
4259 return V;
4262 static OrdersType getTombstoneKey() {
4263 OrdersType V;
4264 V.push_back(~2U);
4265 return V;
4268 static unsigned getHashValue(const OrdersType &V) {
4269 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
4272 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
4273 return LHS == RHS;
4277 // Analysis and block reference.
4278 Function *F;
4279 ScalarEvolution *SE;
4280 TargetTransformInfo *TTI;
4281 TargetLibraryInfo *TLI;
4282 LoopInfo *LI;
4283 DominatorTree *DT;
4284 AssumptionCache *AC;
4285 DemandedBits *DB;
4286 const DataLayout *DL;
4287 OptimizationRemarkEmitter *ORE;
4289 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
4290 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
4292 /// Instruction builder to construct the vectorized tree.
4293 IRBuilder<TargetFolder> Builder;
4295 /// A map of scalar integer values to the smallest bit width with which they
4296 /// can legally be represented. The values map to (width, signed) pairs,
4297 /// where "width" indicates the minimum bit width and "signed" is True if the
4298 /// value must be sign-extended, rather than zero-extended, back to its
4299 /// original width.
4300 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs;
4302 /// Final size of the reduced vector, if the current graph represents the
4303 /// input for the reduction and it was possible to narrow the size of the
4304 /// reduction.
4305 unsigned ReductionBitWidth = 0;
4307 /// Canonical graph size before the transformations.
4308 unsigned BaseGraphSize = 1;
4310 /// If the tree contains any zext/sext/trunc nodes, contains max-min pair of
4311 /// type sizes, used in the tree.
4312 std::optional<std::pair<unsigned, unsigned>> CastMaxMinBWSizes;
4314 /// Indices of the vectorized nodes, which are supposed to be the roots of the new
4315 /// bitwidth analysis attempt, like trunc, IToFP or ICmp.
4316 DenseSet<unsigned> ExtraBitWidthNodes;
4319 } // end namespace slpvectorizer
4321 template <> struct GraphTraits<BoUpSLP *> {
4322 using TreeEntry = BoUpSLP::TreeEntry;
4324 /// NodeRef has to be a pointer per the GraphWriter.
4325 using NodeRef = TreeEntry *;
4327 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
4329 /// Add the VectorizableTree to the index iterator to be able to return
4330 /// TreeEntry pointers.
4331 struct ChildIteratorType
4332 : public iterator_adaptor_base<
4333 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
4334 ContainerTy &VectorizableTree;
4336 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
4337 ContainerTy &VT)
4338 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
4340 NodeRef operator*() { return I->UserTE; }
4343 static NodeRef getEntryNode(BoUpSLP &R) {
4344 return R.VectorizableTree[0].get();
4347 static ChildIteratorType child_begin(NodeRef N) {
4348 return {N->UserTreeIndices.begin(), N->Container};
4351 static ChildIteratorType child_end(NodeRef N) {
4352 return {N->UserTreeIndices.end(), N->Container};
4355 /// For the node iterator we just need to turn the TreeEntry iterator into a
4356 /// TreeEntry* iterator so that it dereferences to NodeRef.
4357 class nodes_iterator {
4358 using ItTy = ContainerTy::iterator;
4359 ItTy It;
4361 public:
4362 nodes_iterator(const ItTy &It2) : It(It2) {}
4363 NodeRef operator*() { return It->get(); }
4364 nodes_iterator operator++() {
4365 ++It;
4366 return *this;
4368 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
4371 static nodes_iterator nodes_begin(BoUpSLP *R) {
4372 return nodes_iterator(R->VectorizableTree.begin());
4375 static nodes_iterator nodes_end(BoUpSLP *R) {
4376 return nodes_iterator(R->VectorizableTree.end());
4379 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
4382 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
4383 using TreeEntry = BoUpSLP::TreeEntry;
4385 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
4387 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
4388 std::string Str;
4389 raw_string_ostream OS(Str);
4390 OS << Entry->Idx << ".\n";
4391 if (isSplat(Entry->Scalars))
4392 OS << "<splat> ";
4393 for (auto *V : Entry->Scalars) {
4394 OS << *V;
4395 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
4396 return EU.Scalar == V;
4398 OS << " <extract>";
4399 OS << "\n";
4401 return Str;
4404 static std::string getNodeAttributes(const TreeEntry *Entry,
4405 const BoUpSLP *) {
4406 if (Entry->isGather())
4407 return "color=red";
4408 if (Entry->State == TreeEntry::ScatterVectorize ||
4409 Entry->State == TreeEntry::StridedVectorize)
4410 return "color=blue";
4411 return "";
4415 } // end namespace llvm
4417 BoUpSLP::~BoUpSLP() {
4418 SmallVector<WeakTrackingVH> DeadInsts;
4419 for (auto *I : DeletedInstructions) {
4420 if (!I->getParent()) {
4421 // Temporarily insert the instruction back so that it can be erased from its
4422 // parent and from memory later.
4423 if (isa<PHINode>(I))
4424 // Phi nodes must be the very first instructions in the block.
4425 I->insertBefore(F->getEntryBlock(),
4426 F->getEntryBlock().getFirstNonPHIIt());
4427 else
4428 I->insertBefore(F->getEntryBlock().getTerminator());
4429 continue;
4431 for (Use &U : I->operands()) {
4432 auto *Op = dyn_cast<Instruction>(U.get());
4433 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
4434 wouldInstructionBeTriviallyDead(Op, TLI))
4435 DeadInsts.emplace_back(Op);
4437 I->dropAllReferences();
4439 for (auto *I : DeletedInstructions) {
4440 assert(I->use_empty() &&
4441 "trying to erase instruction with users.");
4442 I->eraseFromParent();
4445 // Clean up any dead scalar code feeding the vectorized instructions.
4446 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
4448 #ifdef EXPENSIVE_CHECKS
4449 // If we could guarantee that this call is not extremely slow, we could
4450 // remove the ifdef limitation (see PR47712).
4451 assert(!verifyFunction(*F, &dbgs()));
4452 #endif
4455 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
4456 /// contains the original mask for the scalars reused in the node. The procedure
4457 /// transforms this mask in accordance with the given \p Mask.
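/// For example, with 4 scalars, Reuses = {0, 1, 2, 3} and Mask = {3, 2, 0, 1},
/// the loop below sets Reuses[Mask[I]] = Prev[I], producing Reuses = {2, 3, 1, 0}.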
4458 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
4459 assert(!Mask.empty() && Reuses.size() == Mask.size() &&
4460 "Expected non-empty mask.");
4461 SmallVector<int> Prev(Reuses.begin(), Reuses.end());
4462 Prev.swap(Reuses);
4463 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
4464 if (Mask[I] != PoisonMaskElem)
4465 Reuses[Mask[I]] = Prev[I];
4468 /// Reorders the given \p Order according to the given \p Mask. \p Order is
4469 /// the original order of the scalars. The procedure transforms the provided order
4470 /// in accordance with the given \p Mask. If the resulting \p Order is just an
4471 /// identity order, \p Order is cleared.
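/// For example (illustrative values), with an empty \p Order (identity) and
/// Mask = {2, 0, 1, 3}, the non-bottom-order path below yields
/// Order = {2, 0, 1, 3}; had the result been an identity order, \p Order
/// would have been cleared instead.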
4472 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask,
4473 bool BottomOrder = false) {
4474 assert(!Mask.empty() && "Expected non-empty mask.");
4475 unsigned Sz = Mask.size();
4476 if (BottomOrder) {
4477 SmallVector<unsigned> PrevOrder;
4478 if (Order.empty()) {
4479 PrevOrder.resize(Sz);
4480 std::iota(PrevOrder.begin(), PrevOrder.end(), 0);
4481 } else {
4482 PrevOrder.swap(Order);
4484 Order.assign(Sz, Sz);
4485 for (unsigned I = 0; I < Sz; ++I)
4486 if (Mask[I] != PoisonMaskElem)
4487 Order[I] = PrevOrder[Mask[I]];
4488 if (all_of(enumerate(Order), [&](const auto &Data) {
4489 return Data.value() == Sz || Data.index() == Data.value();
4490 })) {
4491 Order.clear();
4492 return;
4494 fixupOrderingIndices(Order);
4495 return;
4497 SmallVector<int> MaskOrder;
4498 if (Order.empty()) {
4499 MaskOrder.resize(Sz);
4500 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
4501 } else {
4502 inversePermutation(Order, MaskOrder);
4504 reorderReuses(MaskOrder, Mask);
4505 if (ShuffleVectorInst::isIdentityMask(MaskOrder, Sz)) {
4506 Order.clear();
4507 return;
4509 Order.assign(Sz, Sz);
4510 for (unsigned I = 0; I < Sz; ++I)
4511 if (MaskOrder[I] != PoisonMaskElem)
4512 Order[MaskOrder[I]] = I;
4513 fixupOrderingIndices(Order);
4516 std::optional<BoUpSLP::OrdersType>
4517 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
4518 assert(TE.isGather() && "Expected gather node only.");
4519 // Try to find subvector extract/insert patterns and reorder only such
4520 // patterns.
4521 SmallVector<Value *> GatheredScalars(TE.Scalars.begin(), TE.Scalars.end());
4522 Type *ScalarTy = GatheredScalars.front()->getType();
4523 int NumScalars = GatheredScalars.size();
4524 if (!isValidElementType(ScalarTy))
4525 return std::nullopt;
4526 auto *VecTy = getWidenedType(ScalarTy, NumScalars);
4527 int NumParts = TTI->getNumberOfParts(VecTy);
4528 if (NumParts == 0 || NumParts >= NumScalars ||
4529 VecTy->getNumElements() % NumParts != 0 ||
4530 !hasFullVectorsOrPowerOf2(*TTI, VecTy->getElementType(),
4531 VecTy->getNumElements() / NumParts))
4532 NumParts = 1;
4533 SmallVector<int> ExtractMask;
4534 SmallVector<int> Mask;
4535 SmallVector<SmallVector<const TreeEntry *>> Entries;
4536 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> ExtractShuffles =
4537 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts);
4538 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles =
4539 isGatherShuffledEntry(&TE, GatheredScalars, Mask, Entries, NumParts,
4540 /*ForOrder=*/true);
4541 // No shuffled operands - ignore.
4542 if (GatherShuffles.empty() && ExtractShuffles.empty())
4543 return std::nullopt;
4544 OrdersType CurrentOrder(NumScalars, NumScalars);
4545 if (GatherShuffles.size() == 1 &&
4546 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
4547 Entries.front().front()->isSame(TE.Scalars)) {
4548 // Perfect match in the graph, will reuse the previously vectorized
4549 // node. Cost is 0.
4550 std::iota(CurrentOrder.begin(), CurrentOrder.end(), 0);
4551 return CurrentOrder;
4553 auto IsSplatMask = [](ArrayRef<int> Mask) {
4554 int SingleElt = PoisonMaskElem;
4555 return all_of(Mask, [&](int I) {
4556 if (SingleElt == PoisonMaskElem && I != PoisonMaskElem)
4557 SingleElt = I;
4558 return I == PoisonMaskElem || I == SingleElt;
4561 // Exclusive broadcast mask - ignore.
4562 if ((ExtractShuffles.empty() && IsSplatMask(Mask) &&
4563 (Entries.size() != 1 ||
4564 Entries.front().front()->ReorderIndices.empty())) ||
4565 (GatherShuffles.empty() && IsSplatMask(ExtractMask)))
4566 return std::nullopt;
4567 SmallBitVector ShuffledSubMasks(NumParts);
4568 auto TransformMaskToOrder = [&](MutableArrayRef<unsigned> CurrentOrder,
4569 ArrayRef<int> Mask, int PartSz, int NumParts,
4570 function_ref<unsigned(unsigned)> GetVF) {
4571 for (int I : seq<int>(0, NumParts)) {
4572 if (ShuffledSubMasks.test(I))
4573 continue;
4574 const int VF = GetVF(I);
4575 if (VF == 0)
4576 continue;
4577 unsigned Limit = getNumElems(CurrentOrder.size(), PartSz, I);
4578 MutableArrayRef<unsigned> Slice = CurrentOrder.slice(I * PartSz, Limit);
4579 // Shuffle of at least 2 vectors - ignore.
4580 if (any_of(Slice, [&](int I) { return I != NumScalars; })) {
4581 std::fill(Slice.begin(), Slice.end(), NumScalars);
4582 ShuffledSubMasks.set(I);
4583 continue;
4585 // Try to include as many elements from the mask as possible.
4586 int FirstMin = INT_MAX;
4587 bool SecondVecFound = false;
4588 for (int K : seq<int>(Limit)) {
4589 int Idx = Mask[I * PartSz + K];
4590 if (Idx == PoisonMaskElem) {
4591 Value *V = GatheredScalars[I * PartSz + K];
4592 if (isConstant(V) && !isa<PoisonValue>(V)) {
4593 SecondVecFound = true;
4594 break;
4596 continue;
4598 if (Idx < VF) {
4599 if (FirstMin > Idx)
4600 FirstMin = Idx;
4601 } else {
4602 SecondVecFound = true;
4603 break;
4606 FirstMin = (FirstMin / PartSz) * PartSz;
4607 // Shuffle of at least 2 vectors - ignore.
4608 if (SecondVecFound) {
4609 std::fill(Slice.begin(), Slice.end(), NumScalars);
4610 ShuffledSubMasks.set(I);
4611 continue;
4613 for (int K : seq<int>(Limit)) {
4614 int Idx = Mask[I * PartSz + K];
4615 if (Idx == PoisonMaskElem)
4616 continue;
4617 Idx -= FirstMin;
4618 if (Idx >= PartSz) {
4619 SecondVecFound = true;
4620 break;
4622 if (CurrentOrder[I * PartSz + Idx] >
4623 static_cast<unsigned>(I * PartSz + K) &&
4624 CurrentOrder[I * PartSz + Idx] !=
4625 static_cast<unsigned>(I * PartSz + Idx))
4626 CurrentOrder[I * PartSz + Idx] = I * PartSz + K;
4628 // Shuffle of at least 2 vectors - ignore.
4629 if (SecondVecFound) {
4630 std::fill(Slice.begin(), Slice.end(), NumScalars);
4631 ShuffledSubMasks.set(I);
4632 continue;
4636 int PartSz = getPartNumElems(NumScalars, NumParts);
4637 if (!ExtractShuffles.empty())
4638 TransformMaskToOrder(
4639 CurrentOrder, ExtractMask, PartSz, NumParts, [&](unsigned I) {
4640 if (!ExtractShuffles[I])
4641 return 0U;
4642 unsigned VF = 0;
4643 unsigned Sz = getNumElems(TE.getVectorFactor(), PartSz, I);
4644 for (unsigned Idx : seq<unsigned>(Sz)) {
4645 int K = I * PartSz + Idx;
4646 if (ExtractMask[K] == PoisonMaskElem)
4647 continue;
4648 if (!TE.ReuseShuffleIndices.empty())
4649 K = TE.ReuseShuffleIndices[K];
4650 if (K == PoisonMaskElem)
4651 continue;
4652 if (!TE.ReorderIndices.empty())
4653 K = std::distance(TE.ReorderIndices.begin(),
4654 find(TE.ReorderIndices, K));
4655 auto *EI = dyn_cast<ExtractElementInst>(TE.Scalars[K]);
4656 if (!EI)
4657 continue;
4658 VF = std::max(VF, cast<VectorType>(EI->getVectorOperandType())
4659 ->getElementCount()
4660 .getKnownMinValue());
4662 return VF;
4664 // Check special corner case - single shuffle of the same entry.
4665 if (GatherShuffles.size() == 1 && NumParts != 1) {
4666 if (ShuffledSubMasks.any())
4667 return std::nullopt;
4668 PartSz = NumScalars;
4669 NumParts = 1;
4671 if (!Entries.empty())
4672 TransformMaskToOrder(CurrentOrder, Mask, PartSz, NumParts, [&](unsigned I) {
4673 if (!GatherShuffles[I])
4674 return 0U;
4675 return std::max(Entries[I].front()->getVectorFactor(),
4676 Entries[I].back()->getVectorFactor());
4678 int NumUndefs =
4679 count_if(CurrentOrder, [&](int Idx) { return Idx == NumScalars; });
4680 if (ShuffledSubMasks.all() || (NumScalars > 2 && NumUndefs >= NumScalars / 2))
4681 return std::nullopt;
4682 return std::move(CurrentOrder);
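/// Returns true if \p Ptr1 and \p Ptr2 can be considered compatible for a
/// vectorized access: they must share the same underlying object, each must be
/// either a plain pointer or a two-operand GEP, and the GEP indices must either
/// be constants or (unless \p CompareOpcodes is false) instructions with the
/// same opcode.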
4685 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2,
4686 const TargetLibraryInfo &TLI,
4687 bool CompareOpcodes = true) {
4688 if (getUnderlyingObject(Ptr1, RecursionMaxDepth) !=
4689 getUnderlyingObject(Ptr2, RecursionMaxDepth))
4690 return false;
4691 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
4692 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
4693 return (!GEP1 || GEP1->getNumOperands() == 2) &&
4694 (!GEP2 || GEP2->getNumOperands() == 2) &&
4695 (((!GEP1 || isConstant(GEP1->getOperand(1))) &&
4696 (!GEP2 || isConstant(GEP2->getOperand(1)))) ||
4697 !CompareOpcodes ||
4698 (GEP1 && GEP2 &&
4699 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI)
4700 .getOpcode()));
4703 /// Calculates minimal alignment as a common alignment.
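/// For example, the load-vectorization path below uses
///   Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
/// to obtain the smallest alignment among the loads of a bundle.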
4704 template <typename T>
4705 static Align computeCommonAlignment(ArrayRef<Value *> VL) {
4706 Align CommonAlignment = cast<T>(VL.front())->getAlign();
4707 for (Value *V : VL.drop_front())
4708 CommonAlignment = std::min(CommonAlignment, cast<T>(V)->getAlign());
4709 return CommonAlignment;
4712 /// Check if \p Order represents reverse order.
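/// For example, for 4 scalars the reverse order is {3, 2, 1, 0}; entries equal
/// to the order size (i.e. not yet set) are also treated as matching.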
4713 static bool isReverseOrder(ArrayRef<unsigned> Order) {
4714 unsigned Sz = Order.size();
4715 return !Order.empty() && all_of(enumerate(Order), [&](const auto &Pair) {
4716 return Pair.value() == Sz || Sz - Pair.index() - 1 == Pair.value();
4720 /// Checks if the provided list of pointers \p PointerOps represents strided
4721 /// pointers for type ElemTy. If they do not, std::nullopt is returned.
4722 /// Otherwise, if \p Inst is not specified, a just-initialized optional value
4723 /// is returned to show that the pointers represent strided pointers. If
4724 /// \p Inst is specified, the runtime stride is materialized before \p Inst.
4725 /// \returns std::nullopt if the pointers do not have a runtime stride;
4726 /// otherwise nullptr or the actual stride value.
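/// As an illustration (hypothetical pointers), for four i32 loads from %p,
/// %p + 4 * %n, %p + 8 * %n and %p + 12 * %n, the distance between the lowest
/// and highest pointers is 12 * %n and the extracted runtime stride is %n
/// elements; SortedIndices is filled only when the pointers are not already
/// in offset order.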
4727 static std::optional<Value *>
4728 calculateRtStride(ArrayRef<Value *> PointerOps, Type *ElemTy,
4729 const DataLayout &DL, ScalarEvolution &SE,
4730 SmallVectorImpl<unsigned> &SortedIndices,
4731 Instruction *Inst = nullptr) {
4732 SmallVector<const SCEV *> SCEVs;
4733 const SCEV *PtrSCEVLowest = nullptr;
4734 const SCEV *PtrSCEVHighest = nullptr;
4735 // Find lower/upper pointers from the PointerOps (i.e. with lowest and highest
4736 // addresses).
4737 for (Value *Ptr : PointerOps) {
4738 const SCEV *PtrSCEV = SE.getSCEV(Ptr);
4739 if (!PtrSCEV)
4740 return std::nullopt;
4741 SCEVs.push_back(PtrSCEV);
4742 if (!PtrSCEVLowest && !PtrSCEVHighest) {
4743 PtrSCEVLowest = PtrSCEVHighest = PtrSCEV;
4744 continue;
4746 const SCEV *Diff = SE.getMinusSCEV(PtrSCEV, PtrSCEVLowest);
4747 if (isa<SCEVCouldNotCompute>(Diff))
4748 return std::nullopt;
4749 if (Diff->isNonConstantNegative()) {
4750 PtrSCEVLowest = PtrSCEV;
4751 continue;
4753 const SCEV *Diff1 = SE.getMinusSCEV(PtrSCEVHighest, PtrSCEV);
4754 if (isa<SCEVCouldNotCompute>(Diff1))
4755 return std::nullopt;
4756 if (Diff1->isNonConstantNegative()) {
4757 PtrSCEVHighest = PtrSCEV;
4758 continue;
4761 // Dist = PtrSCEVHighest - PtrSCEVLowest;
4762 const SCEV *Dist = SE.getMinusSCEV(PtrSCEVHighest, PtrSCEVLowest);
4763 if (isa<SCEVCouldNotCompute>(Dist))
4764 return std::nullopt;
4765 int Size = DL.getTypeStoreSize(ElemTy);
4766 auto TryGetStride = [&](const SCEV *Dist,
4767 const SCEV *Multiplier) -> const SCEV * {
4768 if (const auto *M = dyn_cast<SCEVMulExpr>(Dist)) {
4769 if (M->getOperand(0) == Multiplier)
4770 return M->getOperand(1);
4771 if (M->getOperand(1) == Multiplier)
4772 return M->getOperand(0);
4773 return nullptr;
4775 if (Multiplier == Dist)
4776 return SE.getConstant(Dist->getType(), 1);
4777 return SE.getUDivExactExpr(Dist, Multiplier);
4779 // Stride_in_elements = Dist / (element_size * (num_elems - 1)).
4780 const SCEV *Stride = nullptr;
4781 if (Size != 1 || SCEVs.size() > 2) {
4782 const SCEV *Sz = SE.getConstant(Dist->getType(), Size * (SCEVs.size() - 1));
4783 Stride = TryGetStride(Dist, Sz);
4784 if (!Stride)
4785 return std::nullopt;
4787 if (!Stride || isa<SCEVConstant>(Stride))
4788 return std::nullopt;
4789 // Iterate through all pointers and check if all distances are
4790 // unique multiples of Stride.
4791 using DistOrdPair = std::pair<int64_t, int>;
4792 auto Compare = llvm::less_first();
4793 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
4794 int Cnt = 0;
4795 bool IsConsecutive = true;
4796 for (const SCEV *PtrSCEV : SCEVs) {
4797 unsigned Dist = 0;
4798 if (PtrSCEV != PtrSCEVLowest) {
4799 const SCEV *Diff = SE.getMinusSCEV(PtrSCEV, PtrSCEVLowest);
4800 const SCEV *Coeff = TryGetStride(Diff, Stride);
4801 if (!Coeff)
4802 return std::nullopt;
4803 const auto *SC = dyn_cast<SCEVConstant>(Coeff);
4804 if (!SC || isa<SCEVCouldNotCompute>(SC))
4805 return std::nullopt;
4806 if (!SE.getMinusSCEV(PtrSCEV, SE.getAddExpr(PtrSCEVLowest,
4807 SE.getMulExpr(Stride, SC)))
4808 ->isZero())
4809 return std::nullopt;
4810 Dist = SC->getAPInt().getZExtValue();
4812 // If the strides are not the same or repeated, we can't vectorize.
4813 if ((Dist / Size) * Size != Dist || (Dist / Size) >= SCEVs.size())
4814 return std::nullopt;
4815 auto Res = Offsets.emplace(Dist, Cnt);
4816 if (!Res.second)
4817 return std::nullopt;
4818 // Consecutive order if the inserted element is the last one.
4819 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
4820 ++Cnt;
4822 if (Offsets.size() != SCEVs.size())
4823 return std::nullopt;
4824 SortedIndices.clear();
4825 if (!IsConsecutive) {
4826 // Fill SortedIndices array only if it is non-consecutive.
4827 SortedIndices.resize(PointerOps.size());
4828 Cnt = 0;
4829 for (const std::pair<int64_t, int> &Pair : Offsets) {
4830 SortedIndices[Cnt] = Pair.second;
4831 ++Cnt;
4834 if (!Inst)
4835 return nullptr;
4836 SCEVExpander Expander(SE, DL, "strided-load-vec");
4837 return Expander.expandCodeFor(Stride, Stride->getType(), Inst);
4840 static std::pair<InstructionCost, InstructionCost>
4841 getGEPCosts(const TargetTransformInfo &TTI, ArrayRef<Value *> Ptrs,
4842 Value *BasePtr, unsigned Opcode, TTI::TargetCostKind CostKind,
4843 Type *ScalarTy, VectorType *VecTy);
4845 /// Returns the cost of the shuffle instructions with the given \p Kind, vector
4846 /// type \p Tp and optional \p Mask. Adds SLP-specific cost estimation for insert
4847 /// subvector pattern.
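/// In particular, a two-source permute whose mask is recognized by
/// ShuffleVectorInst::isInsertSubvectorMask as inserting a subvector from the
/// second source past the end of the first is re-costed as
/// TTI::SK_InsertSubvector on a type widened to the mask size.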
4848 static InstructionCost
4849 getShuffleCost(const TargetTransformInfo &TTI, TTI::ShuffleKind Kind,
4850 VectorType *Tp, ArrayRef<int> Mask = {},
4851 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
4852 int Index = 0, VectorType *SubTp = nullptr,
4853 ArrayRef<const Value *> Args = {}) {
4854 if (Kind != TTI::SK_PermuteTwoSrc)
4855 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
4856 int NumSrcElts = Tp->getElementCount().getKnownMinValue();
4857 int NumSubElts;
4858 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
4859 Mask, NumSrcElts, NumSubElts, Index)) {
4860 if (Index + NumSubElts > NumSrcElts &&
4861 Index + NumSrcElts <= static_cast<int>(Mask.size()))
4862 return TTI.getShuffleCost(
4863 TTI::SK_InsertSubvector,
4864 getWidenedType(Tp->getElementType(), Mask.size()), Mask,
4865 TTI::TCK_RecipThroughput, Index, Tp);
4867 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
4870 BoUpSLP::LoadsState
4871 BoUpSLP::canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
4872 SmallVectorImpl<unsigned> &Order,
4873 SmallVectorImpl<Value *> &PointerOps,
4874 unsigned *BestVF, bool TryRecursiveCheck) const {
4875 // Check that a vectorized load would load the same memory as a scalar
4876 // load. For example, we don't want to vectorize loads that are smaller
4877 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
4878 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
4879 // from such a struct, we read/write packed bits disagreeing with the
4880 // unvectorized version.
4881 if (BestVF)
4882 *BestVF = 0;
4883 if (areKnownNonVectorizableLoads(VL))
4884 return LoadsState::Gather;
4885 Type *ScalarTy = VL0->getType();
4887 if (DL->getTypeSizeInBits(ScalarTy) != DL->getTypeAllocSizeInBits(ScalarTy))
4888 return LoadsState::Gather;
4890 // Make sure all loads in the bundle are simple - we can't vectorize
4891 // atomic or volatile loads.
4892 PointerOps.clear();
4893 const unsigned Sz = VL.size();
4894 PointerOps.resize(Sz);
4895 auto *POIter = PointerOps.begin();
4896 for (Value *V : VL) {
4897 auto *L = cast<LoadInst>(V);
4898 if (!L->isSimple())
4899 return LoadsState::Gather;
4900 *POIter = L->getPointerOperand();
4901 ++POIter;
4904 Order.clear();
4905 // Check the order of pointer operands or that all pointers are the same.
4906 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, Order);
4908 auto *VecTy = getWidenedType(ScalarTy, Sz);
4909 Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
4910 if (!IsSorted) {
4911 if (Sz > MinProfitableStridedLoads && TTI->isTypeLegal(VecTy)) {
4912 if (TTI->isLegalStridedLoadStore(VecTy, CommonAlignment) &&
4913 calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order))
4914 return LoadsState::StridedVectorize;
4917 if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
4918 TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
4919 return LoadsState::Gather;
4921 if (!all_of(PointerOps, [&](Value *P) {
4922 return arePointersCompatible(P, PointerOps.front(), *TLI);
4924 return LoadsState::Gather;
4926 } else {
4927 Value *Ptr0;
4928 Value *PtrN;
4929 if (Order.empty()) {
4930 Ptr0 = PointerOps.front();
4931 PtrN = PointerOps.back();
4932 } else {
4933 Ptr0 = PointerOps[Order.front()];
4934 PtrN = PointerOps[Order.back()];
4936 std::optional<int> Diff =
4937 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
4938 // Check that the sorted loads are consecutive.
4939 if (static_cast<unsigned>(*Diff) == Sz - 1)
4940 return LoadsState::Vectorize;
4941 if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
4942 TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
4943 return LoadsState::Gather;
4944 // Simple check if not a strided access - clear order.
4945 bool IsPossibleStrided = *Diff % (Sz - 1) == 0;
4946 // Try to generate strided load node if:
4947 // 1. Target with strided load support is detected.
4948 // 2. The number of loads is greater than MinProfitableStridedLoads,
4949 // or the potential stride <= MaxProfitableLoadStride and the
4950 // potential stride is power-of-2 (to avoid perf regressions for the very
4951 // small number of loads) and max distance > number of loads, or potential
4952 // stride is -1.
4953 // 3. The loads are ordered, or number of unordered loads <=
4954 // MaxProfitableUnorderedLoads, or loads are in reversed order.
4955 // (this check is to avoid extra costs for very expensive shuffles).
4956 // 4. Any pointer operand is an instruction with the users outside of the
4957 // current graph (for masked gathers extra extractelement instructions
4958 // might be required).
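// For example (illustrative numbers), 4 loads in exactly reversed order have
// *Diff == -3, which satisfies condition 2 (potential stride -1); the node
// becomes a strided load only if the target reports such loads as legal and
// all per-pointer distances are distinct multiples of the stride.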
4959 auto IsAnyPointerUsedOutGraph =
4960 IsPossibleStrided && any_of(PointerOps, [&](Value *V) {
4961 return isa<Instruction>(V) && any_of(V->users(), [&](User *U) {
4962 return !getTreeEntry(U) && !MustGather.contains(U);
4965 const unsigned AbsoluteDiff = std::abs(*Diff);
4966 if (IsPossibleStrided && (IsAnyPointerUsedOutGraph ||
4967 ((Sz > MinProfitableStridedLoads ||
4968 (AbsoluteDiff <= MaxProfitableLoadStride * Sz &&
4969 has_single_bit(AbsoluteDiff))) &&
4970 AbsoluteDiff > Sz) ||
4971 *Diff == -(static_cast<int>(Sz) - 1))) {
4972 int Stride = *Diff / static_cast<int>(Sz - 1);
4973 if (*Diff == Stride * static_cast<int>(Sz - 1)) {
4974 Align Alignment =
4975 cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
4976 ->getAlign();
4977 if (TTI->isLegalStridedLoadStore(VecTy, Alignment)) {
4978 // Iterate through all pointers and check if all distances are
4979 // unique multiples of Stride.
4980 SmallSet<int, 4> Dists;
4981 for (Value *Ptr : PointerOps) {
4982 int Dist = 0;
4983 if (Ptr == PtrN)
4984 Dist = *Diff;
4985 else if (Ptr != Ptr0)
4986 Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
4987 // If the strides are not the same or repeated, we can't
4988 // vectorize.
4989 if (((Dist / Stride) * Stride) != Dist ||
4990 !Dists.insert(Dist).second)
4991 break;
4993 if (Dists.size() == Sz)
4994 return LoadsState::StridedVectorize;
4999 // Correctly compare the cost of loads + shuffles rather than
5000 // strided/masked gather loads. Returns true if the vectorized + shuffles
5001 // representation is better than just a gather.
5002 auto CheckForShuffledLoads = [&, &TTI = *TTI](Align CommonAlignment,
5003 unsigned *BestVF,
5004 bool ProfitableGatherPointers) {
5005 if (BestVF)
5006 *BestVF = 0;
5007 // Compare masked gather cost and loads + insert subvector costs.
5008 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5009 auto [ScalarGEPCost, VectorGEPCost] =
5010 getGEPCosts(TTI, PointerOps, PointerOps.front(),
5011 Instruction::GetElementPtr, CostKind, ScalarTy, VecTy);
5012 // Estimate the cost of masked gather GEP. If not a splat, roughly
5013 // estimate as a buildvector, otherwise estimate as splat.
5014 APInt DemandedElts = APInt::getAllOnes(VecTy->getNumElements());
5015 VectorType *PtrVecTy =
5016 getWidenedType(PointerOps.front()->getType()->getScalarType(),
5017 VecTy->getNumElements());
5018 if (static_cast<unsigned>(count_if(
5019 PointerOps, IsaPred<GetElementPtrInst>)) < PointerOps.size() - 1 ||
5020 any_of(PointerOps, [&](Value *V) {
5021 return getUnderlyingObject(V) !=
5022 getUnderlyingObject(PointerOps.front());
5024 VectorGEPCost += TTI.getScalarizationOverhead(
5025 PtrVecTy, DemandedElts, /*Insert=*/true, /*Extract=*/false, CostKind);
5026 else
5027 VectorGEPCost +=
5028 TTI.getScalarizationOverhead(
5029 PtrVecTy, APInt::getOneBitSet(VecTy->getNumElements(), 0),
5030 /*Insert=*/true, /*Extract=*/false, CostKind) +
5031 ::getShuffleCost(TTI, TTI::SK_Broadcast, PtrVecTy, {}, CostKind);
5032 // The cost of scalar loads.
5033 InstructionCost ScalarLoadsCost =
5034 std::accumulate(VL.begin(), VL.end(), InstructionCost(),
5035 [&](InstructionCost C, Value *V) {
5036 return C + TTI.getInstructionCost(
5037 cast<Instruction>(V), CostKind);
5038 }) +
5039 ScalarGEPCost;
5040 // The cost of masked gather.
5041 InstructionCost MaskedGatherCost =
5042 TTI.getGatherScatterOpCost(
5043 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
5044 /*VariableMask=*/false, CommonAlignment, CostKind) +
5045 (ProfitableGatherPointers ? 0 : VectorGEPCost);
5046 InstructionCost GatherCost =
5047 TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
5048 /*Extract=*/false, CostKind) +
5049 ScalarLoadsCost;
5050 // The list of loads is small, or the partial check was already performed -
5051 // directly compare the masked gather cost and the gather cost.
5052 constexpr unsigned ListLimit = 4;
5053 if (!TryRecursiveCheck || VL.size() < ListLimit)
5054 return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
5056 // FIXME: The following code has not been updated for non-power-of-2
5057 // vectors. The splitting logic here does not cover the original
5058 // vector if the vector factor is not a power of two.
5059 if (!has_single_bit(VL.size()))
5060 return false;
5062 unsigned Sz = DL->getTypeSizeInBits(ScalarTy);
5063 unsigned MinVF = getMinVF(2 * Sz);
5064 DemandedElts.clearAllBits();
5065 // Iterate through possible vectorization factors and check if vectorized +
5066 // shuffles is better than just gather.
5067 for (unsigned VF = VL.size() / 2; VF >= MinVF; VF /= 2) {
5068 SmallVector<LoadsState> States;
5069 for (unsigned Cnt = 0, End = VL.size(); Cnt + VF <= End; Cnt += VF) {
5070 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
5071 SmallVector<unsigned> Order;
5072 SmallVector<Value *> PointerOps;
5073 LoadsState LS =
5074 canVectorizeLoads(Slice, Slice.front(), Order, PointerOps, BestVF,
5075 /*TryRecursiveCheck=*/false);
5076 // Check that the sorted loads are consecutive.
5077 if (LS == LoadsState::Gather) {
5078 if (BestVF) {
5079 DemandedElts.setAllBits();
5080 break;
5082 DemandedElts.setBits(Cnt, Cnt + VF);
5083 continue;
5085 // If the reorder is needed - consider it as a high-cost masked gather for now.
5086 if ((LS == LoadsState::Vectorize ||
5087 LS == LoadsState::StridedVectorize) &&
5088 !Order.empty() && !isReverseOrder(Order))
5089 LS = LoadsState::ScatterVectorize;
5090 States.push_back(LS);
5092 if (DemandedElts.isAllOnes())
5093 // All loads gathered - try smaller VF.
5094 continue;
5095 // Can be vectorized later as a series of loads/insertelements.
5096 InstructionCost VecLdCost = 0;
5097 if (!DemandedElts.isZero()) {
5098 VecLdCost =
5099 TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
5100 /*Extract=*/false, CostKind) +
5101 ScalarGEPCost;
5102 for (unsigned Idx : seq<unsigned>(VL.size()))
5103 if (DemandedElts[Idx])
5104 VecLdCost +=
5105 TTI.getInstructionCost(cast<Instruction>(VL[Idx]), CostKind);
5107 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
5108 auto *SubVecTy = getWidenedType(ScalarTy, VF);
5109 for (auto [I, LS] : enumerate(States)) {
5110 auto *LI0 = cast<LoadInst>(VL[I * VF]);
5111 InstructionCost VectorGEPCost =
5112 (LS == LoadsState::ScatterVectorize && ProfitableGatherPointers)
5114 : getGEPCosts(TTI, ArrayRef(PointerOps).slice(I * VF, VF),
5115 LI0->getPointerOperand(),
5116 Instruction::GetElementPtr, CostKind, ScalarTy,
5117 SubVecTy)
5118 .second;
5119 if (LS == LoadsState::ScatterVectorize) {
5120 if (static_cast<unsigned>(
5121 count_if(PointerOps, IsaPred<GetElementPtrInst>)) <
5122 PointerOps.size() - 1 ||
5123 any_of(PointerOps, [&](Value *V) {
5124 return getUnderlyingObject(V) !=
5125 getUnderlyingObject(PointerOps.front());
5127 VectorGEPCost += TTI.getScalarizationOverhead(
5128 SubVecTy, APInt::getAllOnes(VF),
5129 /*Insert=*/true, /*Extract=*/false, CostKind);
5130 else
5131 VectorGEPCost +=
5132 TTI.getScalarizationOverhead(
5133 SubVecTy, APInt::getOneBitSet(ScalarTyNumElements * VF, 0),
5134 /*Insert=*/true, /*Extract=*/false, CostKind) +
5135 ::getShuffleCost(TTI, TTI::SK_Broadcast, SubVecTy, {},
5136 CostKind);
5138 switch (LS) {
5139 case LoadsState::Vectorize:
5140 VecLdCost +=
5141 TTI.getMemoryOpCost(Instruction::Load, SubVecTy, LI0->getAlign(),
5142 LI0->getPointerAddressSpace(), CostKind,
5143 TTI::OperandValueInfo()) +
5144 VectorGEPCost;
5145 break;
5146 case LoadsState::StridedVectorize:
5147 VecLdCost += TTI.getStridedMemoryOpCost(Instruction::Load, SubVecTy,
5148 LI0->getPointerOperand(),
5149 /*VariableMask=*/false,
5150 CommonAlignment, CostKind) +
5151 VectorGEPCost;
5152 break;
5153 case LoadsState::ScatterVectorize:
5154 VecLdCost += TTI.getGatherScatterOpCost(Instruction::Load, SubVecTy,
5155 LI0->getPointerOperand(),
5156 /*VariableMask=*/false,
5157 CommonAlignment, CostKind) +
5158 VectorGEPCost;
5159 break;
5160 case LoadsState::Gather:
5161 // Gathers are already calculated - ignore.
5162 continue;
5164 SmallVector<int> ShuffleMask(VL.size());
5165 for (int Idx : seq<int>(0, VL.size()))
5166 ShuffleMask[Idx] = Idx / VF == I ? VL.size() + Idx % VF : Idx;
5167 if (I > 0)
5168 VecLdCost +=
5169 ::getShuffleCost(TTI, TTI::SK_InsertSubvector, VecTy, ShuffleMask,
5170 CostKind, I * VF, SubVecTy);
5172 // If masked gather cost is higher - better to vectorize, so
5173 // consider it as a gather node. It will be better estimated
5174 // later.
5175 if (MaskedGatherCost >= VecLdCost &&
5176 VecLdCost - GatherCost < -SLPCostThreshold) {
5177 if (BestVF)
5178 *BestVF = VF;
5179 return true;
5182 return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
5184 // TODO: need to improve analysis of the pointers, if not all of them are
5185 // GEPs or have > 2 operands, we end up with a gather node, which just
5186 // increases the cost.
5187 Loop *L = LI->getLoopFor(cast<LoadInst>(VL0)->getParent());
5188 bool ProfitableGatherPointers =
5189 L && Sz > 2 && static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
5190 return L->isLoopInvariant(V);
5191 })) <= Sz / 2;
5192 if (ProfitableGatherPointers || all_of(PointerOps, [](Value *P) {
5193 auto *GEP = dyn_cast<GetElementPtrInst>(P);
5194 return (!GEP && doesNotNeedToBeScheduled(P)) ||
5195 (GEP && GEP->getNumOperands() == 2 &&
5196 isa<Constant, Instruction>(GEP->getOperand(1)));
5197 })) {
5198 // Check if potential masked gather can be represented as series
5199 // of loads + insertsubvectors.
5200 // If masked gather cost is higher - better to vectorize, so
5201 // consider it as a gather node. It will be better estimated
5202 // later.
5203 if (!TryRecursiveCheck || !CheckForShuffledLoads(CommonAlignment, BestVF,
5204 ProfitableGatherPointers))
5205 return LoadsState::ScatterVectorize;
5208 return LoadsState::Gather;
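/// Groups the pointer operands in \p VL by (basic block, underlying object)
/// base, sorts the pointers within each group by constant offset and, if the
/// offsets form consecutive runs, fills \p SortedIndices with a permutation
/// that places related pointers next to each other. Returns false if no
/// useful clustering is found.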
5211 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL,
5212 ArrayRef<BasicBlock *> BBs, Type *ElemTy,
5213 const DataLayout &DL, ScalarEvolution &SE,
5214 SmallVectorImpl<unsigned> &SortedIndices) {
5215 assert(
5216 all_of(VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
5217 "Expected list of pointer operands.");
5218 // Map from bases to a vector of (Ptr, Offset, OrigIdx) tuples, into which we
5219 // insert each Ptr, sort by offset, and return the sorted indices so that
5220 // related values end up next to one another.
5221 SmallMapVector<std::pair<BasicBlock *, Value *>,
5222 SmallVector<SmallVector<std::tuple<Value *, int, unsigned>>>, 8>
5223 Bases;
5224 Bases
5225 .try_emplace(std::make_pair(
5226 BBs.front(), getUnderlyingObject(VL.front(), RecursionMaxDepth)))
5227 .first->second.emplace_back().emplace_back(VL.front(), 0U, 0U);
5229 SortedIndices.clear();
5230 for (auto [Cnt, Ptr] : enumerate(VL.drop_front())) {
5231 auto Key = std::make_pair(BBs[Cnt + 1],
5232 getUnderlyingObject(Ptr, RecursionMaxDepth));
5233 bool Found = any_of(Bases.try_emplace(Key).first->second,
5234 [&, &Cnt = Cnt, &Ptr = Ptr](auto &Base) {
5235 std::optional<int> Diff = getPointersDiff(
5236 ElemTy, std::get<0>(Base.front()), ElemTy,
5237 Ptr, DL, SE,
5238 /*StrictCheck=*/true);
5239 if (!Diff)
5240 return false;
5242 Base.emplace_back(Ptr, *Diff, Cnt + 1);
5243 return true;
5246 if (!Found) {
5247 // If we haven't found enough to usefully cluster, return early.
5248 if (Bases.size() > VL.size() / 2 - 1)
5249 return false;
5251 // Not found already - add a new Base
5252 Bases.find(Key)->second.emplace_back().emplace_back(Ptr, 0, Cnt + 1);
5256 if (Bases.size() == VL.size())
5257 return false;
5259 if (Bases.size() == 1 && (Bases.front().second.size() == 1 ||
5260 Bases.front().second.size() == VL.size()))
5261 return false;
5263 // For each of the bases sort the pointers by Offset and check if any of the
5264 // bases become consecutively allocated.
5265 auto ComparePointers = [](Value *Ptr1, Value *Ptr2) {
5266 SmallPtrSet<Value *, 13> FirstPointers;
5267 SmallPtrSet<Value *, 13> SecondPointers;
5268 Value *P1 = Ptr1;
5269 Value *P2 = Ptr2;
5270 if (P1 == P2)
5271 return false;
5272 unsigned Depth = 0;
5273 while (!FirstPointers.contains(P2) && !SecondPointers.contains(P1) &&
5274 Depth <= RecursionMaxDepth) {
5275 FirstPointers.insert(P1);
5276 SecondPointers.insert(P2);
5277 P1 = getUnderlyingObject(P1, /*MaxLookup=*/1);
5278 P2 = getUnderlyingObject(P2, /*MaxLookup=*/1);
5279 ++Depth;
5281 assert((FirstPointers.contains(P2) || SecondPointers.contains(P1)) &&
5282 "Unable to find matching root.");
5283 return FirstPointers.contains(P2) && !SecondPointers.contains(P1);
5285 for (auto &Base : Bases) {
5286 for (auto &Vec : Base.second) {
5287 if (Vec.size() > 1) {
5288 stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
5289 const std::tuple<Value *, int, unsigned> &Y) {
5290 return std::get<1>(X) < std::get<1>(Y);
5292 int InitialOffset = std::get<1>(Vec[0]);
5293 bool AnyConsecutive =
5294 all_of(enumerate(Vec), [InitialOffset](const auto &P) {
5295 return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
5297 // Fill the SortedIndices array only if it looks worthwhile to sort the
5298 // ptrs.
5299 if (!AnyConsecutive)
5300 return false;
5303 stable_sort(Base.second, [&](const auto &V1, const auto &V2) {
5304 return ComparePointers(std::get<0>(V1.front()), std::get<0>(V2.front()));
5308 for (auto &T : Bases)
5309 for (const auto &Vec : T.second)
5310 for (const auto &P : Vec)
5311 SortedIndices.push_back(std::get<2>(P));
5313 assert(SortedIndices.size() == VL.size() &&
5314 "Expected SortedIndices to be the size of VL");
5315 return true;
5318 std::optional<BoUpSLP::OrdersType>
5319 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
5320 assert(TE.isGather() && "Expected gather node only.");
5321 Type *ScalarTy = TE.Scalars[0]->getType();
5323 SmallVector<Value *> Ptrs;
5324 Ptrs.reserve(TE.Scalars.size());
5325 SmallVector<BasicBlock *> BBs;
5326 BBs.reserve(TE.Scalars.size());
5327 for (Value *V : TE.Scalars) {
5328 auto *L = dyn_cast<LoadInst>(V);
5329 if (!L || !L->isSimple())
5330 return std::nullopt;
5331 Ptrs.push_back(L->getPointerOperand());
5332 BBs.push_back(L->getParent());
5335 BoUpSLP::OrdersType Order;
5336 if (!LoadEntriesToVectorize.contains(TE.Idx) &&
5337 clusterSortPtrAccesses(Ptrs, BBs, ScalarTy, *DL, *SE, Order))
5338 return std::move(Order);
5339 return std::nullopt;
5342 /// Check if two insertelement instructions are from the same buildvector.
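/// For example (illustrative IR), in the chain
///   %i0 = insertelement <4 x i32> poison, i32 %a, i32 0
///   %i1 = insertelement <4 x i32> %i0, i32 %b, i32 1
/// %i0 and %i1 come from the same buildvector: following the vector operand
/// through \p GetBaseOperand from %i1 reaches %i0 without reusing any
/// insertion index.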
5343 static bool areTwoInsertFromSameBuildVector(
5344 InsertElementInst *VU, InsertElementInst *V,
5345 function_ref<Value *(InsertElementInst *)> GetBaseOperand) {
5346 // Instructions must be from the same basic blocks.
5347 if (VU->getParent() != V->getParent())
5348 return false;
5349 // Checks if 2 insertelements are from the same buildvector.
5350 if (VU->getType() != V->getType())
5351 return false;
5352 // Inserts with multiple uses are separate nodes.
5353 if (!VU->hasOneUse() && !V->hasOneUse())
5354 return false;
5355 auto *IE1 = VU;
5356 auto *IE2 = V;
5357 std::optional<unsigned> Idx1 = getElementIndex(IE1);
5358 std::optional<unsigned> Idx2 = getElementIndex(IE2);
5359 if (!Idx1 || !Idx2)
5360 return false;
5361 // Go through the vector operand of insertelement instructions trying to find
5362 // either VU as the original vector for IE2 or V as the original vector for
5363 // IE1.
5364 SmallBitVector ReusedIdx(
5365 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue());
5366 bool IsReusedIdx = false;
5367 do {
5368 if (IE2 == VU && !IE1)
5369 return VU->hasOneUse();
5370 if (IE1 == V && !IE2)
5371 return V->hasOneUse();
5372 if (IE1 && IE1 != V) {
5373 unsigned Idx1 = getElementIndex(IE1).value_or(*Idx2);
5374 IsReusedIdx |= ReusedIdx.test(Idx1);
5375 ReusedIdx.set(Idx1);
5376 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx)
5377 IE1 = nullptr;
5378 else
5379 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1));
5381 if (IE2 && IE2 != VU) {
5382 unsigned Idx2 = getElementIndex(IE2).value_or(*Idx1);
5383 IsReusedIdx |= ReusedIdx.test(Idx2);
5384 ReusedIdx.set(Idx2);
5385 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx)
5386 IE2 = nullptr;
5387 else
5388 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2));
5390 } while (!IsReusedIdx && (IE1 || IE2));
5391 return false;
5394 std::optional<BoUpSLP::OrdersType>
5395 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
5396 // No need to reorder if we need to shuffle reuses - we still need to shuffle
5397 // the node.
5398 if (!TE.ReuseShuffleIndices.empty()) {
5399 // FIXME: Support ReuseShuffleIndices for non-power-of-two vectors.
5400 assert(!TE.hasNonWholeRegisterOrNonPowerOf2Vec(*TTI) &&
5401 "Reshuffling scalars not yet supported for nodes with padding");
5403 if (isSplat(TE.Scalars))
5404 return std::nullopt;
5405 // Check if reuse shuffle indices can be improved by reordering.
5406 // For this, check that the reuse mask is "clustered", i.e. each scalar value
5407 // is used once in each submask of size <number_of_scalars>.
5408 // Example: 4 scalar values.
5409 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered.
5410 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because
5411 // element 3 is used twice in the second submask.
5412 unsigned Sz = TE.Scalars.size();
5413 if (TE.isGather()) {
5414 if (std::optional<OrdersType> CurrentOrder =
5415 findReusedOrderedScalars(TE)) {
5416 SmallVector<int> Mask;
5417 fixupOrderingIndices(*CurrentOrder);
5418 inversePermutation(*CurrentOrder, Mask);
5419 ::addMask(Mask, TE.ReuseShuffleIndices);
5420 OrdersType Res(TE.getVectorFactor(), TE.getVectorFactor());
5421 unsigned Sz = TE.Scalars.size();
5422 for (int K = 0, E = TE.getVectorFactor() / Sz; K < E; ++K) {
5423 for (auto [I, Idx] : enumerate(ArrayRef(Mask).slice(K * Sz, Sz)))
5424 if (Idx != PoisonMaskElem)
5425 Res[Idx + K * Sz] = I + K * Sz;
5427 return std::move(Res);
5430 if (Sz == 2 && TE.getVectorFactor() == 4 &&
5431 TTI->getNumberOfParts(getWidenedType(TE.Scalars.front()->getType(),
5432 2 * TE.getVectorFactor())) == 1)
5433 return std::nullopt;
5434 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
5435 Sz)) {
5436 SmallVector<int> ReorderMask(Sz, PoisonMaskElem);
5437 if (TE.ReorderIndices.empty())
5438 std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
5439 else
5440 inversePermutation(TE.ReorderIndices, ReorderMask);
5441 ::addMask(ReorderMask, TE.ReuseShuffleIndices);
5442 unsigned VF = ReorderMask.size();
5443 OrdersType ResOrder(VF, VF);
5444 unsigned NumParts = divideCeil(VF, Sz);
5445 SmallBitVector UsedVals(NumParts);
5446 for (unsigned I = 0; I < VF; I += Sz) {
5447 int Val = PoisonMaskElem;
5448 unsigned UndefCnt = 0;
5449 unsigned Limit = std::min(Sz, VF - I);
5450 if (any_of(ArrayRef(ReorderMask).slice(I, Limit),
5451 [&](int Idx) {
5452 if (Val == PoisonMaskElem && Idx != PoisonMaskElem)
5453 Val = Idx;
5454 if (Idx == PoisonMaskElem)
5455 ++UndefCnt;
5456 return Idx != PoisonMaskElem && Idx != Val;
5457 }) ||
5458 Val >= static_cast<int>(NumParts) || UsedVals.test(Val) ||
5459 UndefCnt > Sz / 2)
5460 return std::nullopt;
5461 UsedVals.set(Val);
5462 for (unsigned K = 0; K < NumParts; ++K) {
5463 unsigned Idx = Val + Sz * K;
5464 if (Idx < VF)
5465 ResOrder[Idx] = I + K;
5468 return std::move(ResOrder);
5470 unsigned VF = TE.getVectorFactor();
5471 // Try build correct order for extractelement instructions.
5472 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(),
5473 TE.ReuseShuffleIndices.end());
5474 if (TE.getOpcode() == Instruction::ExtractElement && !TE.isAltShuffle() &&
5475 all_of(TE.Scalars, [Sz](Value *V) {
5476 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
5477 return Idx && *Idx < Sz;
5478 })) {
5479 SmallVector<int> ReorderMask(Sz, PoisonMaskElem);
5480 if (TE.ReorderIndices.empty())
5481 std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
5482 else
5483 inversePermutation(TE.ReorderIndices, ReorderMask);
5484 for (unsigned I = 0; I < VF; ++I) {
5485 int &Idx = ReusedMask[I];
5486 if (Idx == PoisonMaskElem)
5487 continue;
5488 Value *V = TE.Scalars[ReorderMask[Idx]];
5489 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
5490 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI));
5493 // Build the order of VF size; the reuses shuffles need to be reordered, as
5494 // they are always of VF size.
5495 OrdersType ResOrder(VF);
5496 std::iota(ResOrder.begin(), ResOrder.end(), 0);
5497 auto *It = ResOrder.begin();
5498 for (unsigned K = 0; K < VF; K += Sz) {
5499 OrdersType CurrentOrder(TE.ReorderIndices);
5500 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)};
5501 if (SubMask.front() == PoisonMaskElem)
5502 std::iota(SubMask.begin(), SubMask.end(), 0);
5503 reorderOrder(CurrentOrder, SubMask);
5504 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; });
5505 std::advance(It, Sz);
5507 if (TE.isGather() && all_of(enumerate(ResOrder), [](const auto &Data) {
5508 return Data.index() == Data.value();
5510 return std::nullopt; // No need to reorder.
5511 return std::move(ResOrder);
5513 if (TE.State == TreeEntry::StridedVectorize && !TopToBottom &&
5514 any_of(TE.UserTreeIndices,
5515 [](const EdgeInfo &EI) {
5516 return !Instruction::isBinaryOp(EI.UserTE->getOpcode());
5517 }) &&
5518 (TE.ReorderIndices.empty() || isReverseOrder(TE.ReorderIndices)))
5519 return std::nullopt;
5520 if ((TE.State == TreeEntry::Vectorize ||
5521 TE.State == TreeEntry::StridedVectorize) &&
5522 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
5523 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp()))) &&
5524 !TE.isAltShuffle())
5525 return TE.ReorderIndices;
5526 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) {
5527 if (!TE.ReorderIndices.empty())
5528 return TE.ReorderIndices;
5530 SmallVector<Instruction *> UserBVHead(TE.Scalars.size());
5531 for (auto [I, V] : zip(UserBVHead, TE.Scalars)) {
5532 if (!V->hasNUsesOrMore(1))
5533 continue;
5534 auto *II = dyn_cast<InsertElementInst>(*V->user_begin());
5535 if (!II)
5536 continue;
5537 Instruction *BVHead = nullptr;
5538 BasicBlock *BB = II->getParent();
5539 while (II && II->hasOneUse() && II->getParent() == BB) {
5540 BVHead = II;
5541 II = dyn_cast<InsertElementInst>(II->getOperand(0));
5543 I = BVHead;
5546 auto CompareByBasicBlocks = [&](BasicBlock *BB1, BasicBlock *BB2) {
5547 assert(BB1 != BB2 && "Expected different basic blocks.");
5548 auto *NodeA = DT->getNode(BB1);
5549 auto *NodeB = DT->getNode(BB2);
5550 assert(NodeA && "Should only process reachable instructions");
5551 assert(NodeB && "Should only process reachable instructions");
5552 assert((NodeA == NodeB) ==
5553 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
5554 "Different nodes should have different DFS numbers");
5555 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
5557 auto PHICompare = [&](unsigned I1, unsigned I2) {
5558 Value *V1 = TE.Scalars[I1];
5559 Value *V2 = TE.Scalars[I2];
5560 if (V1 == V2 || (V1->getNumUses() == 0 && V2->getNumUses() == 0))
5561 return false;
5562 if (V1->getNumUses() < V2->getNumUses())
5563 return true;
5564 if (V1->getNumUses() > V2->getNumUses())
5565 return false;
5566 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin());
5567 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin());
5568 if (FirstUserOfPhi1->getParent() != FirstUserOfPhi2->getParent())
5569 return CompareByBasicBlocks(FirstUserOfPhi1->getParent(),
5570 FirstUserOfPhi2->getParent());
5571 auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1);
5572 auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2);
5573 auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1);
5574 auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2);
5575 if (IE1 && !IE2)
5576 return true;
5577 if (!IE1 && IE2)
5578 return false;
5579 if (IE1 && IE2) {
5580 if (UserBVHead[I1] && !UserBVHead[I2])
5581 return true;
5582 if (!UserBVHead[I1])
5583 return false;
5584 if (UserBVHead[I1] == UserBVHead[I2])
5585 return getElementIndex(IE1) < getElementIndex(IE2);
5586 if (UserBVHead[I1]->getParent() != UserBVHead[I2]->getParent())
5587 return CompareByBasicBlocks(UserBVHead[I1]->getParent(),
5588 UserBVHead[I2]->getParent());
5589 return UserBVHead[I1]->comesBefore(UserBVHead[I2]);
5591 if (EE1 && !EE2)
5592 return true;
5593 if (!EE1 && EE2)
5594 return false;
5595 if (EE1 && EE2) {
5596 auto *Inst1 = dyn_cast<Instruction>(EE1->getOperand(0));
5597 auto *Inst2 = dyn_cast<Instruction>(EE2->getOperand(0));
5598 auto *P1 = dyn_cast<Argument>(EE1->getOperand(0));
5599 auto *P2 = dyn_cast<Argument>(EE2->getOperand(0));
5600 if (!Inst2 && !P2)
5601 return Inst1 || P1;
5602 if (EE1->getOperand(0) == EE2->getOperand(0))
5603 return getElementIndex(EE1) < getElementIndex(EE2);
5604 if (!Inst1 && Inst2)
5605 return false;
5606 if (Inst1 && Inst2) {
5607 if (Inst1->getParent() != Inst2->getParent())
5608 return CompareByBasicBlocks(Inst1->getParent(), Inst2->getParent());
5609 return Inst1->comesBefore(Inst2);
5611 if (!P1 && P2)
5612 return false;
5613 assert(P1 && P2 &&
5614 "Expected either instructions or arguments vector operands.");
5615 return P1->getArgNo() < P2->getArgNo();
5617 return false;
5619 SmallDenseMap<unsigned, unsigned, 16> PhiToId;
5620 SmallVector<unsigned> Phis(TE.Scalars.size());
5621 std::iota(Phis.begin(), Phis.end(), 0);
5622 OrdersType ResOrder(TE.Scalars.size());
5623 for (unsigned Id = 0, Sz = TE.Scalars.size(); Id < Sz; ++Id)
5624 PhiToId[Id] = Id;
5625 stable_sort(Phis, PHICompare);
5626 for (unsigned Id = 0, Sz = Phis.size(); Id < Sz; ++Id)
5627 ResOrder[Id] = PhiToId[Phis[Id]];
5628 if (isIdentityOrder(ResOrder))
5629 return std::nullopt; // No need to reorder.
5630 return std::move(ResOrder);
5632 if (TE.isGather() && !TE.isAltShuffle() && allSameType(TE.Scalars)) {
5633 // TODO: add analysis of other gather nodes with extractelement
5634 // instructions and other values/instructions, not only undefs.
5635 if ((TE.getOpcode() == Instruction::ExtractElement ||
5636 (all_of(TE.Scalars, IsaPred<UndefValue, ExtractElementInst>) &&
5637 any_of(TE.Scalars, IsaPred<ExtractElementInst>))) &&
5638 all_of(TE.Scalars, [](Value *V) {
5639 auto *EE = dyn_cast<ExtractElementInst>(V);
5640 return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
5641 })) {
5642 // Check that gather of extractelements can be represented as
5643 // just a shuffle of a single vector.
5644 OrdersType CurrentOrder;
5645 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder,
5646 /*ResizeAllowed=*/true);
5647 if (Reuse || !CurrentOrder.empty())
5648 return std::move(CurrentOrder);
5650 // If the gather node is <undef, v, .., poison> and
5651 // insertelement poison, v, 0 [+ permute]
5652 // is cheaper than
5653 // insertelement poison, v, n - try to reorder.
5654 // If rotating the whole graph, exclude the permute cost, the whole graph
5655 // might be transformed.
5656 int Sz = TE.Scalars.size();
5657 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) &&
5658 count_if(TE.Scalars, IsaPred<UndefValue>) == Sz - 1) {
5659 const auto *It =
5660 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); });
5661 if (It == TE.Scalars.begin())
5662 return OrdersType();
5663 auto *Ty = getWidenedType(TE.Scalars.front()->getType(), Sz);
5664 if (It != TE.Scalars.end()) {
5665 OrdersType Order(Sz, Sz);
5666 unsigned Idx = std::distance(TE.Scalars.begin(), It);
5667 Order[Idx] = 0;
5668 fixupOrderingIndices(Order);
5669 SmallVector<int> Mask;
5670 inversePermutation(Order, Mask);
5671 InstructionCost PermuteCost =
5672 TopToBottom
5674 : ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, Ty, Mask);
5675 InstructionCost InsertFirstCost = TTI->getVectorInstrCost(
5676 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0,
5677 PoisonValue::get(Ty), *It);
5678 InstructionCost InsertIdxCost = TTI->getVectorInstrCost(
5679 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx,
5680 PoisonValue::get(Ty), *It);
5681 if (InsertFirstCost + PermuteCost < InsertIdxCost) {
5682 OrdersType Order(Sz, Sz);
5683 Order[Idx] = 0;
5684 return std::move(Order);
5688 if (isSplat(TE.Scalars))
5689 return std::nullopt;
5690 if (TE.Scalars.size() >= 3)
5691 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
5692 return Order;
5693 // Check if we can include the order of vectorized loads. For masked gathers,
5694 // do extra analysis later, so include such nodes in a special list.
5695 if (TE.isGather() && TE.getOpcode() == Instruction::Load) {
5696 SmallVector<Value *> PointerOps;
5697 OrdersType CurrentOrder;
5698 LoadsState Res = canVectorizeLoads(TE.Scalars, TE.Scalars.front(),
5699 CurrentOrder, PointerOps);
5700 if (Res == LoadsState::Vectorize || Res == LoadsState::StridedVectorize)
5701 return std::move(CurrentOrder);
5703 // FIXME: Remove the non-power-of-two check once findReusedOrderedScalars
5704 // has been audited for correctness with non-power-of-two vectors.
5705 if (!VectorizeNonPowerOf2 || !TE.hasNonWholeRegisterOrNonPowerOf2Vec(*TTI))
5706 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
5707 return CurrentOrder;
5709 return std::nullopt;
5712 /// Checks if the given mask is a "clustered" mask with the same clusters of
5713 /// size \p Sz, which are not identity submasks.
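/// For example, with \p Sz = 4 the mask {1, 0, 3, 2, 1, 0, 3, 2} is a repeated
/// non-identity clustered mask, while {0, 1, 2, 3, 0, 1, 2, 3} is not (its
/// cluster is an identity submask).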
5714 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask,
5715 unsigned Sz) {
5716 ArrayRef<int> FirstCluster = Mask.slice(0, Sz);
5717 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz))
5718 return false;
5719 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) {
5720 ArrayRef<int> Cluster = Mask.slice(I, Sz);
5721 if (Cluster != FirstCluster)
5722 return false;
5724 return true;
5727 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const {
5728 // Reorder reuses mask.
5729 reorderReuses(TE.ReuseShuffleIndices, Mask);
5730 const unsigned Sz = TE.Scalars.size();
5731 // For vectorized nodes and non-clustered reuses no need to do anything else.
5732 if (!TE.isGather() ||
5733 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
5734 Sz) ||
5735 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz))
5736 return;
5737 SmallVector<int> NewMask;
5738 inversePermutation(TE.ReorderIndices, NewMask);
5739 addMask(NewMask, TE.ReuseShuffleIndices);
5740 // Clear reorder since it is going to be applied to the new mask.
5741 TE.ReorderIndices.clear();
5742 // Try to improve gathered nodes with clustered reuses, if possible.
5743 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz);
5744 SmallVector<unsigned> NewOrder(Slice);
5745 inversePermutation(NewOrder, NewMask);
5746 reorderScalars(TE.Scalars, NewMask);
5747 // Fill the reuses mask with the identity submasks.
5748 for (auto *It = TE.ReuseShuffleIndices.begin(),
5749 *End = TE.ReuseShuffleIndices.end();
5750 It != End; std::advance(It, Sz))
5751 std::iota(It, std::next(It, Sz), 0);
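/// Merges \p SecondaryOrder into \p Order: positions of \p Order that are
/// still unset (equal to the order size) are filled from \p SecondaryOrder,
/// or with their own index if \p SecondaryOrder is empty, provided the value
/// to be placed is not already used in \p Order.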
5754 static void combineOrders(MutableArrayRef<unsigned> Order,
5755 ArrayRef<unsigned> SecondaryOrder) {
5756 assert((SecondaryOrder.empty() || Order.size() == SecondaryOrder.size()) &&
5757 "Expected same size of orders");
5758 unsigned Sz = Order.size();
5759 SmallBitVector UsedIndices(Sz);
5760 for (unsigned Idx : seq<unsigned>(0, Sz)) {
5761 if (Order[Idx] != Sz)
5762 UsedIndices.set(Order[Idx]);
5764 if (SecondaryOrder.empty()) {
5765 for (unsigned Idx : seq<unsigned>(0, Sz))
5766 if (Order[Idx] == Sz && !UsedIndices.test(Idx))
5767 Order[Idx] = Idx;
5768 } else {
5769 for (unsigned Idx : seq<unsigned>(0, Sz))
5770 if (SecondaryOrder[Idx] != Sz && Order[Idx] == Sz &&
5771 !UsedIndices.test(SecondaryOrder[Idx]))
5772 Order[Idx] = SecondaryOrder[Idx];
5776 void BoUpSLP::reorderTopToBottom() {
5777 // Maps VF to the graph nodes.
5778 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
5779 // ExtractElement gather nodes which can be vectorized and need to handle
5780 // their ordering.
5781 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
5783 // Phi nodes can have preferred ordering based on their result users
5784 DenseMap<const TreeEntry *, OrdersType> PhisToOrders;
5786 // AltShuffles can also have a preferred ordering that leads to fewer
5787 // instructions, e.g., the addsub instruction in x86.
5788 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
5790 // Maps a TreeEntry to the reorder indices of external users.
5791 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
5792 ExternalUserReorderMap;
5793 // Find all reorderable nodes with the given VF.
5794 // Currently these are vectorized stores, loads, extracts + some gathering of
5795 // extracts.
5796 for_each(VectorizableTree, [&, &TTIRef = *TTI](
5797 const std::unique_ptr<TreeEntry> &TE) {
5798 // Look for external users that will probably be vectorized.
5799 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
5800 findExternalStoreUsersReorderIndices(TE.get());
5801 if (!ExternalUserReorderIndices.empty()) {
5802 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5803 ExternalUserReorderMap.try_emplace(TE.get(),
5804 std::move(ExternalUserReorderIndices));
5807 // Patterns like [fadd,fsub] can be combined into a single instruction in
5808 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
5809 // to take into account their order when looking for the most used order.
5810 if (TE->isAltShuffle()) {
5811 VectorType *VecTy =
5812 getWidenedType(TE->Scalars[0]->getType(), TE->Scalars.size());
5813 unsigned Opcode0 = TE->getOpcode();
5814 unsigned Opcode1 = TE->getAltOpcode();
5815 SmallBitVector OpcodeMask(getAltInstrMask(TE->Scalars, Opcode0, Opcode1));
5816 // If this pattern is supported by the target then we consider the order.
5817 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
5818 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5819 AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
5821 // TODO: Check the reverse order too.
5824 if (std::optional<OrdersType> CurrentOrder =
5825 getReorderingData(*TE, /*TopToBottom=*/true)) {
5826 // Do not include ordering for nodes used in the alt opcode vectorization;
5827 // it is better to reorder them during the bottom-to-top stage. If we follow
5828 // the order here, it causes reordering of the whole graph, though actually it
5829 // is profitable just to reorder the subgraph that starts from the alternate
5830 // opcode vectorization node. Such nodes already end up with a shuffle
5831 // instruction, and it is enough to change this shuffle rather than rotate
5832 // the scalars for the whole graph.
5833 unsigned Cnt = 0;
5834 const TreeEntry *UserTE = TE.get();
5835 while (UserTE && Cnt < RecursionMaxDepth) {
5836 if (UserTE->UserTreeIndices.size() != 1)
5837 break;
5838 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
5839 return EI.UserTE->State == TreeEntry::Vectorize &&
5840 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
5842 return;
5843 UserTE = UserTE->UserTreeIndices.back().UserTE;
5844 ++Cnt;
5846 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5847 if (!(TE->State == TreeEntry::Vectorize ||
5848 TE->State == TreeEntry::StridedVectorize) ||
5849 !TE->ReuseShuffleIndices.empty())
5850 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
5851 if (TE->State == TreeEntry::Vectorize &&
5852 TE->getOpcode() == Instruction::PHI)
5853 PhisToOrders.try_emplace(TE.get(), *CurrentOrder);
5857 // Reorder the graph nodes according to their vectorization factor.
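// Walk the VFs from the largest one down: an even VF steps down by 2, an odd
// VF first drops by 1 to the even value below it (VF -= 2 - (VF & 1)).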
5858 for (unsigned VF = VectorizableTree.front()->getVectorFactor();
5859 !VFToOrderedEntries.empty() && VF > 1; VF -= 2 - (VF & 1U)) {
5860 auto It = VFToOrderedEntries.find(VF);
5861 if (It == VFToOrderedEntries.end())
5862 continue;
5863 // Try to find the most profitable order. We are just looking for the most
5864 // used order and reorder the scalar elements in the nodes according to this
5865 // most used order.
5866 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
5867 // Delete VF entry upon exit.
5868 auto Cleanup = make_scope_exit([&]() { VFToOrderedEntries.erase(It); });
5870 // All operands are reordered and used only in this node - propagate the
5871 // most used order to the user node.
5872 MapVector<OrdersType, unsigned,
5873 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
5874 OrdersUses;
5875 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
5876 for (const TreeEntry *OpTE : OrderedEntries) {
5877 // No need to reorder these nodes; we still need to extend them and use a
5878 // shuffle, just merge the reordering shuffle and the reuse shuffle.
5879 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
5880 continue;
5881 // Count the number of uses of each order.
5882 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders,
5883 &PhisToOrders]() -> const OrdersType & {
5884 if (OpTE->isGather() || !OpTE->ReuseShuffleIndices.empty()) {
5885 auto It = GathersToOrders.find(OpTE);
5886 if (It != GathersToOrders.end())
5887 return It->second;
5889 if (OpTE->isAltShuffle()) {
5890 auto It = AltShufflesToOrders.find(OpTE);
5891 if (It != AltShufflesToOrders.end())
5892 return It->second;
5894 if (OpTE->State == TreeEntry::Vectorize &&
5895 OpTE->getOpcode() == Instruction::PHI) {
5896 auto It = PhisToOrders.find(OpTE);
5897 if (It != PhisToOrders.end())
5898 return It->second;
5900 return OpTE->ReorderIndices;
5901 }();
5902 // First consider the order of the external scalar users.
5903 auto It = ExternalUserReorderMap.find(OpTE);
5904 if (It != ExternalUserReorderMap.end()) {
5905 const auto &ExternalUserReorderIndices = It->second;
5906 // If the OpTE vector factor != number of scalars, use the natural order;
5907 // this is an attempt to reorder a node with reused scalars but with
5908 // external uses.
5909 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) {
5910 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second +=
5911 ExternalUserReorderIndices.size();
5912 } else {
5913 for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
5914 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
5916 // No other useful reorder data in this entry.
5917 if (Order.empty())
5918 continue;
5920 // Stores actually store the mask, not the order; we need to invert it.
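// E.g., the stored mask {2, 0, 1} inverts to the order {1, 2, 0} (the inverse
// permutation satisfies Inv[M[I]] == I).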
5921 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
5922 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
5923 SmallVector<int> Mask;
5924 inversePermutation(Order, Mask);
5925 unsigned E = Order.size();
5926 OrdersType CurrentOrder(E, E);
5927 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
5928 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx);
5930 fixupOrderingIndices(CurrentOrder);
5931 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
5932 } else {
5933 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
5936 if (OrdersUses.empty())
5937 continue;
5938 // Choose the most used order.
5939 unsigned IdentityCnt = 0;
5940 unsigned FilledIdentityCnt = 0;
5941 OrdersType IdentityOrder(VF, VF);
5942 for (auto &Pair : OrdersUses) {
5943 if (Pair.first.empty() || isIdentityOrder(Pair.first)) {
5944 if (!Pair.first.empty())
5945 FilledIdentityCnt += Pair.second;
5946 IdentityCnt += Pair.second;
5947 combineOrders(IdentityOrder, Pair.first);
5950 MutableArrayRef<unsigned> BestOrder = IdentityOrder;
5951 unsigned Cnt = IdentityCnt;
5952 for (auto &Pair : OrdersUses) {
5953 // Prefer the identity order. But if a filled identity (non-empty order) is
5954 // found with the same number of uses as the new candidate order, we can
5955 // choose this candidate order.
5956 if (Cnt < Pair.second ||
5957 (Cnt == IdentityCnt && IdentityCnt == FilledIdentityCnt &&
5958 Cnt == Pair.second && !BestOrder.empty() &&
5959 isIdentityOrder(BestOrder))) {
5960 combineOrders(Pair.first, BestOrder);
5961 BestOrder = Pair.first;
5962 Cnt = Pair.second;
5963 } else {
5964 combineOrders(BestOrder, Pair.first);
5967 // Set order of the user node.
5968 if (isIdentityOrder(BestOrder))
5969 continue;
5970 fixupOrderingIndices(BestOrder);
5971 SmallVector<int> Mask;
5972 inversePermutation(BestOrder, Mask);
5973 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem);
5974 unsigned E = BestOrder.size();
5975 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
5976 return I < E ? static_cast<int>(I) : PoisonMaskElem;
5978 // Do an actual reordering, if profitable.
5979 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
5980 // Just do the reordering for the nodes with the given VF.
5981 if (TE->Scalars.size() != VF) {
5982 if (TE->ReuseShuffleIndices.size() == VF) {
5983 // Need to reorder the reuses masks of the operands with smaller VF to
5984 // be able to find the match between the graph nodes and scalar
5985 // operands of the given node during vectorization/cost estimation.
5986 assert(all_of(TE->UserTreeIndices,
5987 [VF, &TE](const EdgeInfo &EI) {
5988 return EI.UserTE->Scalars.size() == VF ||
5989 EI.UserTE->Scalars.size() ==
5990 TE->Scalars.size();
5991 }) &&
5992 "All users must be of VF size.");
5993 // Update ordering of the operands with the smaller VF than the given
5994 // one.
5995 reorderNodeWithReuses(*TE, Mask);
5997 continue;
5999 if ((TE->State == TreeEntry::Vectorize ||
6000 TE->State == TreeEntry::StridedVectorize) &&
6001 isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
6002 InsertElementInst>(TE->getMainOp()) &&
6003 !TE->isAltShuffle()) {
6004 // Build correct orders for extract{element,value}, loads and
6005 // stores.
6006 reorderOrder(TE->ReorderIndices, Mask);
6007 if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
6008 TE->reorderOperands(Mask);
6009 } else {
6010 // Reorder the node and its operands.
6011 TE->reorderOperands(Mask);
6012 assert(TE->ReorderIndices.empty() &&
6013 "Expected empty reorder sequence.");
6014 reorderScalars(TE->Scalars, Mask);
6016 if (!TE->ReuseShuffleIndices.empty()) {
6017 // Apply reversed order to keep the original ordering of the reused
6018 // elements to avoid extra reorder indices shuffling.
6019 OrdersType CurrentOrder;
6020 reorderOrder(CurrentOrder, MaskOrder);
6021 SmallVector<int> NewReuses;
6022 inversePermutation(CurrentOrder, NewReuses);
6023 addMask(NewReuses, TE->ReuseShuffleIndices);
6024 TE->ReuseShuffleIndices.swap(NewReuses);
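/// Checks whether the operands of \p UserTE can be reordered: operand tree
/// entries used only by \p UserTE are collected in \p Edges, and non-vectorized
/// operands that merely need their scalars reordered are collected in
/// \p GatherOps. Returns false if an operand entry is shared with another user
/// node or if more than one reorderable gather feeds the same non-constant
/// operand.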
6030 bool BoUpSLP::canReorderOperands(
6031 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
6032 ArrayRef<TreeEntry *> ReorderableGathers,
6033 SmallVectorImpl<TreeEntry *> &GatherOps) {
6034 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
6035 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
6036 return OpData.first == I &&
6037 (OpData.second->State == TreeEntry::Vectorize ||
6038 OpData.second->State == TreeEntry::StridedVectorize);
6040 continue;
6041 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
6042 // Do not reorder if operand node is used by many user nodes.
6043 if (any_of(TE->UserTreeIndices,
6044 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
6045 return false;
6046 // Add the node to the list of the ordered nodes with the identity
6047 // order.
6048 Edges.emplace_back(I, TE);
6049 // Add ScatterVectorize nodes to the list of operands, where just
6050 // reordering of the scalars is required. Similar to the gathers, so
6051 // simply add to the list of gathered ops.
6052 // If there are reused scalars, process this node as a regular vectorize
6053 // node, just reorder reuses mask.
6054 if (TE->State != TreeEntry::Vectorize &&
6055 TE->State != TreeEntry::StridedVectorize &&
6056 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty())
6057 GatherOps.push_back(TE);
6058 continue;
6060 TreeEntry *Gather = nullptr;
6061 if (count_if(ReorderableGathers,
6062 [&Gather, UserTE, I](TreeEntry *TE) {
6063 assert(TE->State != TreeEntry::Vectorize &&
6064 TE->State != TreeEntry::StridedVectorize &&
6065 "Only non-vectorized nodes are expected.");
6066 if (any_of(TE->UserTreeIndices,
6067 [UserTE, I](const EdgeInfo &EI) {
6068 return EI.UserTE == UserTE && EI.EdgeIdx == I;
6069 })) {
6070 assert(TE->isSame(UserTE->getOperand(I)) &&
6071 "Operand entry does not match operands.");
6072 Gather = TE;
6073 return true;
6075 return false;
6076 }) > 1 &&
6077 !allConstant(UserTE->getOperand(I)))
6078 return false;
6079 if (Gather)
6080 GatherOps.push_back(Gather);
6082 return true;
6085 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
6086 SetVector<TreeEntry *> OrderedEntries;
6087 DenseSet<const TreeEntry *> GathersToOrders;
6088 // Find all reorderable leaf nodes with the given VF.
6089 // Currently these are vectorized loads, extracts without alternate operands +
6090 // some gathering of extracts.
6091 SmallVector<TreeEntry *> NonVectorized;
6092 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
6093 if (TE->State != TreeEntry::Vectorize &&
6094 TE->State != TreeEntry::StridedVectorize)
6095 NonVectorized.push_back(TE.get());
6096 if (std::optional<OrdersType> CurrentOrder =
6097 getReorderingData(*TE, /*TopToBottom=*/false)) {
6098 OrderedEntries.insert(TE.get());
6099 if (!(TE->State == TreeEntry::Vectorize ||
6100 TE->State == TreeEntry::StridedVectorize) ||
6101 !TE->ReuseShuffleIndices.empty())
6102 GathersToOrders.insert(TE.get());
6106 // 1. Propagate order to the graph nodes, which use only reordered nodes.
6107 // I.e., if the node has operands that are reordered, try to put at least
6108 // one operand order in the natural order and reorder the others, plus reorder
6109 // the user node itself.
6110 SmallPtrSet<const TreeEntry *, 4> Visited;
6111 while (!OrderedEntries.empty()) {
6112 // 1. Filter out only reordered nodes.
6113 // 2. If the entry has multiple uses - skip it and jump to the next node.
6114 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
6115 SmallVector<TreeEntry *> Filtered;
6116 for (TreeEntry *TE : OrderedEntries) {
6117 if (!(TE->State == TreeEntry::Vectorize ||
6118 TE->State == TreeEntry::StridedVectorize ||
6119 (TE->isGather() && GathersToOrders.contains(TE))) ||
6120 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
6121 !all_of(drop_begin(TE->UserTreeIndices),
6122 [TE](const EdgeInfo &EI) {
6123 return EI.UserTE == TE->UserTreeIndices.front().UserTE;
6124 }) ||
6125 !Visited.insert(TE).second) {
6126 Filtered.push_back(TE);
6127 continue;
6129 // Build a map between user nodes and their operands' order to speed up the
6130 // search. The graph currently does not provide this dependency directly.
6131 for (EdgeInfo &EI : TE->UserTreeIndices)
6132 Users[EI.UserTE].emplace_back(EI.EdgeIdx, TE);
6134 // Erase filtered entries.
6135 for (TreeEntry *TE : Filtered)
6136 OrderedEntries.remove(TE);
6137 SmallVector<
6138 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
6139 UsersVec(Users.begin(), Users.end());
6140 sort(UsersVec, [](const auto &Data1, const auto &Data2) {
6141 return Data1.first->Idx > Data2.first->Idx;
6143 for (auto &Data : UsersVec) {
6144 // Check that operands are used only in the User node.
6145 SmallVector<TreeEntry *> GatherOps;
6146 if (!canReorderOperands(Data.first, Data.second, NonVectorized,
6147 GatherOps)) {
6148 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6149 OrderedEntries.remove(Op.second);
6150 continue;
6152 // All operands are reordered and used only in this node - propagate the
6153 // most used order to the user node.
6154 MapVector<OrdersType, unsigned,
6155 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
6156 OrdersUses;
6157 // Do the analysis for each tree entry only once, otherwise the order of
6158 // the same node may be considered several times, though it might not be
6159 // profitable.
6160 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
6161 SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
6162 for (const auto &Op : Data.second) {
6163 TreeEntry *OpTE = Op.second;
6164 if (!VisitedOps.insert(OpTE).second)
6165 continue;
6166 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
6167 continue;
6168 const auto Order = [&]() -> const OrdersType {
6169 if (OpTE->isGather() || !OpTE->ReuseShuffleIndices.empty())
6170 return getReorderingData(*OpTE, /*TopToBottom=*/false)
6171 .value_or(OrdersType(1));
6172 return OpTE->ReorderIndices;
6173 }();
6174 // The order is partially ordered, skip it in favor of fully non-ordered
6175 // orders.
6176 if (Order.size() == 1)
6177 continue;
6178 unsigned NumOps = count_if(
6179 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
6180 return P.second == OpTE;
6182 // Stores actually store the mask, not the order; we need to invert it.
6183 if (OpTE->State == TreeEntry::Vectorize && !OpTE->isAltShuffle() &&
6184 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
6185 SmallVector<int> Mask;
6186 inversePermutation(Order, Mask);
6187 unsigned E = Order.size();
6188 OrdersType CurrentOrder(E, E);
6189 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
6190 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx);
6192 fixupOrderingIndices(CurrentOrder);
6193 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
6194 NumOps;
6195 } else {
6196 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
6198 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
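// A node allows reordering if it already has reorder or reuse indices, is an
// alternate-opcode vectorize node, is the root node when root reordering is
// ignored, or is a gather node whose reordering data is not known to be empty.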
6199 const auto AllowsReordering = [&](const TreeEntry *TE) {
6200 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
6201 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
6202 (IgnoreReorder && TE->Idx == 0))
6203 return true;
6204 if (TE->isGather()) {
6205 if (GathersToOrders.contains(TE))
6206 return !getReorderingData(*TE, /*TopToBottom=*/false)
6207 .value_or(OrdersType(1))
6208 .empty();
6209 return true;
6211 return false;
6213 for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
6214 TreeEntry *UserTE = EI.UserTE;
6215 if (!VisitedUsers.insert(UserTE).second)
6216 continue;
6217 // May reorder user node if it requires reordering, has reused
6218 // scalars, is an alternate op vectorize node or its op nodes require
6219 // reordering.
6220 if (AllowsReordering(UserTE))
6221 continue;
6222 // Check if users allow reordering.
6223 // Currently look up just 1 level of operands to avoid increase of
6224 // the compile time.
6225 // Reordering is profitable only if definitely more operands allow
6226 // reordering than those that prefer the natural order.
6227 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
6228 if (static_cast<unsigned>(count_if(
6229 Ops, [UserTE, &AllowsReordering](
6230 const std::pair<unsigned, TreeEntry *> &Op) {
6231 return AllowsReordering(Op.second) &&
6232 all_of(Op.second->UserTreeIndices,
6233 [UserTE](const EdgeInfo &EI) {
6234 return EI.UserTE == UserTE;
6236 })) <= Ops.size() / 2)
6237 ++Res.first->second;
6240 if (OrdersUses.empty()) {
6241 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6242 OrderedEntries.remove(Op.second);
6243 continue;
6245 // Choose the most used order.
6246 unsigned IdentityCnt = 0;
6247 unsigned VF = Data.second.front().second->getVectorFactor();
6248 OrdersType IdentityOrder(VF, VF);
6249 for (auto &Pair : OrdersUses) {
6250 if (Pair.first.empty() || isIdentityOrder(Pair.first)) {
6251 IdentityCnt += Pair.second;
6252 combineOrders(IdentityOrder, Pair.first);
6255 MutableArrayRef<unsigned> BestOrder = IdentityOrder;
6256 unsigned Cnt = IdentityCnt;
6257 for (auto &Pair : OrdersUses) {
6258 // Prefer the identity order. But if a filled identity (non-empty
6259 // order) is found with the same number of uses as the new candidate
6260 // order, we can choose this candidate order.
6261 if (Cnt < Pair.second) {
6262 combineOrders(Pair.first, BestOrder);
6263 BestOrder = Pair.first;
6264 Cnt = Pair.second;
6265 } else {
6266 combineOrders(BestOrder, Pair.first);
6269 // Set order of the user node.
6270 if (isIdentityOrder(BestOrder)) {
6271 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6272 OrderedEntries.remove(Op.second);
6273 continue;
6275 fixupOrderingIndices(BestOrder);
6276 // Erase operands from OrderedEntries list and adjust their orders.
6277 VisitedOps.clear();
6278 SmallVector<int> Mask;
6279 inversePermutation(BestOrder, Mask);
6280 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem);
6281 unsigned E = BestOrder.size();
6282 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
6283 return I < E ? static_cast<int>(I) : PoisonMaskElem;
6285 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
6286 TreeEntry *TE = Op.second;
6287 OrderedEntries.remove(TE);
6288 if (!VisitedOps.insert(TE).second)
6289 continue;
6290 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
6291 reorderNodeWithReuses(*TE, Mask);
6292 continue;
6294 // Gathers are processed separately.
6295 if (TE->State != TreeEntry::Vectorize &&
6296 TE->State != TreeEntry::StridedVectorize &&
6297 (TE->State != TreeEntry::ScatterVectorize ||
6298 TE->ReorderIndices.empty()))
6299 continue;
6300 assert((BestOrder.size() == TE->ReorderIndices.size() ||
6301 TE->ReorderIndices.empty()) &&
6302 "Non-matching sizes of user/operand entries.");
6303 reorderOrder(TE->ReorderIndices, Mask);
6304 if (IgnoreReorder && TE == VectorizableTree.front().get())
6305 IgnoreReorder = false;
6307 // For gathers we just need to reorder their scalars.
6308 for (TreeEntry *Gather : GatherOps) {
6309 assert(Gather->ReorderIndices.empty() &&
6310 "Unexpected reordering of gathers.");
6311 if (!Gather->ReuseShuffleIndices.empty()) {
6312 // Just reorder reuses indices.
6313 reorderReuses(Gather->ReuseShuffleIndices, Mask);
6314 continue;
6316 reorderScalars(Gather->Scalars, Mask);
6317 OrderedEntries.remove(Gather);
6319 // Reorder operands of the user node and set the ordering for the user
6320 // node itself.
6321 if (Data.first->State != TreeEntry::Vectorize ||
6322 !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
6323 Data.first->getMainOp()) ||
6324 Data.first->isAltShuffle())
6325 Data.first->reorderOperands(Mask);
6326 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
6327 Data.first->isAltShuffle() ||
6328 Data.first->State == TreeEntry::StridedVectorize) {
6329 reorderScalars(Data.first->Scalars, Mask);
6330 reorderOrder(Data.first->ReorderIndices, MaskOrder,
6331 /*BottomOrder=*/true);
6332 if (Data.first->ReuseShuffleIndices.empty() &&
6333 !Data.first->ReorderIndices.empty() &&
6334 !Data.first->isAltShuffle()) {
6335 // Insert user node to the list to try to sink reordering deeper in
6336 // the graph.
6337 OrderedEntries.insert(Data.first);
6339 } else {
6340 reorderOrder(Data.first->ReorderIndices, Mask);
6344 // If the reordering is unnecessary, just remove the reorder.
6345 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
6346 VectorizableTree.front()->ReuseShuffleIndices.empty())
6347 VectorizableTree.front()->ReorderIndices.clear();
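/// For reverse-ordered strided loads/stores the root instruction is the scalar
/// that the reorder indices place first; otherwise it is simply the first
/// scalar of the entry.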
6350 Instruction *BoUpSLP::getRootEntryInstruction(const TreeEntry &Entry) const {
6351 if ((Entry.getOpcode() == Instruction::Store ||
6352 Entry.getOpcode() == Instruction::Load) &&
6353 Entry.State == TreeEntry::StridedVectorize &&
6354 !Entry.ReorderIndices.empty() && isReverseOrder(Entry.ReorderIndices))
6355 return dyn_cast<Instruction>(Entry.Scalars[Entry.ReorderIndices.front()]);
6356 return dyn_cast<Instruction>(Entry.Scalars.front());
6359 void BoUpSLP::buildExternalUses(
6360 const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
6361 DenseMap<Value *, unsigned> ScalarToExtUses;
6362 // Collect the values that we need to extract from the tree.
6363 for (auto &TEPtr : VectorizableTree) {
6364 TreeEntry *Entry = TEPtr.get();
6366 // No need to handle users of gathered values.
6367 if (Entry->isGather())
6368 continue;
6370 // For each lane:
6371 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
6372 Value *Scalar = Entry->Scalars[Lane];
6373 if (!isa<Instruction>(Scalar))
6374 continue;
6375 // If all uses were already replaced, no need to do it again.
6376 auto It = ScalarToExtUses.find(Scalar);
6377 if (It != ScalarToExtUses.end() && !ExternalUses[It->second].User)
6378 continue;
6380 // Check if the scalar is externally used as an extra arg.
6381 const auto ExtI = ExternallyUsedValues.find(Scalar);
6382 if (ExtI != ExternallyUsedValues.end()) {
6383 int FoundLane = Entry->findLaneForValue(Scalar);
6384 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
6385 << FoundLane << " from " << *Scalar << ".\n");
6386 ScalarToExtUses.try_emplace(Scalar, ExternalUses.size());
6387 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
6388 continue;
6390 for (User *U : Scalar->users()) {
6391 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
6393 Instruction *UserInst = dyn_cast<Instruction>(U);
6394 if (!UserInst || isDeleted(UserInst))
6395 continue;
6397 // Ignore users in the user ignore list.
6398 if (UserIgnoreList && UserIgnoreList->contains(UserInst))
6399 continue;
6401 // Skip in-tree scalars that become vectors
6402 if (TreeEntry *UseEntry = getTreeEntry(U)) {
6403 // Some in-tree scalars will remain as scalars in vectorized
6404 // instructions. If that is the case, the one in FoundLane will
6405 // be used.
6406 if (UseEntry->State == TreeEntry::ScatterVectorize ||
6407 !doesInTreeUserNeedToExtract(
6408 Scalar, getRootEntryInstruction(*UseEntry), TLI)) {
6409 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
6410 << ".\n");
6411 assert(!UseEntry->isGather() && "Bad state");
6412 continue;
6414 U = nullptr;
6415 if (It != ScalarToExtUses.end()) {
6416 ExternalUses[It->second].User = nullptr;
6417 break;
6421 if (U && Scalar->hasNUsesOrMore(UsesLimit))
6422 U = nullptr;
6423 int FoundLane = Entry->findLaneForValue(Scalar);
6424 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *UserInst
6425 << " from lane " << FoundLane << " from " << *Scalar
6426 << ".\n");
6427 It = ScalarToExtUses.try_emplace(Scalar, ExternalUses.size()).first;
6428 ExternalUses.emplace_back(Scalar, U, FoundLane);
6429 if (!U)
6430 break;
6436 SmallVector<SmallVector<StoreInst *>>
6437 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
6438 SmallDenseMap<std::tuple<BasicBlock *, Type *, Value *>,
6439 SmallVector<StoreInst *>, 8>
6440 PtrToStoresMap;
6441 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
6442 Value *V = TE->Scalars[Lane];
6443 // Don't iterate over the users of constant data.
6444 if (!isa<Instruction>(V))
6445 continue;
6446 // To save compilation time we don't visit the users if there are too many of them.
6447 if (V->hasNUsesOrMore(UsesLimit))
6448 break;
6450 // Collect stores per pointer object.
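// Stores are grouped by (parent basic block, stored value type, underlying
// pointer object); see the PtrToStoresMap key below.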
6451 for (User *U : V->users()) {
6452 auto *SI = dyn_cast<StoreInst>(U);
6453 // Test whether we can handle the store. V might be a global, which could
6454 // be used in a different function.
6455 if (SI == nullptr || !SI->isSimple() || SI->getFunction() != F ||
6456 !isValidElementType(SI->getValueOperand()->getType()))
6457 continue;
6458 // Skip the store if it already has a tree entry.
6459 if (getTreeEntry(U))
6460 continue;
6462 Value *Ptr =
6463 getUnderlyingObject(SI->getPointerOperand(), RecursionMaxDepth);
6464 auto &StoresVec = PtrToStoresMap[{SI->getParent(),
6465 SI->getValueOperand()->getType(), Ptr}];
6466 // For now just keep one store per pointer object per lane.
6467 // TODO: Extend this to support multiple stores per pointer per lane
6468 if (StoresVec.size() > Lane)
6469 continue;
6470 if (!StoresVec.empty()) {
6471 std::optional<int> Diff = getPointersDiff(
6472 SI->getValueOperand()->getType(), SI->getPointerOperand(),
6473 SI->getValueOperand()->getType(),
6474 StoresVec.front()->getPointerOperand(), *DL, *SE,
6475 /*StrictCheck=*/true);
6476 // We failed to compare the pointers so just abandon this store.
6477 if (!Diff)
6478 continue;
6480 StoresVec.push_back(SI);
6483 SmallVector<SmallVector<StoreInst *>> Res(PtrToStoresMap.size());
6484 unsigned I = 0;
6485 for (auto &P : PtrToStoresMap) {
6486 Res[I].swap(P.second);
6487 ++I;
6489 return Res;
6492 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec,
6493 OrdersType &ReorderIndices) const {
6494 // We check whether the stores in StoresVec can form a vector by sorting them
6495 // and checking whether they are consecutive.
6497 // To avoid calling getPointersDiff() while sorting we create a vector of
6498 // pairs {store, offset from first} and sort this instead.
6499 SmallVector<std::pair<int, unsigned>> StoreOffsetVec;
6500 StoreInst *S0 = StoresVec[0];
6501 StoreOffsetVec.emplace_back(0, 0);
6502 Type *S0Ty = S0->getValueOperand()->getType();
6503 Value *S0Ptr = S0->getPointerOperand();
6504 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
6505 StoreInst *SI = StoresVec[Idx];
6506 std::optional<int> Diff =
6507 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
6508 SI->getPointerOperand(), *DL, *SE,
6509 /*StrictCheck=*/true);
6510 StoreOffsetVec.emplace_back(*Diff, Idx);
6513 // Check if the stores are consecutive by checking if their difference is 1.
6514 if (StoreOffsetVec.size() != StoresVec.size())
6515 return false;
6516 sort(StoreOffsetVec,
6517 [](const std::pair<int, unsigned> &L,
6518 const std::pair<int, unsigned> &R) { return L.first < R.first; });
6519 unsigned Idx = 0;
6520 int PrevDist = 0;
6521 for (const auto &P : StoreOffsetVec) {
6522 if (Idx > 0 && P.first != PrevDist + 1)
6523 return false;
6524 PrevDist = P.first;
6525 ++Idx;
6528 // Calculate the shuffle indices according to their offset against the sorted
6529 // StoreOffsetVec.
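// E.g., if StoresVec[1] is 2 elements past StoresVec[0] and StoresVec[2] is 1
// element past it, the sorted offsets are {0, 1, 2} and ReorderIndices becomes
// {0, 2, 1}.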
6530 ReorderIndices.assign(StoresVec.size(), 0);
6531 bool IsIdentity = true;
6532 for (auto [I, P] : enumerate(StoreOffsetVec)) {
6533 ReorderIndices[P.second] = I;
6534 IsIdentity &= P.second == I;
6536 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in
6537 // reorderTopToBottom() and reorderBottomToTop(), so we are following the
6538 // same convention here.
6539 if (IsIdentity)
6540 ReorderIndices.clear();
6542 return true;
6545 #ifndef NDEBUG
6546 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
6547 for (unsigned Idx : Order)
6548 dbgs() << Idx << ", ";
6549 dbgs() << "\n";
6551 #endif
6553 SmallVector<BoUpSLP::OrdersType, 1>
6554 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
6555 unsigned NumLanes = TE->Scalars.size();
6557 SmallVector<SmallVector<StoreInst *>> Stores = collectUserStores(TE);
6559 // Holds the reorder indices for each candidate store vector that is a user of
6560 // the current TreeEntry.
6561 SmallVector<OrdersType, 1> ExternalReorderIndices;
6563 // Now inspect the stores collected per pointer and look for vectorization
6564 // candidates. For each candidate calculate the reorder index vector and push
6565 // it into `ExternalReorderIndices`
6566 for (ArrayRef<StoreInst *> StoresVec : Stores) {
6567 // If we have fewer than NumLanes stores, then we can't form a vector.
6568 if (StoresVec.size() != NumLanes)
6569 continue;
6571 // If the stores are not consecutive then abandon this StoresVec.
6572 OrdersType ReorderIndices;
6573 if (!canFormVector(StoresVec, ReorderIndices))
6574 continue;
6576 // We now know that the scalars in StoresVec can form a vector instruction,
6577 // so set the reorder indices.
6578 ExternalReorderIndices.push_back(ReorderIndices);
6580 return ExternalReorderIndices;
6583 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
6584 const SmallDenseSet<Value *> &UserIgnoreLst) {
6585 deleteTree();
6586 UserIgnoreList = &UserIgnoreLst;
6587 if (!allSameType(Roots))
6588 return;
6589 buildTree_rec(Roots, 0, EdgeInfo());
6592 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
6593 deleteTree();
6594 if (!allSameType(Roots))
6595 return;
6596 buildTree_rec(Roots, 0, EdgeInfo());
6599 /// Tries to find a subvector of loads and builds a new vector of only loads if
6600 /// that can be profitable.
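/// Loads are clustered by parent block, type and underlying pointer object;
/// within a cluster each load is keyed by its constant pointer distance from
/// the cluster's first load.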
6601 static void gatherPossiblyVectorizableLoads(
6602 const BoUpSLP &R, ArrayRef<Value *> VL, const DataLayout &DL,
6603 ScalarEvolution &SE, const TargetTransformInfo &TTI,
6604 SmallVectorImpl<SmallVector<std::pair<LoadInst *, int>>> &GatheredLoads,
6605 bool AddNew = true) {
6606 if (VL.empty())
6607 return;
6608 Type *ScalarTy = getValueType(VL.front());
6609 if (!isValidElementType(ScalarTy))
6610 return;
6611 SmallVector<SmallVector<std::pair<LoadInst *, int>>> ClusteredLoads;
6612 SmallVector<DenseMap<int, LoadInst *>> ClusteredDistToLoad;
6613 for (Value *V : VL) {
6614 auto *LI = dyn_cast<LoadInst>(V);
6615 if (!LI)
6616 continue;
6617 if (R.isDeleted(LI) || R.isVectorized(LI) || !LI->isSimple())
6618 continue;
6619 bool IsFound = false;
6620 for (auto [Map, Data] : zip(ClusteredDistToLoad, ClusteredLoads)) {
6621 assert(LI->getParent() == Data.front().first->getParent() &&
6622 LI->getType() == Data.front().first->getType() &&
6623 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth) ==
6624 getUnderlyingObject(Data.front().first->getPointerOperand(),
6625 RecursionMaxDepth) &&
6626 "Expected loads with the same type, same parent and same "
6627 "underlying pointer.");
6628 std::optional<int> Dist = getPointersDiff(
6629 LI->getType(), LI->getPointerOperand(), Data.front().first->getType(),
6630 Data.front().first->getPointerOperand(), DL, SE,
6631 /*StrictCheck=*/true);
6632 if (!Dist)
6633 continue;
6634 auto It = Map.find(*Dist);
6635 if (It != Map.end() && It->second != LI)
6636 continue;
6637 if (It == Map.end()) {
6638 Data.emplace_back(LI, *Dist);
6639 Map.try_emplace(*Dist, LI);
6641 IsFound = true;
6642 break;
6644 if (!IsFound) {
6645 ClusteredLoads.emplace_back().emplace_back(LI, 0);
6646 ClusteredDistToLoad.emplace_back().try_emplace(0, LI);
6649 auto FindMatchingLoads =
6650 [&](ArrayRef<std::pair<LoadInst *, int>> Loads,
6651 SmallVectorImpl<SmallVector<std::pair<LoadInst *, int>>>
6652 &GatheredLoads,
6653 SetVector<unsigned> &ToAdd, SetVector<unsigned> &Repeated,
6654 int &Offset, unsigned &Start) {
6655 if (Loads.empty())
6656 return GatheredLoads.end();
6657 SmallVector<std::pair<int, int>> Res;
6658 LoadInst *LI = Loads.front().first;
6659 for (auto [Idx, Data] : enumerate(GatheredLoads)) {
6660 if (Idx < Start)
6661 continue;
6662 ToAdd.clear();
6663 if (LI->getParent() != Data.front().first->getParent() ||
6664 LI->getType() != Data.front().first->getType())
6665 continue;
6666 std::optional<int> Dist =
6667 getPointersDiff(LI->getType(), LI->getPointerOperand(),
6668 Data.front().first->getType(),
6669 Data.front().first->getPointerOperand(), DL, SE,
6670 /*StrictCheck=*/true);
6671 if (!Dist)
6672 continue;
6673 SmallSet<int, 4> DataDists;
6674 SmallPtrSet<LoadInst *, 4> DataLoads;
6675 for (std::pair<LoadInst *, int> P : Data) {
6676 DataDists.insert(P.second);
6677 DataLoads.insert(P.first);
6679 // Found matching gathered loads - check if all loads are unique or
6680 // can be effectively vectorized.
6681 unsigned NumUniques = 0;
6682 for (auto [Cnt, Pair] : enumerate(Loads)) {
6683 bool Used = DataLoads.contains(Pair.first);
6684 if (!Used && !DataDists.contains(*Dist + Pair.second)) {
6685 ++NumUniques;
6686 ToAdd.insert(Cnt);
6687 } else if (Used) {
6688 Repeated.insert(Cnt);
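// Merge into this group only if every load is new, or if at least two loads
// (and at least half of them) already overlap with the group and the merged
// size either is a power of two or grows into the next power-of-two bucket.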
6691 if (NumUniques > 0 &&
6692 (Loads.size() == NumUniques ||
6693 (Loads.size() - NumUniques >= 2 &&
6694 Loads.size() - NumUniques >= Loads.size() / 2 &&
6695 (has_single_bit(Data.size() + NumUniques) ||
6696 bit_ceil(Data.size()) <
6697 bit_ceil(Data.size() + NumUniques))))) {
6698 Offset = *Dist;
6699 Start = Idx + 1;
6700 return std::next(GatheredLoads.begin(), Idx);
6703 ToAdd.clear();
6704 return GatheredLoads.end();
6706 for (ArrayRef<std::pair<LoadInst *, int>> Data : ClusteredLoads) {
6707 unsigned Start = 0;
6708 SetVector<unsigned> ToAdd, LocalToAdd, Repeated;
6709 int Offset = 0;
6710 auto *It = FindMatchingLoads(Data, GatheredLoads, LocalToAdd, Repeated,
6711 Offset, Start);
6712 while (It != GatheredLoads.end()) {
6713 assert(!LocalToAdd.empty() && "Expected some elements to add.");
6714 for (unsigned Idx : LocalToAdd)
6715 It->emplace_back(Data[Idx].first, Data[Idx].second + Offset);
6716 ToAdd.insert(LocalToAdd.begin(), LocalToAdd.end());
6717 It = FindMatchingLoads(Data, GatheredLoads, LocalToAdd, Repeated, Offset,
6718 Start);
6720 if (any_of(seq<unsigned>(Data.size()), [&](unsigned Idx) {
6721 return !ToAdd.contains(Idx) && !Repeated.contains(Idx);
6722 })) {
6723 auto AddNewLoads =
6724 [&](SmallVectorImpl<std::pair<LoadInst *, int>> &Loads) {
6725 for (unsigned Idx : seq<unsigned>(Data.size())) {
6726 if (ToAdd.contains(Idx) || Repeated.contains(Idx))
6727 continue;
6728 Loads.push_back(Data[Idx]);
6731 if (!AddNew) {
6732 LoadInst *LI = Data.front().first;
6733 It = find_if(
6734 GatheredLoads, [&](ArrayRef<std::pair<LoadInst *, int>> PD) {
6735 return PD.front().first->getParent() == LI->getParent() &&
6736 PD.front().first->getType() == LI->getType();
6738 while (It != GatheredLoads.end()) {
6739 AddNewLoads(*It);
6740 It = std::find_if(
6741 std::next(It), GatheredLoads.end(),
6742 [&](ArrayRef<std::pair<LoadInst *, int>> PD) {
6743 return PD.front().first->getParent() == LI->getParent() &&
6744 PD.front().first->getType() == LI->getType();
6748 GatheredLoads.emplace_back().append(Data.begin(), Data.end());
6749 AddNewLoads(GatheredLoads.emplace_back());
6754 void BoUpSLP::tryToVectorizeGatheredLoads(
6755 const SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
6756 SmallVector<SmallVector<std::pair<LoadInst *, int>>>,
6757 8> &GatheredLoads) {
6758 GatheredLoadsEntriesFirst = VectorizableTree.size();
6760 SmallVector<SmallPtrSet<const Value *, 4>> LoadSetsToVectorize(
6761 LoadEntriesToVectorize.size());
6762 for (auto [Idx, Set] : zip(LoadEntriesToVectorize, LoadSetsToVectorize))
6763 Set.insert(VectorizableTree[Idx]->Scalars.begin(),
6764 VectorizableTree[Idx]->Scalars.end());
6766 // Sort loads by distance, in descending order.
6767 auto LoadSorter = [](const std::pair<LoadInst *, int> &L1,
6768 const std::pair<LoadInst *, int> &L2) {
6769 return L1.second > L2.second;
6772 auto IsMaskedGatherSupported = [&](ArrayRef<LoadInst *> Loads) {
6773 ArrayRef<Value *> Values(reinterpret_cast<Value *const *>(Loads.begin()),
6774 Loads.size());
6775 Align Alignment = computeCommonAlignment<LoadInst>(Values);
6776 auto *Ty = getWidenedType(Loads.front()->getType(), Loads.size());
6777 return TTI->isLegalMaskedGather(Ty, Alignment) &&
6778 !TTI->forceScalarizeMaskedGather(Ty, Alignment);
6781 auto GetVectorizedRanges = [this](ArrayRef<LoadInst *> Loads,
6782 BoUpSLP::ValueSet &VectorizedLoads,
6783 SmallVectorImpl<LoadInst *> &NonVectorized,
6784 bool Final, unsigned MaxVF) {
6785 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>> Results;
6786 unsigned StartIdx = 0;
6787 SmallVector<int> CandidateVFs;
6788 if (VectorizeNonPowerOf2 && has_single_bit(MaxVF + 1))
6789 CandidateVFs.push_back(MaxVF);
6790 for (int NumElts = getFloorFullVectorNumberOfElements(
6791 *TTI, Loads.front()->getType(), MaxVF);
6792 NumElts > 1; NumElts = getFloorFullVectorNumberOfElements(
6793 *TTI, Loads.front()->getType(), NumElts - 1)) {
6794 CandidateVFs.push_back(NumElts);
6795 if (VectorizeNonPowerOf2 && NumElts > 2)
6796 CandidateVFs.push_back(NumElts - 1);
6799 if (Final && CandidateVFs.empty())
6800 return Results;
6802 unsigned BestVF = Final ? CandidateVFs.back() : 0;
6803 for (unsigned NumElts : CandidateVFs) {
6804 if (Final && NumElts > BestVF)
6805 continue;
6806 SmallVector<unsigned> MaskedGatherVectorized;
6807 for (unsigned Cnt = StartIdx, E = Loads.size(); Cnt < E;
6808 ++Cnt) {
6809 ArrayRef<LoadInst *> Slice =
6810 ArrayRef(Loads).slice(Cnt, std::min(NumElts, E - Cnt));
6811 if (VectorizedLoads.count(Slice.front()) ||
6812 VectorizedLoads.count(Slice.back()) ||
6813 areKnownNonVectorizableLoads(Slice))
6814 continue;
6815 // Check if it is profitable to try vectorizing gathered loads. It is
6816 // profitable if we have 3 or more consecutive loads or if we have
6817 // fewer but all users are vectorized or deleted.
6818 bool AllowToVectorize = false;
6819 // Check if it is profitable to vectorize 2-elements loads.
6820 if (NumElts == 2) {
6821 bool IsLegalBroadcastLoad = TTI->isLegalBroadcastLoad(
6822 Slice.front()->getType(), ElementCount::getFixed(NumElts));
6823 auto CheckIfAllowed = [=](ArrayRef<LoadInst *> Slice) {
6824 for (LoadInst *LI : Slice) {
6825 // If single use/user - allow to vectorize.
6826 if (LI->hasOneUse())
6827 continue;
6828 // 1. Check if number of uses equals number of users.
6829 // 2. All users are deleted.
6830 // 3. The load broadcasts are not allowed or the load is not
6831 // broadcasted.
6832 if (std::distance(LI->user_begin(), LI->user_end()) !=
6833 LI->getNumUses())
6834 return false;
6835 if (!IsLegalBroadcastLoad)
6836 continue;
6837 if (LI->hasNUsesOrMore(UsesLimit))
6838 return false;
6839 for (User *U : LI->users()) {
6840 if (auto *UI = dyn_cast<Instruction>(U); UI && isDeleted(UI))
6841 continue;
6842 if (const TreeEntry *UTE = getTreeEntry(U)) {
6843 for (int I : seq<int>(UTE->getNumOperands())) {
6844 if (all_of(UTE->getOperand(I),
6845 [LI](Value *V) { return V == LI; }))
6846 // Found legal broadcast - do not vectorize.
6847 return false;
6852 return true;
6854 AllowToVectorize = CheckIfAllowed(Slice);
6855 } else {
6856 AllowToVectorize =
6857 (NumElts >= 3 ||
6858 any_of(ValueToGatherNodes.at(Slice.front()),
6859 [=](const TreeEntry *TE) {
6860 return TE->Scalars.size() == 2 &&
6861 ((TE->Scalars.front() == Slice.front() &&
6862 TE->Scalars.back() == Slice.back()) ||
6863 (TE->Scalars.front() == Slice.back() &&
6864 TE->Scalars.back() == Slice.front()));
6865 })) &&
6866 hasFullVectorsOrPowerOf2(*TTI, Slice.front()->getType(),
6867 Slice.size());
6869 if (AllowToVectorize) {
6870 SmallVector<Value *> PointerOps;
6871 OrdersType CurrentOrder;
6872 // Try to build vector load.
6873 ArrayRef<Value *> Values(
6874 reinterpret_cast<Value *const *>(Slice.begin()), Slice.size());
6875 LoadsState LS = canVectorizeLoads(Values, Slice.front(), CurrentOrder,
6876 PointerOps, &BestVF);
6877 if (LS != LoadsState::Gather ||
6878 (BestVF > 1 && static_cast<unsigned>(NumElts) == 2 * BestVF)) {
6879 if (LS == LoadsState::ScatterVectorize) {
6880 if (MaskedGatherVectorized.empty() ||
6881 Cnt >= MaskedGatherVectorized.back() + NumElts)
6882 MaskedGatherVectorized.push_back(Cnt);
6883 continue;
6885 if (LS != LoadsState::Gather) {
6886 Results.emplace_back(Values, LS);
6887 VectorizedLoads.insert(Slice.begin(), Slice.end());
6888 // If we vectorized the initial block, no need to try to vectorize it
6889 // again.
6890 if (Cnt == StartIdx)
6891 StartIdx += NumElts;
6893 // Check if the whole array was vectorized already - exit.
6894 if (StartIdx >= Loads.size())
6895 break;
6896 // Erase last masked gather candidate, if another candidate within
6897 // the range is found to be better.
6898 if (!MaskedGatherVectorized.empty() &&
6899 Cnt < MaskedGatherVectorized.back() + NumElts)
6900 MaskedGatherVectorized.pop_back();
6901 Cnt += NumElts - 1;
6902 continue;
6905 if (!AllowToVectorize || BestVF == 0)
6906 registerNonVectorizableLoads(Slice);
6908 // Mark masked gathers candidates as vectorized, if any.
6909 for (unsigned Cnt : MaskedGatherVectorized) {
6910 ArrayRef<LoadInst *> Slice = ArrayRef(Loads).slice(
6911 Cnt, std::min<unsigned>(NumElts, Loads.size() - Cnt));
6912 ArrayRef<Value *> Values(
6913 reinterpret_cast<Value *const *>(Slice.begin()), Slice.size());
6914 Results.emplace_back(Values, LoadsState::ScatterVectorize);
6915 VectorizedLoads.insert(Slice.begin(), Slice.end());
6916 // If we vectorized the initial block, no need to try to vectorize it again.
6917 if (Cnt == StartIdx)
6918 StartIdx += NumElts;
6921 for (LoadInst *LI : Loads) {
6922 if (!VectorizedLoads.contains(LI))
6923 NonVectorized.push_back(LI);
6925 return Results;
6927 auto ProcessGatheredLoads =
6928 [&, &TTI = *TTI](
6929 ArrayRef<SmallVector<std::pair<LoadInst *, int>>> GatheredLoads,
6930 bool Final = false) {
6931 SmallVector<LoadInst *> NonVectorized;
6932 for (ArrayRef<std::pair<LoadInst *, int>> LoadsDists : GatheredLoads) {
6933 if (LoadsDists.size() <= 1) {
6934 NonVectorized.push_back(LoadsDists.back().first);
6935 continue;
6937 SmallVector<std::pair<LoadInst *, int>> LocalLoadsDists(LoadsDists);
6938 SmallVector<LoadInst *> OriginalLoads(LocalLoadsDists.size());
6939 transform(
6940 LoadsDists, OriginalLoads.begin(),
6941 [](const std::pair<LoadInst *, int> &L) { return L.first; });
6942 stable_sort(LocalLoadsDists, LoadSorter);
6943 SmallVector<LoadInst *> Loads;
6944 unsigned MaxConsecutiveDistance = 0;
6945 unsigned CurrentConsecutiveDist = 1;
6946 int LastDist = LocalLoadsDists.front().second;
6947 bool AllowMaskedGather = IsMaskedGatherSupported(OriginalLoads);
6948 for (const std::pair<LoadInst *, int> &L : LocalLoadsDists) {
6949 if (getTreeEntry(L.first))
6950 continue;
6951 assert(LastDist >= L.second &&
6952 "Expected first distance always not less than second");
6953 if (static_cast<unsigned>(LastDist - L.second) ==
6954 CurrentConsecutiveDist) {
6955 ++CurrentConsecutiveDist;
6956 MaxConsecutiveDistance =
6957 std::max(MaxConsecutiveDistance, CurrentConsecutiveDist);
6958 Loads.push_back(L.first);
6959 continue;
6961 if (!AllowMaskedGather && CurrentConsecutiveDist == 1 &&
6962 !Loads.empty())
6963 Loads.pop_back();
6964 CurrentConsecutiveDist = 1;
6965 LastDist = L.second;
6966 Loads.push_back(L.first);
6968 if (Loads.size() <= 1)
6969 continue;
6970 if (AllowMaskedGather)
6971 MaxConsecutiveDistance = Loads.size();
6972 else if (MaxConsecutiveDistance < 2)
6973 continue;
6974 BoUpSLP::ValueSet VectorizedLoads;
6975 SmallVector<LoadInst *> SortedNonVectorized;
6976 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>> Results =
6977 GetVectorizedRanges(Loads, VectorizedLoads, SortedNonVectorized,
6978 Final, MaxConsecutiveDistance);
6979 if (!Results.empty() && !SortedNonVectorized.empty() &&
6980 OriginalLoads.size() == Loads.size() &&
6981 MaxConsecutiveDistance == Loads.size() &&
6982 all_of(Results,
6983 [](const std::pair<ArrayRef<Value *>, LoadsState> &P) {
6984 return P.second == LoadsState::ScatterVectorize;
6985 })) {
6986 VectorizedLoads.clear();
6987 SmallVector<LoadInst *> UnsortedNonVectorized;
6988 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>>
6989 UnsortedResults =
6990 GetVectorizedRanges(OriginalLoads, VectorizedLoads,
6991 UnsortedNonVectorized, Final,
6992 OriginalLoads.size());
6993 if (SortedNonVectorized.size() >= UnsortedNonVectorized.size()) {
6994 SortedNonVectorized.swap(UnsortedNonVectorized);
6995 Results.swap(UnsortedResults);
6998 for (auto [Slice, _] : Results) {
6999 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize gathered loads ("
7000 << Slice.size() << ")\n");
7001 if (any_of(Slice, [&](Value *V) { return getTreeEntry(V); })) {
7002 for (Value *L : Slice)
7003 if (!getTreeEntry(L))
7004 SortedNonVectorized.push_back(cast<LoadInst>(L));
7005 continue;
7008 // Select the maximum VF as the maximum of the user gathered nodes' sizes
7009 // and the distance between scalar loads in these nodes.
7010 unsigned MaxVF = Slice.size();
7011 unsigned UserMaxVF = 0;
7012 unsigned InterleaveFactor = 0;
7013 if (MaxVF == 2) {
7014 UserMaxVF = MaxVF;
7015 } else {
7016 // Distance between segments of the interleaved loads, if found.
7017 std::optional<unsigned> InterleavedLoadsDistance = 0;
7018 unsigned Order = 0;
7019 std::optional<unsigned> CommonVF = 0;
7020 DenseMap<const TreeEntry *, unsigned> EntryToPosition;
7021 SmallPtrSet<const TreeEntry *, 8> DeinterleavedNodes;
7022 for (auto [Idx, V] : enumerate(Slice)) {
7023 for (const TreeEntry *E : ValueToGatherNodes.at(V)) {
7024 UserMaxVF = std::max<unsigned>(UserMaxVF, E->Scalars.size());
7025 unsigned Pos =
7026 EntryToPosition.try_emplace(E, Idx).first->second;
7027 UserMaxVF = std::max<unsigned>(UserMaxVF, Idx - Pos + 1);
7028 if (CommonVF) {
7029 if (*CommonVF == 0) {
7030 CommonVF = E->Scalars.size();
7031 continue;
7033 if (*CommonVF != E->Scalars.size())
7034 CommonVF.reset();
7036 // Check if the load is part of the interleaved load.
7037 if (Pos != Idx && InterleavedLoadsDistance) {
7038 if (!DeinterleavedNodes.contains(E) &&
7039 any_of(E->Scalars, [&, Slice = Slice](Value *V) {
7040 if (isa<Constant>(V))
7041 return false;
7042 if (getTreeEntry(V))
7043 return true;
7044 const auto &Nodes = ValueToGatherNodes.at(V);
7045 return (Nodes.size() != 1 || !Nodes.contains(E)) &&
7046 !is_contained(Slice, V);
7047 })) {
7048 InterleavedLoadsDistance.reset();
7049 continue;
7051 DeinterleavedNodes.insert(E);
7052 if (*InterleavedLoadsDistance == 0) {
7053 InterleavedLoadsDistance = Idx - Pos;
7054 continue;
7056 if ((Idx - Pos) % *InterleavedLoadsDistance != 0 ||
7057 (Idx - Pos) / *InterleavedLoadsDistance < Order)
7058 InterleavedLoadsDistance.reset();
7059 Order = (Idx - Pos) / InterleavedLoadsDistance.value_or(1);
7063 DeinterleavedNodes.clear();
7064 // Check if the large load represents an interleaved load operation.
7065 if (InterleavedLoadsDistance.value_or(0) > 1 &&
7066 CommonVF.value_or(0) != 0) {
7067 InterleaveFactor = bit_ceil(*InterleavedLoadsDistance);
7068 unsigned VF = *CommonVF;
7069 OrdersType Order;
7070 SmallVector<Value *> PointerOps;
7071 // Segmented load detected - vectorize at maximum vector factor.
7072 if (InterleaveFactor <= Slice.size() &&
7073 TTI.isLegalInterleavedAccessType(
7074 getWidenedType(Slice.front()->getType(), VF),
7075 InterleaveFactor,
7076 cast<LoadInst>(Slice.front())->getAlign(),
7077 cast<LoadInst>(Slice.front())
7078 ->getPointerAddressSpace()) &&
7079 canVectorizeLoads(Slice, Slice.front(), Order,
7080 PointerOps) == LoadsState::Vectorize) {
7081 UserMaxVF = InterleaveFactor * VF;
7082 } else {
7083 InterleaveFactor = 0;
7086 // Cannot represent the loads as consecutive vectorizable nodes -
7087 // just exit.
7088 unsigned ConsecutiveNodesSize = 0;
7089 if (!LoadEntriesToVectorize.empty() && InterleaveFactor == 0 &&
7090 any_of(zip(LoadEntriesToVectorize, LoadSetsToVectorize),
7091 [&, Slice = Slice](const auto &P) {
7092 const auto *It = find_if(Slice, [&](Value *V) {
7093 return std::get<1>(P).contains(V);
7095 if (It == Slice.end())
7096 return false;
7097 ArrayRef<Value *> VL =
7098 VectorizableTree[std::get<0>(P)]->Scalars;
7099 ConsecutiveNodesSize += VL.size();
7100 unsigned Start = std::distance(Slice.begin(), It);
7101 unsigned Sz = Slice.size() - Start;
7102 return Sz < VL.size() ||
7103 Slice.slice(std::distance(Slice.begin(), It),
7104 VL.size()) != VL;
7106 continue;
7107 // Try to build long masked gather loads.
7108 UserMaxVF = bit_ceil(UserMaxVF);
7109 if (InterleaveFactor == 0 &&
7110 any_of(seq<unsigned>(Slice.size() / UserMaxVF),
7111 [&, Slice = Slice](unsigned Idx) {
7112 OrdersType Order;
7113 SmallVector<Value *> PointerOps;
7114 return canVectorizeLoads(
7115 Slice.slice(Idx * UserMaxVF, UserMaxVF),
7116 Slice[Idx * UserMaxVF], Order,
7117 PointerOps) ==
7118 LoadsState::ScatterVectorize;
7120 UserMaxVF = MaxVF;
7121 if (Slice.size() != ConsecutiveNodesSize)
7122 MaxVF = std::min<unsigned>(MaxVF, UserMaxVF);
7124 for (unsigned VF = MaxVF; VF >= 2; VF /= 2) {
7125 bool IsVectorized = true;
7126 for (unsigned I = 0, E = Slice.size(); I < E; I += VF) {
7127 ArrayRef<Value *> SubSlice =
7128 Slice.slice(I, std::min(VF, E - I));
7129 if (getTreeEntry(SubSlice.front()))
7130 continue;
7131 // Check if the subslice is a to-be-vectorized entry which is not
7132 // equal to this entry.
7133 if (any_of(zip(LoadEntriesToVectorize, LoadSetsToVectorize),
7134 [&](const auto &P) {
7135 return !SubSlice.equals(
7136 VectorizableTree[std::get<0>(P)]
7137 ->Scalars) &&
7138 set_is_subset(SubSlice, std::get<1>(P));
7140 continue;
7141 unsigned Sz = VectorizableTree.size();
7142 buildTree_rec(SubSlice, 0, EdgeInfo(), InterleaveFactor);
7143 if (Sz == VectorizableTree.size()) {
7144 IsVectorized = false;
7145 // Try non-interleaved vectorization with smaller vector
7146 // factor.
7147 if (InterleaveFactor > 0) {
7148 VF = 2 * (MaxVF / InterleaveFactor);
7149 InterleaveFactor = 0;
7151 continue;
7154 if (IsVectorized)
7155 break;
7158 NonVectorized.append(SortedNonVectorized);
7160 return NonVectorized;
7162 for (const auto &GLs : GatheredLoads) {
7163 const auto &Ref = GLs.second;
7164 SmallVector<LoadInst *> NonVectorized = ProcessGatheredLoads(Ref);
7165 if (!Ref.empty() && !NonVectorized.empty() &&
7166 std::accumulate(
7167 Ref.begin(), Ref.end(), 0u,
7168 [](unsigned S, ArrayRef<std::pair<LoadInst *, int>> LoadsDists) {
7169 return S + LoadsDists.size();
7170 }) != NonVectorized.size() &&
7171 IsMaskedGatherSupported(NonVectorized)) {
7172 SmallVector<SmallVector<std::pair<LoadInst *, int>>> FinalGatheredLoads;
7173 for (LoadInst *LI : NonVectorized) {
7174 // Reinsert non-vectorized loads into another list of loads with the same
7175 // base pointers.
7176 gatherPossiblyVectorizableLoads(*this, LI, *DL, *SE, *TTI,
7177 FinalGatheredLoads,
7178 /*AddNew=*/false);
7180 // Final attempt to vectorize non-vectorized loads.
7181 (void)ProcessGatheredLoads(FinalGatheredLoads, /*Final=*/true);
7184 // Try to vectorize postponed load entries, previously marked as gathered.
7185 for (unsigned Idx : LoadEntriesToVectorize) {
7186 const TreeEntry &E = *VectorizableTree[Idx];
7187 SmallVector<Value *> GatheredScalars(E.Scalars.begin(), E.Scalars.end());
7188 // Avoid reordering, if possible.
7189 if (!E.ReorderIndices.empty()) {
7190 // Build a mask out of the reorder indices and reorder scalars per this
7191 // mask.
7192 SmallVector<int> ReorderMask;
7193 inversePermutation(E.ReorderIndices, ReorderMask);
7194 reorderScalars(GatheredScalars, ReorderMask);
7196 buildTree_rec(GatheredScalars, 0, EdgeInfo());
7198 // If no new entries were created, consider it as if no gathered-load entries
7199 // need to be handled.
7200 if (static_cast<unsigned>(*GatheredLoadsEntriesFirst) ==
7201 VectorizableTree.size())
7202 GatheredLoadsEntriesFirst.reset();
7205 /// \return true if the specified list of values has only one instruction that
7206 /// requires scheduling, false otherwise.
7207 #ifndef NDEBUG
7208 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
7209 Value *NeedsScheduling = nullptr;
7210 for (Value *V : VL) {
7211 if (doesNotNeedToBeScheduled(V))
7212 continue;
7213 if (!NeedsScheduling) {
7214 NeedsScheduling = V;
7215 continue;
7217 return false;
7219 return NeedsScheduling;
7221 #endif
7223 /// Generates a key/subkey pair for the given value to provide effective sorting
7224 /// of the values and better detection of vectorizable value sequences. The
7225 /// keys/subkeys can be used for better sorting of the values themselves (keys)
7226 /// and within value subgroups (subkeys).
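/// For example, a simple load hashes the load opcode and its type into the key
/// and lets \p LoadsSubkeyGenerator produce the subkey, while a volatile or
/// atomic load hashes the instruction itself into both key and subkey, so it is
/// effectively kept separate from other loads.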
7227 static std::pair<size_t, size_t> generateKeySubkey(
7228 Value *V, const TargetLibraryInfo *TLI,
7229 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
7230 bool AllowAlternate) {
7231 hash_code Key = hash_value(V->getValueID() + 2);
7232 hash_code SubKey = hash_value(0);
7233 // Sort the loads by the distance between the pointers.
7234 if (auto *LI = dyn_cast<LoadInst>(V)) {
7235 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key);
7236 if (LI->isSimple())
7237 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
7238 else
7239 Key = SubKey = hash_value(LI);
7240 } else if (isVectorLikeInstWithConstOps(V)) {
7241 // Sort extracts by the vector operands.
7242 if (isa<ExtractElementInst, UndefValue>(V))
7243 Key = hash_value(Value::UndefValueVal + 1);
7244 if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
7245 if (!isUndefVector(EI->getVectorOperand()).all() &&
7246 !isa<UndefValue>(EI->getIndexOperand()))
7247 SubKey = hash_value(EI->getVectorOperand());
7249 } else if (auto *I = dyn_cast<Instruction>(V)) {
7250 // Sort other instructions just by the opcodes except for CMPInst.
7251 // For CMP also sort by the predicate kind.
7252 if ((isa<BinaryOperator, CastInst>(I)) &&
7253 isValidForAlternation(I->getOpcode())) {
7254 if (AllowAlternate)
7255 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
7256 else
7257 Key = hash_combine(hash_value(I->getOpcode()), Key);
7258 SubKey = hash_combine(
7259 hash_value(I->getOpcode()), hash_value(I->getType()),
7260 hash_value(isa<BinaryOperator>(I)
7261 ? I->getType()
7262 : cast<CastInst>(I)->getOperand(0)->getType()));
7263 // For casts, look through the only operand to improve compile time.
7264 if (isa<CastInst>(I)) {
7265 std::pair<size_t, size_t> OpVals =
7266 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
7267 /*AllowAlternate=*/true);
7268 Key = hash_combine(OpVals.first, Key);
7269 SubKey = hash_combine(OpVals.first, SubKey);
7271 } else if (auto *CI = dyn_cast<CmpInst>(I)) {
7272 CmpInst::Predicate Pred = CI->getPredicate();
7273 if (CI->isCommutative())
7274 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
7275 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
7276 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
7277 hash_value(SwapPred),
7278 hash_value(CI->getOperand(0)->getType()));
7279 } else if (auto *Call = dyn_cast<CallInst>(I)) {
7280 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
7281 if (isTriviallyVectorizable(ID)) {
7282 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
7283 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
7284 SubKey = hash_combine(hash_value(I->getOpcode()),
7285 hash_value(Call->getCalledFunction()));
7286 } else {
7287 Key = hash_combine(hash_value(Call), Key);
7288 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
7290 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
7291 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
7292 hash_value(Op.Tag), SubKey);
7293 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
7294 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
7295 SubKey = hash_value(Gep->getPointerOperand());
7296 else
7297 SubKey = hash_value(Gep);
7298 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
7299 !isa<ConstantInt>(I->getOperand(1))) {
7300 // Do not try to vectorize instructions with potentially high cost.
7301 SubKey = hash_value(I);
7302 } else {
7303 SubKey = hash_value(I->getOpcode());
7305 Key = hash_combine(hash_value(I->getParent()), Key);
7307 return std::make_pair(Key, SubKey);
7310 /// Checks if the specified instruction \p I is an alternate operation for
7311 /// the given \p MainOp and \p AltOp instructions.
7312 static bool isAlternateInstruction(const Instruction *I,
7313 const Instruction *MainOp,
7314 const Instruction *AltOp,
7315 const TargetLibraryInfo &TLI);
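// A rough intuition for the profitability check below: an alternate bundle
// such as { a + b, c - d, e + f, g - h } is lowered to one vector add, one
// vector sub and a blending shuffle (the "main + alt + shuffle" counted as
// NumAltInsts below). If its operands would mostly have to be gathered with
// buildvector sequences anyway, paying that fixed overhead is usually not
// worth it, and the bundle is gathered instead.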
7317 bool BoUpSLP::areAltOperandsProfitable(const InstructionsState &S,
7318 ArrayRef<Value *> VL) const {
7319 unsigned Opcode0 = S.getOpcode();
7320 unsigned Opcode1 = S.getAltOpcode();
7321 SmallBitVector OpcodeMask(getAltInstrMask(VL, Opcode0, Opcode1));
7322 // If this pattern is supported by the target then consider it profitable.
7323 if (TTI->isLegalAltInstr(getWidenedType(S.MainOp->getType(), VL.size()),
7324 Opcode0, Opcode1, OpcodeMask))
7325 return true;
7326 SmallVector<ValueList> Operands;
7327 for (unsigned I : seq<unsigned>(0, S.MainOp->getNumOperands())) {
7328 Operands.emplace_back();
7329 // Prepare the operand vector.
7330 for (Value *V : VL)
7331 Operands.back().push_back(cast<Instruction>(V)->getOperand(I));
7333 if (Operands.size() == 2) {
7334 // Try to find the best operand candidates.
7335 for (unsigned I : seq<unsigned>(0, VL.size() - 1)) {
7336 SmallVector<std::pair<Value *, Value *>> Candidates(3);
7337 Candidates[0] = std::make_pair(Operands[0][I], Operands[0][I + 1]);
7338 Candidates[1] = std::make_pair(Operands[0][I], Operands[1][I + 1]);
7339 Candidates[2] = std::make_pair(Operands[1][I], Operands[0][I + 1]);
7340 std::optional<int> Res = findBestRootPair(Candidates);
7341 switch (Res.value_or(0)) {
7342 case 0:
7343 break;
7344 case 1:
7345 std::swap(Operands[0][I + 1], Operands[1][I + 1]);
7346 break;
7347 case 2:
7348 std::swap(Operands[0][I], Operands[1][I]);
7349 break;
7350 default:
7351 llvm_unreachable("Unexpected index.");
7355 DenseSet<unsigned> UniqueOpcodes;
7356 constexpr unsigned NumAltInsts = 3; // main + alt + shuffle.
7357 unsigned NonInstCnt = 0;
7358 // Estimate the number of instructions required for the vectorized node and
7359 // for the buildvector node.
7360 unsigned UndefCnt = 0;
7361 // Count the number of extra shuffles required for vector nodes.
7362 unsigned ExtraShuffleInsts = 0;
7363 // Check that the operands do not contain the same values; create either a
7364 // perfect diamond match or a shuffled match.
7365 if (Operands.size() == 2) {
7366 // Do not count same operands twice.
7367 if (Operands.front() == Operands.back()) {
7368 Operands.erase(Operands.begin());
7369 } else if (!allConstant(Operands.front()) &&
7370 all_of(Operands.front(), [&](Value *V) {
7371 return is_contained(Operands.back(), V);
7372 })) {
7373 Operands.erase(Operands.begin());
7374 ++ExtraShuffleInsts;
7377 const Loop *L = LI->getLoopFor(S.MainOp->getParent());
7378 // Vectorize the node if:
7379 // 1. at least one operand is constant or a splat.
7380 // 2. the operands have many loop invariants (while the instructions
7381 // themselves are not loop invariant).
7382 // 3. at least one unique operand is expected to be vectorized.
7383 return none_of(Operands,
7384 [&](ArrayRef<Value *> Op) {
7385 if (allConstant(Op) ||
7386 (!isSplat(Op) && allSameBlock(Op) && allSameType(Op) &&
7387 getSameOpcode(Op, *TLI).MainOp))
7388 return false;
7389 DenseMap<Value *, unsigned> Uniques;
7390 for (Value *V : Op) {
7391 if (isa<Constant, ExtractElementInst>(V) ||
7392 getTreeEntry(V) || (L && L->isLoopInvariant(V))) {
7393 if (isa<UndefValue>(V))
7394 ++UndefCnt;
7395 continue;
7397 auto Res = Uniques.try_emplace(V, 0);
7398 // Found first duplicate - need to add shuffle.
7399 if (!Res.second && Res.first->second == 1)
7400 ++ExtraShuffleInsts;
7401 ++Res.first->getSecond();
7402 if (auto *I = dyn_cast<Instruction>(V))
7403 UniqueOpcodes.insert(I->getOpcode());
7404 else if (Res.second)
7405 ++NonInstCnt;
7407 return none_of(Uniques, [&](const auto &P) {
7408 return P.first->hasNUsesOrMore(P.second + 1) &&
7409 none_of(P.first->users(), [&](User *U) {
7410 return getTreeEntry(U) || Uniques.contains(U);
7413 }) ||
7414 // Do not vectorize the node if the estimated number of vector instructions
7415 // is greater than the estimated number of buildvector instructions. The
7416 // number of vector operands is the number of vector instructions + the
7417 // number of vector instructions for the operands (buildvectors). The number
7418 // of buildvector instructions is just number_of_operands * number_of_scalars.
7419 (UndefCnt < (VL.size() - 1) * S.MainOp->getNumOperands() &&
7420 (UniqueOpcodes.size() + NonInstCnt + ExtraShuffleInsts +
7421 NumAltInsts) < S.MainOp->getNumOperands() * VL.size());
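// getScalarsVectorizationState() decides, per bundle, how the scalars can be
// handled: vectorized directly (Vectorize), vectorized via strided or
// masked-gather loads (StridedVectorize / ScatterVectorize), or turned into a
// buildvector (NeedToGather). For example, loads from consecutive addresses
// map to Vectorize, while loads from unrelated pointers typically end up as
// ScatterVectorize or NeedToGather, depending on what canVectorizeLoads()
// reports.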
7424 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
7425 InstructionsState &S, ArrayRef<Value *> VL, bool IsScatterVectorizeUserTE,
7426 OrdersType &CurrentOrder, SmallVectorImpl<Value *> &PointerOps) {
7427 assert(S.MainOp && "Expected instructions with same/alternate opcodes only.");
7429 unsigned ShuffleOrOp =
7430 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode();
7431 auto *VL0 = cast<Instruction>(S.OpValue);
7432 switch (ShuffleOrOp) {
7433 case Instruction::PHI: {
7434 // Too many operands - gather, most probably won't be vectorized.
7435 if (VL0->getNumOperands() > MaxPHINumOperands)
7436 return TreeEntry::NeedToGather;
7437 // Check for terminator values (e.g. invoke).
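// An incoming value produced by an invoke is a terminator of its block;
// there is no insertion point after it for any shuffle the vectorized PHI
// might need, so such bundles are gathered instead.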
7438 for (Value *V : VL)
7439 for (Value *Incoming : cast<PHINode>(V)->incoming_values()) {
7440 Instruction *Term = dyn_cast<Instruction>(Incoming);
7441 if (Term && Term->isTerminator()) {
7442 LLVM_DEBUG(dbgs()
7443 << "SLP: Need to swizzle PHINodes (terminator use).\n");
7444 return TreeEntry::NeedToGather;
7448 return TreeEntry::Vectorize;
7450 case Instruction::ExtractValue:
7451 case Instruction::ExtractElement: {
7452 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
7453 // FIXME: Vectorizing is not supported yet for non-power-of-2 ops.
7454 if (!has_single_bit(VL.size()))
7455 return TreeEntry::NeedToGather;
7456 if (Reuse || !CurrentOrder.empty())
7457 return TreeEntry::Vectorize;
7458 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
7459 return TreeEntry::NeedToGather;
7461 case Instruction::InsertElement: {
7462 // Check that we have a buildvector and not a shuffle of 2 or more
7463 // different vectors.
7464 ValueSet SourceVectors;
7465 for (Value *V : VL) {
7466 SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
7467 assert(getElementIndex(V) != std::nullopt &&
7468 "Non-constant or undef index?");
7471 if (count_if(VL, [&SourceVectors](Value *V) {
7472 return !SourceVectors.contains(V);
7473 }) >= 2) {
7474 // Found 2nd source vector - cancel.
7475 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
7476 "different source vectors.\n");
7477 return TreeEntry::NeedToGather;
7480 if (any_of(VL, [&SourceVectors](Value *V) {
7481 // The last InsertElement can have multiple uses.
7482 return SourceVectors.contains(V) && !V->hasOneUse();
7483 })) {
7484 assert(SLPReVec && "Only supported by REVEC.");
7485 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
7486 "multiple uses.\n");
7487 return TreeEntry::NeedToGather;
7490 return TreeEntry::Vectorize;
7492 case Instruction::Load: {
7493 // Check that a vectorized load would load the same memory as a scalar
7494 // load. For example, we don't want to vectorize loads that are smaller
7495 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
7496 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
7497 // from such a struct, we read/write packed bits disagreeing with the
7498 // unvectorized version.
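// For example, loads of p[0], p[1], p[2], p[3] are 'Vectorize'; loads with a
// constant stride such as p[0], p[2], p[4], p[6] may become
// 'StridedVectorize'; loads from unrelated pointers fall back to
// 'ScatterVectorize' or 'Gather'.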
7499 switch (canVectorizeLoads(VL, VL0, CurrentOrder, PointerOps)) {
7500 case LoadsState::Vectorize:
7501 return TreeEntry::Vectorize;
7502 case LoadsState::ScatterVectorize:
7503 if (!IsGraphTransformMode && !VectorizableTree.empty()) {
7504 // Delay slow vectorized nodes for better vectorization attempts.
7505 LoadEntriesToVectorize.insert(VectorizableTree.size());
7506 return TreeEntry::NeedToGather;
7508 return TreeEntry::ScatterVectorize;
7509 case LoadsState::StridedVectorize:
7510 if (!IsGraphTransformMode && VectorizableTree.size() > 1) {
7511 // Delay slow vectorized nodes for better vectorization attempts.
7512 LoadEntriesToVectorize.insert(VectorizableTree.size());
7513 return TreeEntry::NeedToGather;
7515 return TreeEntry::StridedVectorize;
7516 case LoadsState::Gather:
7517 #ifndef NDEBUG
7518 Type *ScalarTy = VL0->getType();
7519 if (DL->getTypeSizeInBits(ScalarTy) !=
7520 DL->getTypeAllocSizeInBits(ScalarTy))
7521 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
7522 else if (any_of(VL,
7523 [](Value *V) { return !cast<LoadInst>(V)->isSimple(); }))
7524 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
7525 else
7526 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
7527 #endif // NDEBUG
7528 registerNonVectorizableLoads(VL);
7529 return TreeEntry::NeedToGather;
7531 llvm_unreachable("Unexpected state of loads");
7533 case Instruction::ZExt:
7534 case Instruction::SExt:
7535 case Instruction::FPToUI:
7536 case Instruction::FPToSI:
7537 case Instruction::FPExt:
7538 case Instruction::PtrToInt:
7539 case Instruction::IntToPtr:
7540 case Instruction::SIToFP:
7541 case Instruction::UIToFP:
7542 case Instruction::Trunc:
7543 case Instruction::FPTrunc:
7544 case Instruction::BitCast: {
7545 Type *SrcTy = VL0->getOperand(0)->getType();
7546 for (Value *V : VL) {
7547 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
7548 if (Ty != SrcTy || !isValidElementType(Ty)) {
7549 LLVM_DEBUG(
7550 dbgs() << "SLP: Gathering casts with different src types.\n");
7551 return TreeEntry::NeedToGather;
7554 return TreeEntry::Vectorize;
7556 case Instruction::ICmp:
7557 case Instruction::FCmp: {
7558 // Check that all of the compares have the same predicate.
7559 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
7560 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
7561 Type *ComparedTy = VL0->getOperand(0)->getType();
7562 for (Value *V : VL) {
7563 CmpInst *Cmp = cast<CmpInst>(V);
7564 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
7565 Cmp->getOperand(0)->getType() != ComparedTy) {
7566 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
7567 return TreeEntry::NeedToGather;
7570 return TreeEntry::Vectorize;
7572 case Instruction::Select:
7573 case Instruction::FNeg:
7574 case Instruction::Add:
7575 case Instruction::FAdd:
7576 case Instruction::Sub:
7577 case Instruction::FSub:
7578 case Instruction::Mul:
7579 case Instruction::FMul:
7580 case Instruction::UDiv:
7581 case Instruction::SDiv:
7582 case Instruction::FDiv:
7583 case Instruction::URem:
7584 case Instruction::SRem:
7585 case Instruction::FRem:
7586 case Instruction::Shl:
7587 case Instruction::LShr:
7588 case Instruction::AShr:
7589 case Instruction::And:
7590 case Instruction::Or:
7591 case Instruction::Xor:
7592 case Instruction::Freeze:
7593 if (S.MainOp->getType()->isFloatingPointTy() &&
7594 TTI->isFPVectorizationPotentiallyUnsafe() && any_of(VL, [](Value *V) {
7595 auto *I = dyn_cast<Instruction>(V);
7596 return I && I->isBinaryOp() && !I->isFast();
7598 return TreeEntry::NeedToGather;
7599 return TreeEntry::Vectorize;
7600 case Instruction::GetElementPtr: {
7601 // We don't combine GEPs with complicated (nested) indexing.
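// E.g. 'getelementptr i32, ptr %p, i64 1' (a single index) is acceptable,
// while 'getelementptr [4 x i32], ptr %p, i64 0, i64 1' has more than two
// operands (nested indexing) and forces a gather.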
7602 for (Value *V : VL) {
7603 auto *I = dyn_cast<GetElementPtrInst>(V);
7604 if (!I)
7605 continue;
7606 if (I->getNumOperands() != 2) {
7607 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
7608 return TreeEntry::NeedToGather;
7612 // We can't combine several GEPs into one vector if they operate on
7613 // different types.
7614 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
7615 for (Value *V : VL) {
7616 auto *GEP = dyn_cast<GEPOperator>(V);
7617 if (!GEP)
7618 continue;
7619 Type *CurTy = GEP->getSourceElementType();
7620 if (Ty0 != CurTy) {
7621 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
7622 return TreeEntry::NeedToGather;
7626 // We don't combine GEPs with non-constant indexes.
7627 Type *Ty1 = VL0->getOperand(1)->getType();
7628 for (Value *V : VL) {
7629 auto *I = dyn_cast<GetElementPtrInst>(V);
7630 if (!I)
7631 continue;
7632 auto *Op = I->getOperand(1);
7633 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
7634 (Op->getType() != Ty1 &&
7635 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
7636 Op->getType()->getScalarSizeInBits() >
7637 DL->getIndexSizeInBits(
7638 V->getType()->getPointerAddressSpace())))) {
7639 LLVM_DEBUG(
7640 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
7641 return TreeEntry::NeedToGather;
7645 return TreeEntry::Vectorize;
7647 case Instruction::Store: {
7648 // Check if the stores are consecutive or if we need to swizzle them.
7649 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
7650 // Avoid types that are padded when being allocated as scalars, while
7651 // being packed together in a vector (such as i1).
7652 if (DL->getTypeSizeInBits(ScalarTy) !=
7653 DL->getTypeAllocSizeInBits(ScalarTy)) {
7654 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
7655 return TreeEntry::NeedToGather;
7657 // Make sure all stores in the bundle are simple - we can't vectorize
7658 // atomic or volatile stores.
7659 for (Value *V : VL) {
7660 auto *SI = cast<StoreInst>(V);
7661 if (!SI->isSimple()) {
7662 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
7663 return TreeEntry::NeedToGather;
7665 PointerOps.push_back(SI->getPointerOperand());
7668 // Check the order of pointer operands.
7669 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
7670 Value *Ptr0;
7671 Value *PtrN;
7672 if (CurrentOrder.empty()) {
7673 Ptr0 = PointerOps.front();
7674 PtrN = PointerOps.back();
7675 } else {
7676 Ptr0 = PointerOps[CurrentOrder.front()];
7677 PtrN = PointerOps[CurrentOrder.back()];
7679 std::optional<int> Dist =
7680 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
7681 // Check that the sorted pointer operands are consecutive.
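// E.g. stores to p+0, p+1, p+2, p+3 (in any order) give *Dist == 3, which
// equals VL.size() - 1, so the bundle is treated as consecutive.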
7682 if (static_cast<unsigned>(*Dist) == VL.size() - 1)
7683 return TreeEntry::Vectorize;
7686 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
7687 return TreeEntry::NeedToGather;
7689 case Instruction::Call: {
7690 if (S.MainOp->getType()->isFloatingPointTy() &&
7691 TTI->isFPVectorizationPotentiallyUnsafe() && any_of(VL, [](Value *V) {
7692 auto *I = dyn_cast<Instruction>(V);
7693 return I && !I->isFast();
7695 return TreeEntry::NeedToGather;
7696 // Check if the calls are all to the same vectorizable intrinsic or
7697 // library function.
7698 CallInst *CI = cast<CallInst>(VL0);
7699 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
7701 VFShape Shape = VFShape::get(
7702 CI->getFunctionType(),
7703 ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
7704 false /*HasGlobalPred*/);
7705 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
7707 if (!VecFunc && !isTriviallyVectorizable(ID)) {
7708 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
7709 return TreeEntry::NeedToGather;
7711 Function *F = CI->getCalledFunction();
7712 unsigned NumArgs = CI->arg_size();
7713 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr);
7714 for (unsigned J = 0; J != NumArgs; ++J)
7715 if (isVectorIntrinsicWithScalarOpAtArg(ID, J))
7716 ScalarArgs[J] = CI->getArgOperand(J);
7717 for (Value *V : VL) {
7718 CallInst *CI2 = dyn_cast<CallInst>(V);
7719 if (!CI2 || CI2->getCalledFunction() != F ||
7720 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
7721 (VecFunc &&
7722 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
7723 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
7724 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
7725 << "\n");
7726 return TreeEntry::NeedToGather;
7728 // Some intrinsics have scalar arguments, and these must be the same in
7729 // order for the calls to be vectorized.
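// E.g. for an intrinsic such as llvm.powi the integer exponent is a scalar
// operand and has to match across the whole bundle.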
7730 for (unsigned J = 0; J != NumArgs; ++J) {
7731 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) {
7732 Value *A1J = CI2->getArgOperand(J);
7733 if (ScalarArgs[J] != A1J) {
7734 LLVM_DEBUG(dbgs()
7735 << "SLP: mismatched arguments in call:" << *CI
7736 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n");
7737 return TreeEntry::NeedToGather;
7741 // Verify that the bundle operands are identical between the two calls.
7742 if (CI->hasOperandBundles() &&
7743 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
7744 CI->op_begin() + CI->getBundleOperandsEndIndex(),
7745 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
7746 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
7747 << "!=" << *V << '\n');
7748 return TreeEntry::NeedToGather;
7752 return TreeEntry::Vectorize;
7754 case Instruction::ShuffleVector: {
7755 if (!S.isAltShuffle()) {
7756 // REVEC can support non-alternate shuffles.
7757 if (SLPReVec && getShufflevectorNumGroups(VL))
7758 return TreeEntry::Vectorize;
7759 // If this is not an alternating sequence of opcodes (like add-sub),
7760 // then do not vectorize this instruction.
7761 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
7762 return TreeEntry::NeedToGather;
7764 if (!SLPSkipEarlyProfitabilityCheck && !areAltOperandsProfitable(S, VL)) {
7765 LLVM_DEBUG(
7766 dbgs()
7767 << "SLP: ShuffleVector not vectorized, operands are buildvector and "
7768 "the whole alt sequence is not profitable.\n");
7769 return TreeEntry::NeedToGather;
7772 return TreeEntry::Vectorize;
7774 default:
7775 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
7776 return TreeEntry::NeedToGather;
7780 namespace {
7781 /// Helps to correctly handle the operands of phi nodes, based on the \p Main
7782 /// PHINode's order of incoming basic blocks/values.
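/// Different phis in the bundle may list the same incoming blocks in a
/// different order, so the operands are collected per incoming block of
/// \p Main (falling back to getIncomingValueForBlock()) rather than blindly
/// by incoming-value index.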
7783 class PHIHandler {
7784 DominatorTree &DT;
7785 PHINode *Main = nullptr;
7786 SmallVector<Value *> Phis;
7787 SmallVector<SmallVector<Value *>> Operands;
7789 public:
7790 PHIHandler() = delete;
7791 PHIHandler(DominatorTree &DT, PHINode *Main, ArrayRef<Value *> Phis)
7792 : DT(DT), Main(Main), Phis(Phis),
7793 Operands(Main->getNumIncomingValues(),
7794 SmallVector<Value *>(Phis.size(), nullptr)) {}
7795 void buildOperands() {
7796 constexpr unsigned FastLimit = 4;
7797 if (Main->getNumIncomingValues() <= FastLimit) {
7798 for (unsigned I : seq<unsigned>(0, Main->getNumIncomingValues())) {
7799 BasicBlock *InBB = Main->getIncomingBlock(I);
7800 if (!DT.isReachableFromEntry(InBB)) {
7801 Operands[I].assign(Phis.size(), PoisonValue::get(Main->getType()));
7802 continue;
7804 // Prepare the operand vector.
7805 for (auto [Idx, V] : enumerate(Phis)) {
7806 auto *P = cast<PHINode>(V);
7807 if (P->getIncomingBlock(I) == InBB)
7808 Operands[I][Idx] = P->getIncomingValue(I);
7809 else
7810 Operands[I][Idx] = P->getIncomingValueForBlock(InBB);
7813 return;
7815 SmallDenseMap<BasicBlock *, SmallVector<unsigned>, 4> Blocks;
7816 for (unsigned I : seq<unsigned>(0, Main->getNumIncomingValues())) {
7817 BasicBlock *InBB = Main->getIncomingBlock(I);
7818 if (!DT.isReachableFromEntry(InBB)) {
7819 Operands[I].assign(Phis.size(), PoisonValue::get(Main->getType()));
7820 continue;
7822 Blocks.try_emplace(InBB).first->second.push_back(I);
7824 for (auto [Idx, V] : enumerate(Phis)) {
7825 auto *P = cast<PHINode>(V);
7826 for (unsigned I : seq<unsigned>(0, P->getNumIncomingValues())) {
7827 BasicBlock *InBB = P->getIncomingBlock(I);
7828 if (InBB == Main->getIncomingBlock(I)) {
7829 if (isa_and_nonnull<PoisonValue>(Operands[I][Idx]))
7830 continue;
7831 Operands[I][Idx] = P->getIncomingValue(I);
7832 continue;
7834 auto It = Blocks.find(InBB);
7835 if (It == Blocks.end())
7836 continue;
7837 Operands[It->second.front()][Idx] = P->getIncomingValue(I);
7840 for (const auto &P : Blocks) {
7841 if (P.getSecond().size() <= 1)
7842 continue;
7843 unsigned BasicI = P.getSecond().front();
7844 for (unsigned I : ArrayRef(P.getSecond()).drop_front()) {
7845 assert(all_of(enumerate(Operands[I]),
7846 [&](const auto &Data) {
7847 return !Data.value() ||
7848 Data.value() == Operands[BasicI][Data.index()];
7849 }) &&
7850 "Expected empty operands list.");
7851 Operands[I] = Operands[BasicI];
7855 ArrayRef<Value *> getOperands(unsigned I) const { return Operands[I]; }
7857 } // namespace
7859 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
7860 const EdgeInfo &UserTreeIdx,
7861 unsigned InterleaveFactor) {
7862 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
7864 SmallVector<int> ReuseShuffleIndices;
7865 SmallVector<Value *> UniqueValues;
7866 SmallVector<Value *> NonUniqueValueVL;
7867 auto TryToFindDuplicates = [&](const InstructionsState &S,
7868 bool DoNotFail = false) {
7869 // Check that every instruction appears once in this bundle.
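// E.g. a bundle of non-constant scalars VL = { %a, %b, %a, %c } is shrunk to
// UniqueValues = { %a, %b, %c } with ReuseShuffleIndices = { 0, 1, 0, 2 }
// describing how to rebuild the original lane order from the unique scalars.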
7870 SmallDenseMap<Value *, unsigned, 16> UniquePositions(VL.size());
7871 for (Value *V : VL) {
7872 if (isConstant(V)) {
7873 ReuseShuffleIndices.emplace_back(
7874 isa<UndefValue>(V) ? PoisonMaskElem : UniqueValues.size());
7875 UniqueValues.emplace_back(V);
7876 continue;
7878 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
7879 ReuseShuffleIndices.emplace_back(Res.first->second);
7880 if (Res.second)
7881 UniqueValues.emplace_back(V);
7883 size_t NumUniqueScalarValues = UniqueValues.size();
7884 bool IsFullVectors = hasFullVectorsOrPowerOf2(
7885 *TTI, getValueType(UniqueValues.front()), NumUniqueScalarValues);
7886 if (NumUniqueScalarValues == VL.size() &&
7887 (VectorizeNonPowerOf2 || IsFullVectors)) {
7888 ReuseShuffleIndices.clear();
7889 } else {
7890 // FIXME: Reshuffling scalars is not supported yet for non-power-of-2 ops.
7891 if ((UserTreeIdx.UserTE &&
7892 UserTreeIdx.UserTE->hasNonWholeRegisterOrNonPowerOf2Vec(*TTI)) ||
7893 !has_single_bit(VL.size())) {
7894 LLVM_DEBUG(dbgs() << "SLP: Reshuffling scalars not yet supported "
7895 "for nodes with padding.\n");
7896 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
7897 return false;
7899 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
7900 if (NumUniqueScalarValues <= 1 || !IsFullVectors ||
7901 (UniquePositions.size() == 1 && all_of(UniqueValues, [](Value *V) {
7902 return isa<UndefValue>(V) || !isConstant(V);
7903 }))) {
7904 if (DoNotFail && UniquePositions.size() > 1 &&
7905 NumUniqueScalarValues > 1 && S.MainOp->isSafeToRemove() &&
7906 all_of(UniqueValues, [=](Value *V) {
7907 return isa<ExtractElementInst>(V) ||
7908 areAllUsersVectorized(cast<Instruction>(V),
7909 UserIgnoreList);
7910 })) {
7911 // Find the number of elements that forms full vectors.
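// E.g. with 7 unique scalars on a target where 8 elements form a full vector,
// the bundle can be rebuilt from the unique values padded with an extra copy
// of the last one, so the node still covers whole registers.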
7912 unsigned PWSz = getFullVectorNumberOfElements(
7913 *TTI, UniqueValues.front()->getType(), UniqueValues.size());
7914 if (PWSz == VL.size()) {
7915 ReuseShuffleIndices.clear();
7916 } else {
7917 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end());
7918 NonUniqueValueVL.append(PWSz - UniqueValues.size(),
7919 UniqueValues.back());
7920 VL = NonUniqueValueVL;
7922 return true;
7924 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
7925 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
7926 return false;
7928 VL = UniqueValues;
7930 return true;
7933 InstructionsState S = getSameOpcode(VL, *TLI);
7935 // Don't go into catchswitch blocks, which can happen with PHIs.
7936 // Such blocks can only have PHIs and the catchswitch. There is no
7937 // place to insert a shuffle if we need to, so just avoid that issue.
7938 if (S.MainOp &&
7939 isa<CatchSwitchInst>(S.MainOp->getParent()->getTerminator())) {
7940 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
7941 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
7942 return;
7945 // Check if this is a duplicate of another entry.
7946 if (S.getOpcode()) {
7947 if (TreeEntry *E = getTreeEntry(S.OpValue)) {
7948 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.OpValue << ".\n");
7949 if (GatheredLoadsEntriesFirst.has_value() || !E->isSame(VL)) {
7950 auto It = MultiNodeScalars.find(S.OpValue);
7951 if (It != MultiNodeScalars.end()) {
7952 auto *TEIt = find_if(It->getSecond(),
7953 [&](TreeEntry *ME) { return ME->isSame(VL); });
7954 if (TEIt != It->getSecond().end())
7955 E = *TEIt;
7956 else
7957 E = nullptr;
7958 } else {
7959 E = nullptr;
7962 if (!E) {
7963 if (!doesNotNeedToBeScheduled(S.OpValue)) {
7964 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
7965 if (TryToFindDuplicates(S))
7966 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
7967 ReuseShuffleIndices);
7968 return;
7970 SmallPtrSet<const TreeEntry *, 4> Nodes;
7971 Nodes.insert(getTreeEntry(S.OpValue));
7972 for (const TreeEntry *E : MultiNodeScalars.lookup(S.OpValue))
7973 Nodes.insert(E);
7974 SmallPtrSet<Value *, 8> Values(VL.begin(), VL.end());
7975 if (any_of(Nodes, [&](const TreeEntry *E) {
7976 if (all_of(E->Scalars,
7977 [&](Value *V) { return Values.contains(V); }))
7978 return true;
7979 SmallPtrSet<Value *, 8> EValues(E->Scalars.begin(),
7980 E->Scalars.end());
7981 return (
7982 all_of(VL, [&](Value *V) { return EValues.contains(V); }));
7983 })) {
7984 LLVM_DEBUG(dbgs() << "SLP: Gathering due to full overlap.\n");
7985 if (TryToFindDuplicates(S))
7986 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
7987 ReuseShuffleIndices);
7988 return;
7990 } else {
7991 // Record the reuse of the tree node. FIXME: currently this is only
7992 // used to properly draw the graph rather than for the actual
7993 // vectorization.
7994 E->UserTreeIndices.push_back(UserTreeIdx);
7995 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.OpValue
7996 << ".\n");
7997 return;
8002 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of
8003 // a load), in which case peek through to include it in the tree, without
8004 // ballooning over-budget.
8005 if (Depth >= RecursionMaxDepth &&
8006 !(S.MainOp && isa<Instruction>(S.MainOp) && S.MainOp == S.AltOp &&
8007 VL.size() >= 4 &&
8008 (match(S.MainOp, m_Load(m_Value())) || all_of(VL, [&S](const Value *I) {
8009 return match(I,
8010 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
8011 cast<Instruction>(I)->getOpcode() ==
8012 cast<Instruction>(S.MainOp)->getOpcode();
8013 })))) {
8014 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
8015 if (TryToFindDuplicates(S))
8016 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8017 ReuseShuffleIndices);
8018 return;
8021 // Don't handle scalable vectors
8022 if (S.getOpcode() == Instruction::ExtractElement &&
8023 isa<ScalableVectorType>(
8024 cast<ExtractElementInst>(S.OpValue)->getVectorOperandType())) {
8025 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
8026 if (TryToFindDuplicates(S))
8027 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8028 ReuseShuffleIndices);
8029 return;
8032 // Don't handle vectors.
8033 if (!SLPReVec && getValueType(VL.front())->isVectorTy()) {
8034 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
8035 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8036 return;
8039 // If all of the operands are identical or constant, we have a simple solution.
8040 // If we deal with insert/extract instructions, they all must have constant
8041 // indices; otherwise we should gather them, not try to vectorize.
8042 // If this is an alternate-op node with 2 elements whose operands are
8043 // gathered - do not vectorize.
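// E.g. a 2-element alternate bundle whose operands are all gathered values
// would spend a main op, an alt op and a blend shuffle just to produce two
// scalars, which is rarely profitable.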
8044 auto &&NotProfitableForVectorization = [&S, this,
8045 Depth](ArrayRef<Value *> VL) {
8046 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
8047 return false;
8048 if (VectorizableTree.size() < MinTreeSize)
8049 return false;
8050 if (Depth >= RecursionMaxDepth - 1)
8051 return true;
8052 // Check if all operands are extracts, are part of a vector node, or can
8053 // build a regular vectorized node.
8054 SmallVector<unsigned, 2> InstsCount(VL.size(), 0);
8055 for (Value *V : VL) {
8056 auto *I = cast<Instruction>(V);
8057 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
8058 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
8059 }));
8061 bool IsCommutative = isCommutative(S.MainOp) || isCommutative(S.AltOp);
8062 if ((IsCommutative &&
8063 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
8064 (!IsCommutative &&
8065 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
8066 return true;
8067 assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
8068 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
8069 auto *I1 = cast<Instruction>(VL.front());
8070 auto *I2 = cast<Instruction>(VL.back());
8071 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
8072 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
8073 I2->getOperand(Op));
8074 if (static_cast<unsigned>(count_if(
8075 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
8076 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
8077 })) >= S.MainOp->getNumOperands() / 2)
8078 return false;
8079 if (S.MainOp->getNumOperands() > 2)
8080 return true;
8081 if (IsCommutative) {
8082 // Check permuted operands.
8083 Candidates.clear();
8084 for (int Op = 0, E = S.MainOp->getNumOperands(); Op < E; ++Op)
8085 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
8086 I2->getOperand((Op + 1) % E));
8087 if (any_of(
8088 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
8089 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
8091 return false;
8093 return true;
8095 SmallVector<unsigned> SortedIndices;
8096 BasicBlock *BB = nullptr;
8097 bool IsScatterVectorizeUserTE =
8098 UserTreeIdx.UserTE &&
8099 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
8100 bool AreAllSameBlock = S.getOpcode() && allSameBlock(VL);
8101 bool AreScatterAllGEPSameBlock =
8102 (IsScatterVectorizeUserTE && VL.front()->getType()->isPointerTy() &&
8103 VL.size() > 2 &&
8104 all_of(VL,
8105 [&BB](Value *V) {
8106 auto *I = dyn_cast<GetElementPtrInst>(V);
8107 if (!I)
8108 return doesNotNeedToBeScheduled(V);
8109 if (!BB)
8110 BB = I->getParent();
8111 return BB == I->getParent() && I->getNumOperands() == 2;
8112 }) &&
8113 BB &&
8114 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
8115 SortedIndices));
8116 bool AreAllSameInsts = AreAllSameBlock || AreScatterAllGEPSameBlock;
8117 if (!AreAllSameInsts || (!S.getOpcode() && allConstant(VL)) || isSplat(VL) ||
8118 (isa_and_present<InsertElementInst, ExtractValueInst, ExtractElementInst>(
8119 S.OpValue) &&
8120 !all_of(VL, isVectorLikeInstWithConstOps)) ||
8121 NotProfitableForVectorization(VL)) {
8122 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
8123 if (TryToFindDuplicates(S))
8124 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8125 ReuseShuffleIndices);
8126 return;
8129 // Don't vectorize ephemeral values.
8130 if (S.getOpcode() && !EphValues.empty()) {
8131 for (Value *V : VL) {
8132 if (EphValues.count(V)) {
8133 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
8134 << ") is ephemeral.\n");
8135 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8136 return;
8141 // We now know that this is a vector of instructions of the same type from
8142 // the same block.
8144 // Check that none of the instructions in the bundle are already in the tree.
8145 for (Value *V : VL) {
8146 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) ||
8147 doesNotNeedToBeScheduled(V))
8148 continue;
8149 if (getTreeEntry(V)) {
8150 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
8151 << ") is already in tree.\n");
8152 if (TryToFindDuplicates(S))
8153 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8154 ReuseShuffleIndices);
8155 return;
8159 // The reduction nodes (stored in UserIgnoreList) should also stay scalar.
8160 if (UserIgnoreList && !UserIgnoreList->empty()) {
8161 for (Value *V : VL) {
8162 if (UserIgnoreList->contains(V)) {
8163 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
8164 if (TryToFindDuplicates(S))
8165 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8166 ReuseShuffleIndices);
8167 return;
8172 // Special processing for sorted pointers for the ScatterVectorize node with
8173 // constant indices only.
8174 if (!AreAllSameBlock && AreScatterAllGEPSameBlock) {
8175 assert(VL.front()->getType()->isPointerTy() &&
8176 count_if(VL, IsaPred<GetElementPtrInst>) >= 2 &&
8177 "Expected pointers only.");
8178 // Reset S to make it GetElementPtr kind of node.
8179 const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
8180 assert(It != VL.end() && "Expected at least one GEP.");
8181 S = getSameOpcode(*It, *TLI);
8184 // Check that all of the users of the scalars that we want to vectorize are
8185 // schedulable.
8186 auto *VL0 = cast<Instruction>(S.OpValue);
8187 BB = VL0->getParent();
8189 if (S.MainOp &&
8190 (BB->isEHPad() || isa_and_nonnull<UnreachableInst>(BB->getTerminator()) ||
8191 !DT->isReachableFromEntry(BB))) {
8192 // Don't go into unreachable blocks. They may contain instructions with
8193 // dependency cycles which confuse the final scheduling.
8194 // Do not vectorize EH and non-returning blocks; it is not profitable in
8195 // most cases.
8196 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
8197 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8198 return;
8201 // Check that every instruction appears once in this bundle.
8202 if (!TryToFindDuplicates(S, /*DoNotFail=*/true))
8203 return;
8205 // Perform specific checks for each particular instruction kind.
8206 OrdersType CurrentOrder;
8207 SmallVector<Value *> PointerOps;
8208 TreeEntry::EntryState State = getScalarsVectorizationState(
8209 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps);
8210 if (State == TreeEntry::NeedToGather) {
8211 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8212 ReuseShuffleIndices);
8213 return;
8216 auto &BSRef = BlocksSchedules[BB];
8217 if (!BSRef)
8218 BSRef = std::make_unique<BlockScheduling>(BB);
8220 BlockScheduling &BS = *BSRef;
8222 std::optional<ScheduleData *> Bundle =
8223 BS.tryScheduleBundle(UniqueValues, this, S);
8224 #ifdef EXPENSIVE_CHECKS
8225 // Make sure we didn't break any internal invariants
8226 BS.verify();
8227 #endif
8228 if (!Bundle) {
8229 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
8230 assert((!BS.getScheduleData(VL0) ||
8231 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
8232 "tryScheduleBundle should cancelScheduling on failure");
8233 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8234 ReuseShuffleIndices);
8235 NonScheduledFirst.insert(VL.front());
8236 if (S.getOpcode() == Instruction::Load &&
8237 BS.ScheduleRegionSize < BS.ScheduleRegionSizeLimit)
8238 registerNonVectorizableLoads(VL);
8239 return;
8241 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
8243 unsigned ShuffleOrOp = S.isAltShuffle() ?
8244 (unsigned) Instruction::ShuffleVector : S.getOpcode();
8245 auto CreateOperandNodes = [&](TreeEntry *TE, const auto &Operands) {
8246 // Postpone building tree nodes for operand bundles that are PHI nodes.
8247 SmallVector<unsigned> PHIOps;
8248 for (unsigned I : seq<unsigned>(Operands.size())) {
8249 ArrayRef<Value *> Op = Operands[I];
8250 if (Op.empty())
8251 continue;
8252 InstructionsState S = getSameOpcode(Op, *TLI);
8253 if (S.getOpcode() != Instruction::PHI || S.isAltShuffle())
8254 buildTree_rec(Op, Depth + 1, {TE, I});
8255 else
8256 PHIOps.push_back(I);
8258 for (unsigned I : PHIOps)
8259 buildTree_rec(Operands[I], Depth + 1, {TE, I});
8261 switch (ShuffleOrOp) {
8262 case Instruction::PHI: {
8263 auto *PH = cast<PHINode>(VL0);
8265 TreeEntry *TE =
8266 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndices);
8267 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
8269 // Keeps the reordered operands to avoid code duplication.
8270 PHIHandler Handler(*DT, PH, VL);
8271 Handler.buildOperands();
8272 for (unsigned I : seq<unsigned>(PH->getNumOperands()))
8273 TE->setOperand(I, Handler.getOperands(I));
8274 SmallVector<ArrayRef<Value *>> Operands(PH->getNumOperands());
8275 for (unsigned I : seq<unsigned>(PH->getNumOperands()))
8276 Operands[I] = Handler.getOperands(I);
8277 CreateOperandNodes(TE, Operands);
8278 return;
8280 case Instruction::ExtractValue:
8281 case Instruction::ExtractElement: {
8282 if (CurrentOrder.empty()) {
8283 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
8284 } else {
8285 LLVM_DEBUG({
8286 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
8287 "with order";
8288 for (unsigned Idx : CurrentOrder)
8289 dbgs() << " " << Idx;
8290 dbgs() << "\n";
8292 fixupOrderingIndices(CurrentOrder);
8294 // Insert new order with initial value 0, if it does not exist,
8295 // otherwise return the iterator to the existing one.
8296 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8297 ReuseShuffleIndices, CurrentOrder);
8298 // This is a special case, as it does not gather, but at the same time
8299 // we are not extending buildTree_rec() towards the operands.
8300 ValueList Op0;
8301 Op0.assign(VL.size(), VL0->getOperand(0));
8302 VectorizableTree.back()->setOperand(0, Op0);
8303 return;
8305 case Instruction::InsertElement: {
8306 assert(ReuseShuffleIndices.empty() && "All inserts should be unique");
8308 auto OrdCompare = [](const std::pair<int, int> &P1,
8309 const std::pair<int, int> &P2) {
8310 return P1.first > P2.first;
8312 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
8313 decltype(OrdCompare)>
8314 Indices(OrdCompare);
8315 for (int I = 0, E = VL.size(); I < E; ++I) {
8316 unsigned Idx = *getElementIndex(VL[I]);
8317 Indices.emplace(Idx, I);
8319 OrdersType CurrentOrder(VL.size(), VL.size());
8320 bool IsIdentity = true;
8321 for (int I = 0, E = VL.size(); I < E; ++I) {
8322 CurrentOrder[Indices.top().second] = I;
8323 IsIdentity &= Indices.top().second == I;
8324 Indices.pop();
8326 if (IsIdentity)
8327 CurrentOrder.clear();
8328 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8329 {}, CurrentOrder);
8330 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
8332 TE->setOperandsInOrder();
8333 buildTree_rec(TE->getOperand(1), Depth + 1, {TE, 1});
8334 return;
8336 case Instruction::Load: {
8337 // Check that a vectorized load would load the same memory as a scalar
8338 // load. For example, we don't want to vectorize loads that are smaller
8339 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
8340 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
8341 // from such a struct, we read/write packed bits disagreeing with the
8342 // unvectorized version.
8343 TreeEntry *TE = nullptr;
8344 fixupOrderingIndices(CurrentOrder);
8345 switch (State) {
8346 case TreeEntry::Vectorize:
8347 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8348 ReuseShuffleIndices, CurrentOrder, InterleaveFactor);
8349 if (CurrentOrder.empty())
8350 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
8351 else
8352 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
8353 TE->setOperandsInOrder();
8354 break;
8355 case TreeEntry::StridedVectorize:
8356 // Vectorizing non-consecutive loads as strided loads.
8357 TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
8358 UserTreeIdx, ReuseShuffleIndices, CurrentOrder);
8359 TE->setOperandsInOrder();
8360 LLVM_DEBUG(dbgs() << "SLP: added a vector of strided loads.\n");
8361 break;
8362 case TreeEntry::ScatterVectorize:
8363 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
8364 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
8365 UserTreeIdx, ReuseShuffleIndices);
8366 TE->setOperandsInOrder();
8367 buildTree_rec(PointerOps, Depth + 1, {TE, 0});
8368 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
8369 break;
8370 case TreeEntry::CombinedVectorize:
8371 case TreeEntry::NeedToGather:
8372 llvm_unreachable("Unexpected loads state.");
8374 return;
8376 case Instruction::ZExt:
8377 case Instruction::SExt:
8378 case Instruction::FPToUI:
8379 case Instruction::FPToSI:
8380 case Instruction::FPExt:
8381 case Instruction::PtrToInt:
8382 case Instruction::IntToPtr:
8383 case Instruction::SIToFP:
8384 case Instruction::UIToFP:
8385 case Instruction::Trunc:
8386 case Instruction::FPTrunc:
8387 case Instruction::BitCast: {
8388 auto [PrevMaxBW, PrevMinBW] = CastMaxMinBWSizes.value_or(
8389 std::make_pair(std::numeric_limits<unsigned>::min(),
8390 std::numeric_limits<unsigned>::max()));
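// E.g. for a bundle of 'zext i8 %x to i32' this records a max bitwidth of 32
// and a min bitwidth of 8; for 'trunc i32 %x to i8' the bounds come from the
// operand and result types the other way around. These bounds are later
// consulted when the vectorizer tries to minimize the bit widths of the tree.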
8391 if (ShuffleOrOp == Instruction::ZExt ||
8392 ShuffleOrOp == Instruction::SExt) {
8393 CastMaxMinBWSizes = std::make_pair(
8394 std::max<unsigned>(DL->getTypeSizeInBits(VL0->getType()),
8395 PrevMaxBW),
8396 std::min<unsigned>(
8397 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()),
8398 PrevMinBW));
8399 } else if (ShuffleOrOp == Instruction::Trunc) {
8400 CastMaxMinBWSizes = std::make_pair(
8401 std::max<unsigned>(
8402 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()),
8403 PrevMaxBW),
8404 std::min<unsigned>(DL->getTypeSizeInBits(VL0->getType()),
8405 PrevMinBW));
8407 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8408 ReuseShuffleIndices);
8409 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
8411 TE->setOperandsInOrder();
8412 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
8413 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8414 if (ShuffleOrOp == Instruction::Trunc) {
8415 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8416 } else if (ShuffleOrOp == Instruction::SIToFP ||
8417 ShuffleOrOp == Instruction::UIToFP) {
8418 unsigned NumSignBits =
8419 ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
8420 if (auto *OpI = dyn_cast<Instruction>(VL0->getOperand(0))) {
8421 APInt Mask = DB->getDemandedBits(OpI);
8422 NumSignBits = std::max(NumSignBits, Mask.countl_zero());
8424 if (NumSignBits * 2 >=
8425 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()))
8426 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8428 return;
8430 case Instruction::ICmp:
8431 case Instruction::FCmp: {
8432 // Check that all of the compares have the same predicate.
8433 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
8434 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8435 ReuseShuffleIndices);
8436 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
8438 ValueList Left, Right;
8439 if (cast<CmpInst>(VL0)->isCommutative()) {
8440 // Commutative predicate - collect + sort operands of the instructions
8441 // so that each side is more likely to have the same opcode.
8442 assert(P0 == CmpInst::getSwappedPredicate(P0) &&
8443 "Commutative Predicate mismatch");
8444 reorderInputsAccordingToOpcode(VL, Left, Right, *this);
8445 } else {
8446 // Collect operands - commute if it uses the swapped predicate.
8447 for (Value *V : VL) {
8448 auto *Cmp = cast<CmpInst>(V);
8449 Value *LHS = Cmp->getOperand(0);
8450 Value *RHS = Cmp->getOperand(1);
8451 if (Cmp->getPredicate() != P0)
8452 std::swap(LHS, RHS);
8453 Left.push_back(LHS);
8454 Right.push_back(RHS);
8457 TE->setOperand(0, Left);
8458 TE->setOperand(1, Right);
8459 buildTree_rec(Left, Depth + 1, {TE, 0});
8460 buildTree_rec(Right, Depth + 1, {TE, 1});
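// If an icmp operand has enough known sign bits that it effectively fits in
// about half of its declared bit width, remember that operand node so the
// later bitwidth analysis can try to narrow it.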
8461 if (ShuffleOrOp == Instruction::ICmp) {
8462 unsigned NumSignBits0 =
8463 ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
8464 if (NumSignBits0 * 2 >=
8465 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()))
8466 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8467 unsigned NumSignBits1 =
8468 ComputeNumSignBits(VL0->getOperand(1), *DL, 0, AC, nullptr, DT);
8469 if (NumSignBits1 * 2 >=
8470 DL->getTypeSizeInBits(VL0->getOperand(1)->getType()))
8471 ExtraBitWidthNodes.insert(getOperandEntry(TE, 1)->Idx);
8473 return;
8475 case Instruction::Select:
8476 case Instruction::FNeg:
8477 case Instruction::Add:
8478 case Instruction::FAdd:
8479 case Instruction::Sub:
8480 case Instruction::FSub:
8481 case Instruction::Mul:
8482 case Instruction::FMul:
8483 case Instruction::UDiv:
8484 case Instruction::SDiv:
8485 case Instruction::FDiv:
8486 case Instruction::URem:
8487 case Instruction::SRem:
8488 case Instruction::FRem:
8489 case Instruction::Shl:
8490 case Instruction::LShr:
8491 case Instruction::AShr:
8492 case Instruction::And:
8493 case Instruction::Or:
8494 case Instruction::Xor:
8495 case Instruction::Freeze: {
8496 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8497 ReuseShuffleIndices);
8498 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
8500 // Sort operands of the instructions so that each side is more likely to
8501 // have the same opcode.
8502 if (isa<BinaryOperator>(VL0) && isCommutative(VL0)) {
8503 ValueList Left, Right;
8504 reorderInputsAccordingToOpcode(VL, Left, Right, *this);
8505 TE->setOperand(0, Left);
8506 TE->setOperand(1, Right);
8507 buildTree_rec(Left, Depth + 1, {TE, 0});
8508 buildTree_rec(Right, Depth + 1, {TE, 1});
8509 return;
8512 TE->setOperandsInOrder();
8513 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
8514 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8515 return;
8517 case Instruction::GetElementPtr: {
8518 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8519 ReuseShuffleIndices);
8520 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
8521 SmallVector<ValueList, 2> Operands(2);
8522 // Prepare the operand vector for pointer operands.
8523 for (Value *V : VL) {
8524 auto *GEP = dyn_cast<GetElementPtrInst>(V);
8525 if (!GEP) {
8526 Operands.front().push_back(V);
8527 continue;
8529 Operands.front().push_back(GEP->getPointerOperand());
8531 TE->setOperand(0, Operands.front());
8532 // Need to cast all indices to the same type before vectorization to
8533 // avoid a crash.
8534 // Required to be able to find correct matches between different gather
8535 // nodes and reuse the vectorized values rather than trying to gather them
8536 // again.
8537 int IndexIdx = 1;
8538 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
8539 Type *Ty = all_of(VL,
8540 [VL0Ty, IndexIdx](Value *V) {
8541 auto *GEP = dyn_cast<GetElementPtrInst>(V);
8542 if (!GEP)
8543 return true;
8544 return VL0Ty == GEP->getOperand(IndexIdx)->getType();
8546 ? VL0Ty
8547 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
8548 ->getPointerOperandType()
8549 ->getScalarType());
8550 // Prepare the operand vector.
8551 for (Value *V : VL) {
8552 auto *I = dyn_cast<GetElementPtrInst>(V);
8553 if (!I) {
8554 Operands.back().push_back(
8555 ConstantInt::get(Ty, 0, /*isSigned=*/false));
8556 continue;
8558 auto *Op = I->getOperand(IndexIdx);
8559 auto *CI = dyn_cast<ConstantInt>(Op);
8560 if (!CI)
8561 Operands.back().push_back(Op);
8562 else
8563 Operands.back().push_back(ConstantFoldIntegerCast(
8564 CI, Ty, CI->getValue().isSignBitSet(), *DL));
8566 TE->setOperand(IndexIdx, Operands.back());
8568 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
8569 buildTree_rec(Operands[I], Depth + 1, {TE, I});
8570 return;
8572 case Instruction::Store: {
8573 bool Consecutive = CurrentOrder.empty();
8574 if (!Consecutive)
8575 fixupOrderingIndices(CurrentOrder);
8576 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8577 ReuseShuffleIndices, CurrentOrder);
8578 TE->setOperandsInOrder();
8579 buildTree_rec(TE->getOperand(0), Depth + 1, {TE, 0});
8580 if (Consecutive)
8581 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
8582 else
8583 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
8584 return;
8586 case Instruction::Call: {
8587 // Check if the calls are all to the same vectorizable intrinsic or
8588 // library function.
8589 CallInst *CI = cast<CallInst>(VL0);
8590 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8592 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8593 ReuseShuffleIndices);
8594 // Sort operands of the instructions so that each side is more likely to
8595 // have the same opcode.
8596 if (isCommutative(VL0)) {
8597 ValueList Left, Right;
8598 reorderInputsAccordingToOpcode(VL, Left, Right, *this);
8599 TE->setOperand(0, Left);
8600 TE->setOperand(1, Right);
8601 SmallVector<ValueList> Operands;
8602 for (unsigned I : seq<unsigned>(2, CI->arg_size())) {
8603 Operands.emplace_back();
8604 if (isVectorIntrinsicWithScalarOpAtArg(ID, I))
8605 continue;
8606 for (Value *V : VL) {
8607 auto *CI2 = cast<CallInst>(V);
8608 Operands.back().push_back(CI2->getArgOperand(I));
8610 TE->setOperand(I, Operands.back());
8612 buildTree_rec(Left, Depth + 1, {TE, 0});
8613 buildTree_rec(Right, Depth + 1, {TE, 1});
8614 for (unsigned I : seq<unsigned>(2, CI->arg_size())) {
8615 if (Operands[I - 2].empty())
8616 continue;
8617 buildTree_rec(Operands[I - 2], Depth + 1, {TE, I});
8619 return;
8621 TE->setOperandsInOrder();
8622 for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
8623 // For scalar operands there is no need to create an entry since there is
8624 // no need to vectorize them.
8625 if (isVectorIntrinsicWithScalarOpAtArg(ID, I))
8626 continue;
8627 ValueList Operands;
8628 // Prepare the operand vector.
8629 for (Value *V : VL) {
8630 auto *CI2 = cast<CallInst>(V);
8631 Operands.push_back(CI2->getArgOperand(I));
8633 buildTree_rec(Operands, Depth + 1, {TE, I});
8635 return;
8637 case Instruction::ShuffleVector: {
8638 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8639 ReuseShuffleIndices);
8640 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
8642 // Reorder operands if reordering would enable vectorization.
8643 auto *CI = dyn_cast<CmpInst>(VL0);
8644 if (isa<BinaryOperator>(VL0) || CI) {
8645 ValueList Left, Right;
8646 if (!CI || all_of(VL, [](Value *V) {
8647 return cast<CmpInst>(V)->isCommutative();
8648 })) {
8649 reorderInputsAccordingToOpcode(VL, Left, Right, *this);
8650 } else {
8651 auto *MainCI = cast<CmpInst>(S.MainOp);
8652 auto *AltCI = cast<CmpInst>(S.AltOp);
8653 CmpInst::Predicate MainP = MainCI->getPredicate();
8654 CmpInst::Predicate AltP = AltCI->getPredicate();
8655 assert(MainP != AltP &&
8656 "Expected different main/alternate predicates.");
8657 // Collect operands - commute if it uses the swapped predicate or
8658 // alternate operation.
8659 for (Value *V : VL) {
8660 auto *Cmp = cast<CmpInst>(V);
8661 Value *LHS = Cmp->getOperand(0);
8662 Value *RHS = Cmp->getOperand(1);
8664 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) {
8665 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
8666 std::swap(LHS, RHS);
8667 } else {
8668 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
8669 std::swap(LHS, RHS);
8671 Left.push_back(LHS);
8672 Right.push_back(RHS);
8675 TE->setOperand(0, Left);
8676 TE->setOperand(1, Right);
8677 buildTree_rec(Left, Depth + 1, {TE, 0});
8678 buildTree_rec(Right, Depth + 1, {TE, 1});
8679 return;
8682 TE->setOperandsInOrder();
8683 for (unsigned I : seq<unsigned>(0, VL0->getNumOperands()))
8684 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8685 return;
8687 default:
8688 break;
8690 llvm_unreachable("Unexpected vectorization of the instructions.");
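// canMapToVector() returns the number of scalar elements \p T flattens to when
// the whole aggregate can be treated as a single vector, or 0 otherwise. For
// example, the homogeneous type { [2 x float], [2 x float] } flattens to 4
// floats (subject to the register-size checks below), while { i32, float } is
// rejected as non-homogeneous.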
8693 unsigned BoUpSLP::canMapToVector(Type *T) const {
8694 unsigned N = 1;
8695 Type *EltTy = T;
8697 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) {
8698 if (EltTy->isEmptyTy())
8699 return 0;
8700 if (auto *ST = dyn_cast<StructType>(EltTy)) {
8701 // Check that struct is homogeneous.
8702 for (const auto *Ty : ST->elements())
8703 if (Ty != *ST->element_begin())
8704 return 0;
8705 N *= ST->getNumElements();
8706 EltTy = *ST->element_begin();
8707 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
8708 N *= AT->getNumElements();
8709 EltTy = AT->getElementType();
8710 } else {
8711 auto *VT = cast<FixedVectorType>(EltTy);
8712 N *= VT->getNumElements();
8713 EltTy = VT->getElementType();
8717 if (!isValidElementType(EltTy))
8718 return 0;
8719 uint64_t VTSize = DL->getTypeStoreSizeInBits(getWidenedType(EltTy, N));
8720 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
8721 VTSize != DL->getTypeStoreSizeInBits(T))
8722 return 0;
8723 return N;
8726 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
8727 SmallVectorImpl<unsigned> &CurrentOrder,
8728 bool ResizeAllowed) const {
8729 const auto *It = find_if(VL, IsaPred<ExtractElementInst, ExtractValueInst>);
8730 assert(It != VL.end() && "Expected at least one extract instruction.");
8731 auto *E0 = cast<Instruction>(*It);
8732 assert(
8733 all_of(VL, IsaPred<UndefValue, ExtractElementInst, ExtractValueInst>) &&
8734 "Invalid opcode");
8735 // Check if all of the extracts come from the same vector and from the
8736 // correct offset.
8737 Value *Vec = E0->getOperand(0);
8739 CurrentOrder.clear();
8741 // We have to extract from a vector/aggregate with the same number of elements.
8742 unsigned NElts;
8743 if (E0->getOpcode() == Instruction::ExtractValue) {
8744 NElts = canMapToVector(Vec->getType());
8745 if (!NElts)
8746 return false;
8747 // Check if the load can be rewritten as a load of a vector.
8748 LoadInst *LI = dyn_cast<LoadInst>(Vec);
8749 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
8750 return false;
8751 } else {
8752 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
8755 unsigned E = VL.size();
8756 if (!ResizeAllowed && NElts != E)
8757 return false;
8758 SmallVector<int> Indices(E, PoisonMaskElem);
8759 unsigned MinIdx = NElts, MaxIdx = 0;
8760 for (auto [I, V] : enumerate(VL)) {
8761 auto *Inst = dyn_cast<Instruction>(V);
8762 if (!Inst)
8763 continue;
8764 if (Inst->getOperand(0) != Vec)
8765 return false;
8766 if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
8767 if (isa<UndefValue>(EE->getIndexOperand()))
8768 continue;
8769 std::optional<unsigned> Idx = getExtractIndex(Inst);
8770 if (!Idx)
8771 return false;
8772 const unsigned ExtIdx = *Idx;
8773 if (ExtIdx >= NElts)
8774 continue;
8775 Indices[I] = ExtIdx;
8776 if (MinIdx > ExtIdx)
8777 MinIdx = ExtIdx;
8778 if (MaxIdx < ExtIdx)
8779 MaxIdx = ExtIdx;
8781 if (MaxIdx - MinIdx + 1 > E)
8782 return false;
8783 if (MaxIdx + 1 <= E)
8784 MinIdx = 0;
8786 // Check that all of the indices extract from the correct offset.
8787 bool ShouldKeepOrder = true;
8788 // Assign to all items the initial value E so we can check if the extract
8789 // instruction index was already used.
8790 // Also, later we can check that all the indices are used and that we have
8791 // consecutive accesses in the extract instructions, by checking that no
8792 // element of CurrentOrder still has the value E.
8793 CurrentOrder.assign(E, E);
8794 for (unsigned I = 0; I < E; ++I) {
8795 if (Indices[I] == PoisonMaskElem)
8796 continue;
8797 const unsigned ExtIdx = Indices[I] - MinIdx;
8798 if (CurrentOrder[ExtIdx] != E) {
8799 CurrentOrder.clear();
8800 return false;
8802 ShouldKeepOrder &= ExtIdx == I;
8803 CurrentOrder[ExtIdx] = I;
8805 if (ShouldKeepOrder)
8806 CurrentOrder.clear();
8808 return ShouldKeepOrder;
8811 bool BoUpSLP::areAllUsersVectorized(
8812 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const {
8813 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) ||
8814 all_of(I->users(), [this](User *U) {
8815 return ScalarToTreeEntry.contains(U) ||
8816 isVectorLikeInstWithConstOps(U) ||
8817 (isa<ExtractElementInst>(U) && MustGather.contains(U));
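// getVectorCallCosts() computes both the cost of lowering the vectorized call
// as a vector intrinsic and as a vector library call (when such a mapping
// exists); the caller can then pick whichever of the two is cheaper.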
8821 static std::pair<InstructionCost, InstructionCost>
8822 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
8823 TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
8824 ArrayRef<Type *> ArgTys) {
8825 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8827 // Calculate the cost of the scalar and vector calls.
8828 FastMathFlags FMF;
8829 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
8830 FMF = FPCI->getFastMathFlags();
8831 SmallVector<const Value *> Arguments(CI->args());
8832 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, ArgTys, FMF,
8833 dyn_cast<IntrinsicInst>(CI));
8834 auto IntrinsicCost =
8835 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
8837 auto Shape = VFShape::get(CI->getFunctionType(),
8838 ElementCount::getFixed(VecTy->getNumElements()),
8839 false /*HasGlobalPred*/);
8840 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
8841 auto LibCost = IntrinsicCost;
8842 if (!CI->isNoBuiltin() && VecFunc) {
8843 // Calculate the cost of the vector library call.
8844 // If the corresponding vector call is cheaper, return its cost.
8845 LibCost =
8846 TTI->getCallInstrCost(nullptr, VecTy, ArgTys, TTI::TCK_RecipThroughput);
8848 return {IntrinsicCost, LibCost};
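// Builds the blend mask between the main-opcode and alternate-opcode vectors:
// lane I selects Sz + Idx (the alternate vector) when IsAltOp holds for the
// scalar, and Idx (the main vector) otherwise, with ReorderIndices and
// ReuseShuffleIndices applied on top. OpScalars and AltScalars, if provided,
// collect the scalars routed to each side.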
8851 void BoUpSLP::TreeEntry::buildAltOpShuffleMask(
8852 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask,
8853 SmallVectorImpl<Value *> *OpScalars,
8854 SmallVectorImpl<Value *> *AltScalars) const {
8855 unsigned Sz = Scalars.size();
8856 Mask.assign(Sz, PoisonMaskElem);
8857 SmallVector<int> OrderMask;
8858 if (!ReorderIndices.empty())
8859 inversePermutation(ReorderIndices, OrderMask);
8860 for (unsigned I = 0; I < Sz; ++I) {
8861 unsigned Idx = I;
8862 if (!ReorderIndices.empty())
8863 Idx = OrderMask[I];
8864 auto *OpInst = cast<Instruction>(Scalars[Idx]);
8865 if (IsAltOp(OpInst)) {
8866 Mask[I] = Sz + Idx;
8867 if (AltScalars)
8868 AltScalars->push_back(OpInst);
8869 } else {
8870 Mask[I] = Idx;
8871 if (OpScalars)
8872 OpScalars->push_back(OpInst);
8875 if (!ReuseShuffleIndices.empty()) {
8876 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem);
8877 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) {
8878 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem;
8880 Mask.swap(NewMask);
8884 static bool isAlternateInstruction(const Instruction *I,
8885 const Instruction *MainOp,
8886 const Instruction *AltOp,
8887 const TargetLibraryInfo &TLI) {
8888 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) {
8889 auto *AltCI = cast<CmpInst>(AltOp);
8890 CmpInst::Predicate MainP = MainCI->getPredicate();
8891 CmpInst::Predicate AltP = AltCI->getPredicate();
8892 assert(MainP != AltP && "Expected different main/alternate predicates.");
8893 auto *CI = cast<CmpInst>(I);
8894 if (isCmpSameOrSwapped(MainCI, CI, TLI))
8895 return false;
8896 if (isCmpSameOrSwapped(AltCI, CI, TLI))
8897 return true;
8898 CmpInst::Predicate P = CI->getPredicate();
8899 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P);
8901 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) &&
8902 "CmpInst expected to match either main or alternate predicate or "
8903 "their swap.");
8904 (void)AltP;
8905 return MainP != P && MainP != SwappedP;
8907 return I->getOpcode() == AltOp->getOpcode();
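/// Classifies the operands \p Ops for TTI cost queries: uniform and/or
/// constant operands, and (negated) power-of-two constant values.
/// For example, four identical constant operands with value 4 are classified
/// as {OK_UniformConstantValue, OP_PowerOf2} by the checks below.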
8910 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) {
8911 assert(!Ops.empty());
8912 const auto *Op0 = Ops.front();
8914 const bool IsConstant = all_of(Ops, [](Value *V) {
8915 // TODO: We should allow undef elements here
8916 return isConstant(V) && !isa<UndefValue>(V);
8918 const bool IsUniform = all_of(Ops, [=](Value *V) {
8919 // TODO: We should allow undef elements here
8920 return V == Op0;
8922 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) {
8923 // TODO: We should allow undef elements here
8924 if (auto *CI = dyn_cast<ConstantInt>(V))
8925 return CI->getValue().isPowerOf2();
8926 return false;
8928 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) {
8929 // TODO: We should allow undef elements here
8930 if (auto *CI = dyn_cast<ConstantInt>(V))
8931 return CI->getValue().isNegatedPowerOf2();
8932 return false;
8935 TTI::OperandValueKind VK = TTI::OK_AnyValue;
8936 if (IsConstant && IsUniform)
8937 VK = TTI::OK_UniformConstantValue;
8938 else if (IsConstant)
8939 VK = TTI::OK_NonUniformConstantValue;
8940 else if (IsUniform)
8941 VK = TTI::OK_UniformValue;
8943 TTI::OperandValueProperties VP = TTI::OP_None;
8944 VP = IsPowerOfTwo ? TTI::OP_PowerOf2 : VP;
8945 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP;
8947 return {VK, VP};
8950 namespace {
8951 /// The base class for shuffle instruction emission and shuffle cost estimation.
8952 class BaseShuffleAnalysis {
8953 protected:
8954 Type *ScalarTy = nullptr;
8956 BaseShuffleAnalysis(Type *ScalarTy) : ScalarTy(ScalarTy) {}
8958 /// V is expected to be a vectorized value.
8959 /// When REVEC is disabled, there is no difference between VF and
8960 /// VNumElements.
8961 /// When REVEC is enabled, VF is VNumElements / ScalarTyNumElements.
8962 /// e.g., if ScalarTy is <4 x Ty> and V1 is <8 x Ty>, 2 is returned instead
8963 /// of 8.
8964 unsigned getVF(Value *V) const {
8965 assert(V && "V cannot be nullptr");
8966 assert(isa<FixedVectorType>(V->getType()) &&
8967 "V does not have FixedVectorType");
8968 assert(ScalarTy && "ScalarTy cannot be nullptr");
8969 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
8970 unsigned VNumElements =
8971 cast<FixedVectorType>(V->getType())->getNumElements();
8972 assert(VNumElements > ScalarTyNumElements &&
8973 "the number of elements of V is not large enough");
8974 assert(VNumElements % ScalarTyNumElements == 0 &&
8975 "the number of elements of V is not a vectorized value");
8976 return VNumElements / ScalarTyNumElements;
8979 /// Checks if the mask is an identity mask.
8980 /// \param IsStrict if true, the function returns false if the mask size does
8981 /// not match the vector size.
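/// For example, the mask <0, 1> on a <4 x Ty> vector is treated as identity
/// when \p IsStrict is false (an extract of the leading subvector), but not
/// when \p IsStrict is true, since the sizes differ.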
8982 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy,
8983 bool IsStrict) {
8984 int Limit = Mask.size();
8985 int VF = VecTy->getNumElements();
8986 int Index = -1;
8987 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit))
8988 return true;
8989 if (!IsStrict) {
8990 // Consider extract subvector starting from index 0.
8991 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) &&
8992 Index == 0)
8993 return true;
8994 // All VF-size submasks are identity (e.g.
8995 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4).
8996 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) {
8997 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF);
8998 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) ||
8999 ShuffleVectorInst::isIdentityMask(Slice, VF);
9001 return true;
9003 return false;
9006 /// Tries to combine 2 different masks into a single one.
9007 /// \param LocalVF Vector length of the permuted input vector. \p Mask may
9008 /// change the size of the vector, while \p LocalVF is the original size of
9009 /// the shuffled vector.
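/// For illustration (values traced through the code below): with
/// \p LocalVF == 2, Mask == <1, 0> and ExtMask == <1, 0, 3, 2>, the combined
/// mask becomes <0, 1, 0, 1>, i.e. each lane I of the result is
/// Mask[ExtMask[I] % VF], taken modulo \p LocalVF.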
9010 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask,
9011 ArrayRef<int> ExtMask) {
9012 unsigned VF = Mask.size();
9013 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem);
9014 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
9015 if (ExtMask[I] == PoisonMaskElem)
9016 continue;
9017 int MaskedIdx = Mask[ExtMask[I] % VF];
9018 NewMask[I] =
9019 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF;
9021 Mask.swap(NewMask);
9024 /// Looks through shuffles trying to reduce the final number of shuffles in
9025 /// the code. The function looks through the previously emitted shuffle
9026 /// instructions and properly marks indices in the mask as undef.
9027 /// For example, given the code
9028 /// \code
9029 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
9030 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
9031 /// \endcode
9032 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will
9033 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
9034 /// <0, 1, 2, 3> for the shuffle.
9035 /// If the 2 operands are of different sizes, the smaller one will be resized and
9036 /// the mask recalculated properly.
9037 /// For example, given the code
9038 /// \code
9039 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
9040 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
9041 /// \endcode
9042 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will
9043 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
9044 /// <0, 1, 2, 3> for the shuffle.
9045 /// So, it tries to transform permutations into a simple vector merge, if
9046 /// possible.
9047 /// \param V The input vector which must be shuffled using the given \p Mask.
9048 /// If the better candidate is found, \p V is set to this best candidate
9049 /// vector.
9050 /// \param Mask The input mask for the shuffle. If the best candidate is found
9051 /// during looking-through-shuffles attempt, it is updated accordingly.
9052 /// \param SinglePermute true if the shuffle operation is originally a
9053 /// single-value-permutation. In this case the look-through-shuffles procedure
9054 /// may look for resizing shuffles as the best candidates.
9055 /// \return true if the shuffle results in a non-resizing identity shuffle
9056 /// (and thus can be ignored), false otherwise.
9057 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask,
9058 bool SinglePermute) {
9059 Value *Op = V;
9060 ShuffleVectorInst *IdentityOp = nullptr;
9061 SmallVector<int> IdentityMask;
9062 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) {
9063 // Exit if this is not a fixed vector type or a size-changing shuffle.
9064 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType());
9065 if (!SVTy)
9066 break;
9067 // Remember the identity or broadcast mask, if it is not a resizing
9068 // shuffle. If no better candidates are found, this Op and Mask will be
9069 // used in the final shuffle.
9070 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) {
9071 if (!IdentityOp || !SinglePermute ||
9072 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) &&
9073 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask,
9074 IdentityMask.size()))) {
9075 IdentityOp = SV;
9076 // Store the current mask in IdentityMask so we do not lose this info
9077 // later if IdentityOp is selected as the best candidate for the
9078 // permutation.
9079 IdentityMask.assign(Mask);
9082 // Remember the broadcast mask. If no better candidates are found, this Op
9083 // and Mask will be used in the final shuffle.
9084 // Zero splat can be used as identity too, since it might be used with
9085 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling.
9086 // E.g. if we need to shuffle the vector with the mask <3, 1, 2, 0>, which
9087 // is expensive, and the analysis finds out that the source vector is just
9088 // a broadcast, this original mask can be transformed to the identity mask
9089 // <0, 1, 2, 3>.
9090 // \code
9091 // %0 = shuffle %v, poison, zeroinitializer
9092 // %res = shuffle %0, poison, <3, 1, 2, 0>
9093 // \endcode
9094 // may be transformed to
9095 // \code
9096 // %0 = shuffle %v, poison, zeroinitializer
9097 // %res = shuffle %0, poison, <0, 1, 2, 3>
9098 // \endcode
9099 if (SV->isZeroEltSplat()) {
9100 IdentityOp = SV;
9101 IdentityMask.assign(Mask);
9103 int LocalVF = Mask.size();
9104 if (auto *SVOpTy =
9105 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType()))
9106 LocalVF = SVOpTy->getNumElements();
9107 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem);
9108 for (auto [Idx, I] : enumerate(Mask)) {
9109 if (I == PoisonMaskElem ||
9110 static_cast<unsigned>(I) >= SV->getShuffleMask().size())
9111 continue;
9112 ExtMask[Idx] = SV->getMaskValue(I);
9114 bool IsOp1Undef = isUndefVector</*isPoisonOnly=*/true>(
9115 SV->getOperand(0),
9116 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg))
9117 .all();
9118 bool IsOp2Undef = isUndefVector</*isPoisonOnly=*/true>(
9119 SV->getOperand(1),
9120 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg))
9121 .all();
9122 if (!IsOp1Undef && !IsOp2Undef) {
9123 // Update mask and mark undef elems.
9124 for (int &I : Mask) {
9125 if (I == PoisonMaskElem)
9126 continue;
9127 if (SV->getMaskValue(I % SV->getShuffleMask().size()) ==
9128 PoisonMaskElem)
9129 I = PoisonMaskElem;
9131 break;
9133 SmallVector<int> ShuffleMask(SV->getShuffleMask());
9134 combineMasks(LocalVF, ShuffleMask, Mask);
9135 Mask.swap(ShuffleMask);
9136 if (IsOp2Undef)
9137 Op = SV->getOperand(0);
9138 else
9139 Op = SV->getOperand(1);
9141 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType());
9142 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) ||
9143 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) {
9144 if (IdentityOp) {
9145 V = IdentityOp;
9146 assert(Mask.size() == IdentityMask.size() &&
9147 "Expected masks of same sizes.");
9148 // Clear known poison elements.
9149 for (auto [I, Idx] : enumerate(Mask))
9150 if (Idx == PoisonMaskElem)
9151 IdentityMask[I] = PoisonMaskElem;
9152 Mask.swap(IdentityMask);
9153 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V);
9154 return SinglePermute &&
9155 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()),
9156 /*IsStrict=*/true) ||
9157 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() &&
9158 Shuffle->isZeroEltSplat() &&
9159 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())));
9161 V = Op;
9162 return false;
9164 V = Op;
9165 return true;
9168 /// Smart shuffle instruction emission, walks through shuffles trees and
9169 /// tries to find the best matching vector for the actual shuffle
9170 /// instruction.
9171 template <typename T, typename ShuffleBuilderTy>
9172 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask,
9173 ShuffleBuilderTy &Builder) {
9174 assert(V1 && "Expected at least one vector value.");
9175 if (V2)
9176 Builder.resizeToMatch(V1, V2);
9177 int VF = Mask.size();
9178 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType()))
9179 VF = FTy->getNumElements();
9180 if (V2 && !isUndefVector</*IsPoisonOnly=*/true>(
9181 V2, buildUseMask(VF, Mask, UseMask::SecondArg))
9182 .all()) {
9183 // Peek through shuffles.
9184 Value *Op1 = V1;
9185 Value *Op2 = V2;
9186 int VF =
9187 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
9188 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem);
9189 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem);
9190 for (int I = 0, E = Mask.size(); I < E; ++I) {
9191 if (Mask[I] < VF)
9192 CombinedMask1[I] = Mask[I];
9193 else
9194 CombinedMask2[I] = Mask[I] - VF;
9196 Value *PrevOp1;
9197 Value *PrevOp2;
9198 do {
9199 PrevOp1 = Op1;
9200 PrevOp2 = Op2;
9201 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false);
9202 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false);
9203 // Check if we have 2 resizing shuffles - need to peek through operands
9204 // again.
9205 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1))
9206 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) {
9207 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem);
9208 for (auto [Idx, I] : enumerate(CombinedMask1)) {
9209 if (I == PoisonMaskElem)
9210 continue;
9211 ExtMask1[Idx] = SV1->getMaskValue(I);
9213 SmallBitVector UseMask1 = buildUseMask(
9214 cast<FixedVectorType>(SV1->getOperand(1)->getType())
9215 ->getNumElements(),
9216 ExtMask1, UseMask::SecondArg);
9217 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem);
9218 for (auto [Idx, I] : enumerate(CombinedMask2)) {
9219 if (I == PoisonMaskElem)
9220 continue;
9221 ExtMask2[Idx] = SV2->getMaskValue(I);
9223 SmallBitVector UseMask2 = buildUseMask(
9224 cast<FixedVectorType>(SV2->getOperand(1)->getType())
9225 ->getNumElements(),
9226 ExtMask2, UseMask::SecondArg);
9227 if (SV1->getOperand(0)->getType() ==
9228 SV2->getOperand(0)->getType() &&
9229 SV1->getOperand(0)->getType() != SV1->getType() &&
9230 isUndefVector(SV1->getOperand(1), UseMask1).all() &&
9231 isUndefVector(SV2->getOperand(1), UseMask2).all()) {
9232 Op1 = SV1->getOperand(0);
9233 Op2 = SV2->getOperand(0);
9234 SmallVector<int> ShuffleMask1(SV1->getShuffleMask());
9235 int LocalVF = ShuffleMask1.size();
9236 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType()))
9237 LocalVF = FTy->getNumElements();
9238 combineMasks(LocalVF, ShuffleMask1, CombinedMask1);
9239 CombinedMask1.swap(ShuffleMask1);
9240 SmallVector<int> ShuffleMask2(SV2->getShuffleMask());
9241 LocalVF = ShuffleMask2.size();
9242 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType()))
9243 LocalVF = FTy->getNumElements();
9244 combineMasks(LocalVF, ShuffleMask2, CombinedMask2);
9245 CombinedMask2.swap(ShuffleMask2);
9248 } while (PrevOp1 != Op1 || PrevOp2 != Op2);
9249 Builder.resizeToMatch(Op1, Op2);
9250 VF = std::max(cast<VectorType>(Op1->getType())
9251 ->getElementCount()
9252 .getKnownMinValue(),
9253 cast<VectorType>(Op2->getType())
9254 ->getElementCount()
9255 .getKnownMinValue());
9256 for (int I = 0, E = Mask.size(); I < E; ++I) {
9257 if (CombinedMask2[I] != PoisonMaskElem) {
9258 assert(CombinedMask1[I] == PoisonMaskElem &&
9259 "Expected undefined mask element");
9260 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF);
9263 if (Op1 == Op2 &&
9264 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) ||
9265 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) &&
9266 isa<ShuffleVectorInst>(Op1) &&
9267 cast<ShuffleVectorInst>(Op1)->getShuffleMask() ==
9268 ArrayRef(CombinedMask1))))
9269 return Builder.createIdentity(Op1);
9270 return Builder.createShuffleVector(
9271 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2,
9272 CombinedMask1);
9274 if (isa<PoisonValue>(V1))
9275 return Builder.createPoison(
9276 cast<VectorType>(V1->getType())->getElementType(), Mask.size());
9277 SmallVector<int> NewMask(Mask);
9278 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true);
9279 assert(V1 && "Expected non-null value after looking through shuffles.");
9281 if (!IsIdentity)
9282 return Builder.createShuffleVector(V1, NewMask);
9283 return Builder.createIdentity(V1);
9286 } // namespace
9288 /// Calculate the scalar and the vector costs from vectorizing a set of GEPs.
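/// \returns a pair {scalar GEP cost, vector GEP cost}; a pair of TCC_Free
/// values is returned when no savings from vectorizing the pointers are
/// expected (all pointers stay in the vectorized code).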
9289 static std::pair<InstructionCost, InstructionCost>
9290 getGEPCosts(const TargetTransformInfo &TTI, ArrayRef<Value *> Ptrs,
9291 Value *BasePtr, unsigned Opcode, TTI::TargetCostKind CostKind,
9292 Type *ScalarTy, VectorType *VecTy) {
9293 InstructionCost ScalarCost = 0;
9294 InstructionCost VecCost = 0;
9295 // Here we differentiate two cases: (1) when Ptrs represent a regular
9296 // vectorization tree node (as they are pointer arguments of scattered
9297 // loads) or (2) when Ptrs are the arguments of loads or stores being
9298 // vectorized as a plain wide unit-stride load/store since all the
9299 // loads/stores are known to be from/to adjacent locations.
9300 if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
9301 // Case 2: estimate costs for pointer related costs when vectorizing to
9302 // a wide load/store.
9303 // Scalar cost is estimated as a set of pointers with known relationship
9304 // between them.
9305 // For vector code we will use BasePtr as argument for the wide load/store
9306 // but we also need to account all the instructions which are going to
9307 // stay in vectorized code due to uses outside of these scalar
9308 // loads/stores.
9309 ScalarCost = TTI.getPointersChainCost(
9310 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy,
9311 CostKind);
9313 SmallVector<const Value *> PtrsRetainedInVecCode;
9314 for (Value *V : Ptrs) {
9315 if (V == BasePtr) {
9316 PtrsRetainedInVecCode.push_back(V);
9317 continue;
9319 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
9320 // For simplicity assume Ptr stays in vectorized code if it's not a
9321 // GEP instruction. We don't care since its cost is considered free.
9322 // TODO: We should check for any uses outside of the vectorizable tree
9323 // rather than just a single use.
9324 if (!Ptr || !Ptr->hasOneUse())
9325 PtrsRetainedInVecCode.push_back(V);
9328 if (PtrsRetainedInVecCode.size() == Ptrs.size()) {
9329 // If all pointers stay in vectorized code then we don't have
9330 // any savings on that.
9331 return std::make_pair(TTI::TCC_Free, TTI::TCC_Free);
9333 VecCost = TTI.getPointersChainCost(PtrsRetainedInVecCode, BasePtr,
9334 TTI::PointersChainInfo::getKnownStride(),
9335 VecTy, CostKind);
9336 } else {
9337 // Case 1: Ptrs are the arguments of loads that we are going to transform
9338 // into masked gather load intrinsic.
9339 // All the scalar GEPs will be removed as a result of vectorization.
9340 // For any external uses of some lanes, extractelement instructions will
9341 // be generated (their cost is estimated separately).
9342 TTI::PointersChainInfo PtrsInfo =
9343 all_of(Ptrs,
9344 [](const Value *V) {
9345 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
9346 return Ptr && !Ptr->hasAllConstantIndices();
9348 ? TTI::PointersChainInfo::getUnknownStride()
9349 : TTI::PointersChainInfo::getKnownStride();
9351 ScalarCost =
9352 TTI.getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy, CostKind);
9353 auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr);
9354 if (!BaseGEP) {
9355 auto *It = find_if(Ptrs, IsaPred<GEPOperator>);
9356 if (It != Ptrs.end())
9357 BaseGEP = cast<GEPOperator>(*It);
9359 if (BaseGEP) {
9360 SmallVector<const Value *> Indices(BaseGEP->indices());
9361 VecCost = TTI.getGEPCost(BaseGEP->getSourceElementType(),
9362 BaseGEP->getPointerOperand(), Indices, VecTy,
9363 CostKind);
9367 return std::make_pair(ScalarCost, VecCost);
9370 void BoUpSLP::reorderGatherNode(TreeEntry &TE) {
9371 assert(TE.isGather() && TE.ReorderIndices.empty() &&
9372 "Expected gather node without reordering.");
9373 DenseMap<std::pair<size_t, Value *>, SmallVector<LoadInst *>> LoadsMap;
9374 SmallSet<size_t, 2> LoadKeyUsed;
9376 // Do not reorder the node if it is small (just 2 elements), all-constant,
9377 // or all its instructions already have the same opcode.
9378 if (TE.Scalars.size() == 2 || (TE.getOpcode() && !TE.isAltShuffle()) ||
9379 all_of(TE.Scalars, isConstant))
9380 return;
9382 if (any_of(seq<unsigned>(TE.Idx), [&](unsigned Idx) {
9383 return VectorizableTree[Idx]->isSame(TE.Scalars);
9385 return;
9387 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
9388 Key = hash_combine(hash_value(LI->getParent()), Key);
9389 Value *Ptr =
9390 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth);
9391 if (LoadKeyUsed.contains(Key)) {
9392 auto LIt = LoadsMap.find(std::make_pair(Key, Ptr));
9393 if (LIt != LoadsMap.end()) {
9394 for (LoadInst *RLI : LIt->second) {
9395 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
9396 LI->getType(), LI->getPointerOperand(), *DL, *SE,
9397 /*StrictCheck=*/true))
9398 return hash_value(RLI->getPointerOperand());
9400 for (LoadInst *RLI : LIt->second) {
9401 if (arePointersCompatible(RLI->getPointerOperand(),
9402 LI->getPointerOperand(), *TLI)) {
9403 hash_code SubKey = hash_value(RLI->getPointerOperand());
9404 return SubKey;
9407 if (LIt->second.size() > 2) {
9408 hash_code SubKey =
9409 hash_value(LIt->second.back()->getPointerOperand());
9410 return SubKey;
9414 LoadKeyUsed.insert(Key);
9415 LoadsMap.try_emplace(std::make_pair(Key, Ptr)).first->second.push_back(LI);
9416 return hash_value(LI->getPointerOperand());
9418 MapVector<size_t, MapVector<size_t, SmallVector<Value *>>> SortedValues;
9419 SmallDenseMap<Value *, SmallVector<unsigned>, 8> KeyToIndex;
9420 bool IsOrdered = true;
9421 unsigned NumInstructions = 0;
9422 // Try to "cluster" scalar instructions, to be able to build extra vectorized
9423 // nodes.
9424 for (auto [I, V] : enumerate(TE.Scalars)) {
9425 size_t Key = 1, Idx = 1;
9426 if (auto *Inst = dyn_cast<Instruction>(V);
9427 Inst && !isa<ExtractElementInst, LoadInst, CastInst>(V) &&
9428 !isDeleted(Inst) && !isVectorized(V)) {
9429 std::tie(Key, Idx) = generateKeySubkey(V, TLI, GenerateLoadsSubkey,
9430 /*AllowAlternate=*/false);
9431 ++NumInstructions;
9433 auto &Container = SortedValues[Key];
9434 if (IsOrdered && !KeyToIndex.contains(V) &&
9435 !(isa<Constant, ExtractElementInst>(V) ||
9436 isVectorLikeInstWithConstOps(V)) &&
9437 ((Container.contains(Idx) &&
9438 KeyToIndex.at(Container[Idx].back()).back() != I - 1) ||
9439 (!Container.empty() && !Container.contains(Idx) &&
9440 KeyToIndex.at(Container.back().second.back()).back() != I - 1)))
9441 IsOrdered = false;
9442 auto &KTI = KeyToIndex[V];
9443 if (KTI.empty())
9444 Container[Idx].push_back(V);
9445 KTI.push_back(I);
9447 SmallVector<std::pair<unsigned, unsigned>> SubVectors;
9448 APInt DemandedElts = APInt::getAllOnes(TE.Scalars.size());
9449 if (!IsOrdered && NumInstructions > 1) {
9450 unsigned Cnt = 0;
9451 TE.ReorderIndices.resize(TE.Scalars.size(), TE.Scalars.size());
9452 for (const auto &D : SortedValues) {
9453 for (const auto &P : D.second) {
9454 unsigned Sz = 0;
9455 for (Value *V : P.second) {
9456 ArrayRef<unsigned> Indices = KeyToIndex.at(V);
9457 for (auto [K, Idx] : enumerate(Indices)) {
9458 TE.ReorderIndices[Cnt + K] = Idx;
9459 TE.Scalars[Cnt + K] = V;
9461 Sz += Indices.size();
9462 Cnt += Indices.size();
9464 if (Sz > 1 && isa<Instruction>(P.second.front())) {
9465 const unsigned SubVF = getFloorFullVectorNumberOfElements(
9466 *TTI, TE.Scalars.front()->getType(), Sz);
9467 SubVectors.emplace_back(Cnt - Sz, SubVF);
9468 for (unsigned I : seq<unsigned>(Cnt - Sz, Cnt - Sz + SubVF))
9469 DemandedElts.clearBit(I);
9470 } else if (!P.second.empty() && isConstant(P.second.front())) {
9471 for (unsigned I : seq<unsigned>(Cnt - Sz, Cnt))
9472 DemandedElts.clearBit(I);
9477 // Reuses always require shuffles, so consider the reordering profitable.
9478 if (!TE.ReuseShuffleIndices.empty() || TE.ReorderIndices.empty())
9479 return;
9480 // Do simple cost estimation.
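// Informal sketch of the comparison below: Cost models keeping the clustered
// order (subvector inserts for the clustered slices, scalarization of the
// remaining demanded elements, plus the reordering shuffle), while BVCost
// models a plain build-vector of the node (scalarization of the non-constant
// elements plus a blend with the constants, if any). The reordering is kept
// only if it is strictly cheaper; otherwise it is dropped again.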
9481 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9482 InstructionCost Cost = 0;
9483 auto *ScalarTy = TE.Scalars.front()->getType();
9484 auto *VecTy = getWidenedType(ScalarTy, TE.Scalars.size());
9485 for (auto [Idx, Sz] : SubVectors) {
9486 Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, VecTy, {}, CostKind,
9487 Idx, getWidenedType(ScalarTy, Sz));
9489 Cost += TTI->getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
9490 /*Extract=*/false, CostKind);
9491 int Sz = TE.Scalars.size();
9492 SmallVector<int> ReorderMask(TE.ReorderIndices.begin(),
9493 TE.ReorderIndices.end());
9494 for (unsigned I : seq<unsigned>(Sz)) {
9495 Value *V = TE.getOrdered(I);
9496 if (isa<PoisonValue>(V)) {
9497 ReorderMask[I] = PoisonMaskElem;
9498 } else if (isConstant(V) || DemandedElts[I]) {
9499 ReorderMask[I] = I + TE.ReorderIndices.size();
9502 Cost += ::getShuffleCost(*TTI,
9503 any_of(ReorderMask, [&](int I) { return I >= Sz; })
9504 ? TTI::SK_PermuteTwoSrc
9505 : TTI::SK_PermuteSingleSrc,
9506 VecTy, ReorderMask);
9507 DemandedElts = APInt::getAllOnes(VecTy->getNumElements());
9508 ReorderMask.assign(Sz, PoisonMaskElem);
9509 for (unsigned I : seq<unsigned>(Sz)) {
9510 Value *V = TE.getOrdered(I);
9511 if (isConstant(V)) {
9512 DemandedElts.clearBit(I);
9513 if (!isa<PoisonValue>(V))
9514 ReorderMask[I] = I;
9515 } else {
9516 ReorderMask[I] = I + Sz;
9519 InstructionCost BVCost = TTI->getScalarizationOverhead(
9520 VecTy, DemandedElts, /*Insert=*/true, /*Extract=*/false, CostKind);
9521 if (!DemandedElts.isAllOnes())
9522 BVCost += ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, VecTy, ReorderMask);
9523 if (Cost >= BVCost) {
9524 SmallVector<int> Mask(TE.ReorderIndices.begin(), TE.ReorderIndices.end());
9525 reorderScalars(TE.Scalars, Mask);
9526 TE.ReorderIndices.clear();
9530 void BoUpSLP::transformNodes() {
9531 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9532 BaseGraphSize = VectorizableTree.size();
9533 // Turn graph transforming mode on and off, when done.
9534 class GraphTransformModeRAAI {
9535 bool &SavedIsGraphTransformMode;
9537 public:
9538 GraphTransformModeRAAI(bool &IsGraphTransformMode)
9539 : SavedIsGraphTransformMode(IsGraphTransformMode) {
9540 IsGraphTransformMode = true;
9542 ~GraphTransformModeRAAI() { SavedIsGraphTransformMode = false; }
9543 } TransformContext(IsGraphTransformMode);
9544 // Operands are profitable if:
9545 // 1. At least one of them is a constant,
9546 // or
9547 // 2. They are splats,
9548 // or
9549 // 3. They result in a good vectorization opportunity, i.e. they may generate
9550 // vector nodes and reduce the cost of the graph.
9551 auto CheckOperandsProfitability = [this](Instruction *I1, Instruction *I2,
9552 const InstructionsState &S) {
9553 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
9554 for (unsigned Op : seq<unsigned>(S.MainOp->getNumOperands()))
9555 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
9556 I2->getOperand(Op));
9557 return all_of(
9558 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
9559 return all_of(Cand,
9560 [](const std::pair<Value *, Value *> &P) {
9561 return isa<Constant>(P.first) ||
9562 isa<Constant>(P.second) || P.first == P.second;
9563 }) ||
9564 findBestRootPair(Cand, LookAheadHeuristics::ScoreSplatLoads);
9568 // Try to reorder gather nodes for better vectorization opportunities.
9569 for (unsigned Idx : seq<unsigned>(BaseGraphSize)) {
9570 TreeEntry &E = *VectorizableTree[Idx];
9571 if (E.isGather())
9572 reorderGatherNode(E);
9575 // The tree may grow here, so iterate only over the nodes built before.
9576 for (unsigned Idx : seq<unsigned>(BaseGraphSize)) {
9577 TreeEntry &E = *VectorizableTree[Idx];
9578 if (E.isGather()) {
9579 ArrayRef<Value *> VL = E.Scalars;
9580 const unsigned Sz = getVectorElementSize(VL.front());
9581 unsigned MinVF = getMinVF(2 * Sz);
9582 // Do not try partial vectorization for small nodes (<= 2 elements), nodes
9583 // with the same opcode in the same parent block, or all-constant nodes.
9584 if (VL.size() <= 2 || LoadEntriesToVectorize.contains(Idx) ||
9585 !(!E.getOpcode() || E.getOpcode() == Instruction::Load ||
9586 E.isAltShuffle() || !allSameBlock(VL)) ||
9587 allConstant(VL) || isSplat(VL))
9588 continue;
9589 // Try to find vectorizable sequences and transform them into a series of
9590 // insertvector instructions.
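// For illustration: in a gather node of 8 scalars where the first 4 form a
// vectorizable sequence (e.g. consecutive loads), that slice may be built as
// a separate VF=4 subtree and recorded in CombinedEntriesWithIndices,
// leaving only the remaining scalars to be gathered.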
9591 unsigned StartIdx = 0;
9592 unsigned End = VL.size();
9593 for (unsigned VF = getFloorFullVectorNumberOfElements(
9594 *TTI, VL.front()->getType(), VL.size() - 1);
9595 VF >= MinVF; VF = getFloorFullVectorNumberOfElements(
9596 *TTI, VL.front()->getType(), VF - 1)) {
9597 if (StartIdx + VF > End)
9598 continue;
9599 SmallVector<std::pair<unsigned, unsigned>> Slices;
9600 for (unsigned Cnt = StartIdx; Cnt + VF <= End; Cnt += VF) {
9601 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
9602 // If any instruction is vectorized already - do not try again.
9603 // Reuse the existing node, if it fully matches the slice.
9604 if (const TreeEntry *SE = getTreeEntry(Slice.front());
9605 SE || getTreeEntry(Slice.back())) {
9606 if (!SE)
9607 continue;
9608 if (VF != SE->getVectorFactor() || !SE->isSame(Slice))
9609 continue;
9611 // Constants are already handled effectively - skip.
9612 if (allConstant(Slice))
9613 continue;
9614 // Do not try to vectorize small splats (smaller than a vector register
9615 // and with only a single non-undef element).
9616 bool IsSplat = isSplat(Slice);
9617 if (Slices.empty() || !IsSplat ||
9618 (VF <= 2 && 2 * std::clamp(TTI->getNumberOfParts(getWidenedType(
9619 Slice.front()->getType(), VF)),
9620 1U, VF - 1) !=
9621 std::clamp(TTI->getNumberOfParts(getWidenedType(
9622 Slice.front()->getType(), 2 * VF)),
9623 1U, 2 * VF)) ||
9624 count(Slice, Slice.front()) ==
9625 (isa<UndefValue>(Slice.front()) ? VF - 1 : 1)) {
9626 if (IsSplat)
9627 continue;
9628 InstructionsState S = getSameOpcode(Slice, *TLI);
9629 if (!S.getOpcode() || S.isAltShuffle() || !allSameBlock(Slice) ||
9630 (S.getOpcode() == Instruction::Load &&
9631 areKnownNonVectorizableLoads(Slice)) ||
9632 (S.getOpcode() != Instruction::Load && !has_single_bit(VF)))
9633 continue;
9634 if (VF == 2) {
9635 // Try to vectorize reduced values or if all users are vectorized.
9636 // For expensive instructions extra extracts might be profitable.
9637 if ((!UserIgnoreList || E.Idx != 0) &&
9638 TTI->getInstructionCost(cast<Instruction>(Slice.front()),
9639 CostKind) < TTI::TCC_Expensive &&
9640 !all_of(Slice, [&](Value *V) {
9641 return areAllUsersVectorized(cast<Instruction>(V),
9642 UserIgnoreList);
9644 continue;
9645 if (S.getOpcode() == Instruction::Load) {
9646 OrdersType Order;
9647 SmallVector<Value *> PointerOps;
9648 LoadsState Res =
9649 canVectorizeLoads(Slice, Slice.front(), Order, PointerOps);
9650 // Do not vectorize gathers.
9651 if (Res == LoadsState::ScatterVectorize ||
9652 Res == LoadsState::Gather) {
9653 if (Res == LoadsState::Gather) {
9654 registerNonVectorizableLoads(Slice);
9655 // If reductions and the scalars from the root node are
9656 // analyzed - mark as non-vectorizable reduction.
9657 if (UserIgnoreList && E.Idx == 0)
9658 analyzedReductionVals(Slice);
9660 continue;
9662 } else if (S.getOpcode() == Instruction::ExtractElement ||
9663 (TTI->getInstructionCost(
9664 cast<Instruction>(Slice.front()), CostKind) <
9665 TTI::TCC_Expensive &&
9666 !CheckOperandsProfitability(
9667 cast<Instruction>(Slice.front()),
9668 cast<Instruction>(Slice.back()), S))) {
9669 // Do not vectorize extractelements (handled effectively
9670 // already). Do not vectorize non-profitable instructions (with
9671 // low cost and non-vectorizable operands).
9672 continue;
9676 Slices.emplace_back(Cnt, Slice.size());
9678 auto AddCombinedNode = [&](unsigned Idx, unsigned Cnt, unsigned Sz) {
9679 E.CombinedEntriesWithIndices.emplace_back(Idx, Cnt);
9680 if (StartIdx == Cnt)
9681 StartIdx = Cnt + Sz;
9682 if (End == Cnt + Sz)
9683 End = Cnt;
9685 for (auto [Cnt, Sz] : Slices) {
9686 ArrayRef<Value *> Slice = VL.slice(Cnt, Sz);
9687 // If any instruction is vectorized already - do not try again.
9688 if (TreeEntry *SE = getTreeEntry(Slice.front());
9689 SE || getTreeEntry(Slice.back())) {
9690 if (!SE)
9691 continue;
9692 if (VF != SE->getVectorFactor() || !SE->isSame(Slice))
9693 continue;
9694 SE->UserTreeIndices.emplace_back(&E, UINT_MAX);
9695 AddCombinedNode(SE->Idx, Cnt, Sz);
9696 continue;
9698 unsigned PrevSize = VectorizableTree.size();
9699 [[maybe_unused]] unsigned PrevEntriesSize =
9700 LoadEntriesToVectorize.size();
9701 buildTree_rec(Slice, 0, EdgeInfo(&E, UINT_MAX));
9702 if (PrevSize + 1 == VectorizableTree.size() &&
9703 VectorizableTree[PrevSize]->isGather() &&
9704 VectorizableTree[PrevSize]->getOpcode() !=
9705 Instruction::ExtractElement &&
9706 !isSplat(Slice)) {
9707 if (UserIgnoreList && E.Idx == 0 && VF == 2)
9708 analyzedReductionVals(Slice);
9709 VectorizableTree.pop_back();
9710 assert(PrevEntriesSize == LoadEntriesToVectorize.size() &&
9711 "LoadEntriesToVectorize expected to remain the same");
9712 continue;
9714 AddCombinedNode(PrevSize, Cnt, Sz);
9717 // Restore ordering, if no extra vectorization happened.
9718 if (E.CombinedEntriesWithIndices.empty() && !E.ReorderIndices.empty()) {
9719 SmallVector<int> Mask(E.ReorderIndices.begin(), E.ReorderIndices.end());
9720 reorderScalars(E.Scalars, Mask);
9721 E.ReorderIndices.clear();
9724 switch (E.getOpcode()) {
9725 case Instruction::Load: {
9726 // No need to reorder masked gather loads, just reorder the scalar
9727 // operands.
9728 if (E.State != TreeEntry::Vectorize)
9729 break;
9730 Type *ScalarTy = E.getMainOp()->getType();
9731 auto *VecTy = getWidenedType(ScalarTy, E.Scalars.size());
9732 Align CommonAlignment = computeCommonAlignment<LoadInst>(E.Scalars);
9733 // Check if profitable to represent consecutive loads + reverse as a strided
9734 // load with stride -1.
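// For illustration: loads of p[3], p[2], p[1], p[0] can be emitted as a
// single strided load with stride -1 instead of a contiguous vector load
// followed by a reverse shuffle, when the target reports the strided form
// as cheaper (the cost comparison below).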
9735 if (isReverseOrder(E.ReorderIndices) &&
9736 TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
9737 SmallVector<int> Mask;
9738 inversePermutation(E.ReorderIndices, Mask);
9739 auto *BaseLI = cast<LoadInst>(E.Scalars.back());
9740 InstructionCost OriginalVecCost =
9741 TTI->getMemoryOpCost(Instruction::Load, VecTy, BaseLI->getAlign(),
9742 BaseLI->getPointerAddressSpace(), CostKind,
9743 TTI::OperandValueInfo()) +
9744 ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind);
9745 InstructionCost StridedCost = TTI->getStridedMemoryOpCost(
9746 Instruction::Load, VecTy, BaseLI->getPointerOperand(),
9747 /*VariableMask=*/false, CommonAlignment, CostKind, BaseLI);
9748 if (StridedCost < OriginalVecCost)
9749 // Strided load is more profitable than consecutive load + reverse -
9750 // transform the node to strided load.
9751 E.State = TreeEntry::StridedVectorize;
9753 break;
9755 case Instruction::Store: {
9756 Type *ScalarTy =
9757 cast<StoreInst>(E.getMainOp())->getValueOperand()->getType();
9758 auto *VecTy = getWidenedType(ScalarTy, E.Scalars.size());
9759 Align CommonAlignment = computeCommonAlignment<StoreInst>(E.Scalars);
9760 // Check if profitable to represent consecutive stores + reverse as a strided
9761 // store with stride -1.
9762 if (isReverseOrder(E.ReorderIndices) &&
9763 TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
9764 SmallVector<int> Mask;
9765 inversePermutation(E.ReorderIndices, Mask);
9766 auto *BaseSI = cast<StoreInst>(E.Scalars.back());
9767 InstructionCost OriginalVecCost =
9768 TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
9769 BaseSI->getPointerAddressSpace(), CostKind,
9770 TTI::OperandValueInfo()) +
9771 ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind);
9772 InstructionCost StridedCost = TTI->getStridedMemoryOpCost(
9773 Instruction::Store, VecTy, BaseSI->getPointerOperand(),
9774 /*VariableMask=*/false, CommonAlignment, CostKind, BaseSI);
9775 if (StridedCost < OriginalVecCost)
9776 // Strided store is more profitable than reverse + consecutive store -
9777 // transform the node to strided store.
9778 E.State = TreeEntry::StridedVectorize;
9779 } else if (!E.ReorderIndices.empty()) {
9780 // Check for interleaved stores.
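// A reorder mask that splits the scalars into Factor evenly interleaved
// sequences allows emitting the node as a single interleaved store, provided
// the target reports the interleaved access type as legal for this alignment
// and address space (checked below).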
9781 auto IsInterleaveMask = [&, &TTI = *TTI](ArrayRef<int> Mask) {
9782 auto *BaseSI = cast<StoreInst>(E.Scalars.front());
9783 assert(Mask.size() > 1 && "Expected mask greater than 1 element.");
9784 if (Mask.size() < 4)
9785 return 0u;
9786 for (unsigned Factor : seq<unsigned>(2, Mask.size() / 2 + 1)) {
9787 if (ShuffleVectorInst::isInterleaveMask(
9788 Mask, Factor, VecTy->getElementCount().getFixedValue()) &&
9789 TTI.isLegalInterleavedAccessType(
9790 VecTy, Factor, BaseSI->getAlign(),
9791 BaseSI->getPointerAddressSpace()))
9792 return Factor;
9795 return 0u;
9797 SmallVector<int> Mask(E.ReorderIndices.begin(), E.ReorderIndices.end());
9798 unsigned InterleaveFactor = IsInterleaveMask(Mask);
9799 if (InterleaveFactor != 0)
9800 E.setInterleave(InterleaveFactor);
9802 break;
9804 case Instruction::Select: {
9805 if (E.State != TreeEntry::Vectorize)
9806 break;
9807 auto [MinMaxID, SelectOnly] = canConvertToMinOrMaxIntrinsic(E.Scalars);
9808 if (MinMaxID == Intrinsic::not_intrinsic)
9809 break;
9810 // This node is a minmax node.
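// E.g. a select of the form (a < b) ? a : b can be emitted as a min
// intrinsic; the node is then costed/emitted as the combined min/max
// operation, and a select-only condition node is folded into it below.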
9811 E.CombinedOp = TreeEntry::MinMax;
9812 TreeEntry *CondEntry = const_cast<TreeEntry *>(getOperandEntry(&E, 0));
9813 if (SelectOnly && CondEntry->UserTreeIndices.size() == 1 &&
9814 CondEntry->State == TreeEntry::Vectorize) {
9815 // The condition node is part of the combined minmax node.
9816 CondEntry->State = TreeEntry::CombinedVectorize;
9818 break;
9820 default:
9821 break;
9825 if (LoadEntriesToVectorize.empty()) {
9826 // Single load node - exit.
9827 if (VectorizableTree.size() <= 1 &&
9828 VectorizableTree.front()->getOpcode() == Instruction::Load)
9829 return;
9830 // Small graph with small VF - exit.
9831 constexpr unsigned SmallTree = 3;
9832 constexpr unsigned SmallVF = 2;
9833 if ((VectorizableTree.size() <= SmallTree &&
9834 VectorizableTree.front()->Scalars.size() == SmallVF) ||
9835 (VectorizableTree.size() <= 2 && UserIgnoreList))
9836 return;
9838 if (VectorizableTree.front()->isNonPowOf2Vec() &&
9839 getCanonicalGraphSize() != getTreeSize() && UserIgnoreList &&
9840 getCanonicalGraphSize() <= SmallTree &&
9841 count_if(ArrayRef(VectorizableTree).drop_front(getCanonicalGraphSize()),
9842 [](const std::unique_ptr<TreeEntry> &TE) {
9843 return TE->isGather() &&
9844 TE->getOpcode() == Instruction::Load &&
9845 !allSameBlock(TE->Scalars);
9846 }) == 1)
9847 return;
9850 // A list of loads to be gathered during the vectorization process. We can
9851 // try to vectorize them at the end, if profitable.
9852 SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
9853 SmallVector<SmallVector<std::pair<LoadInst *, int>>>, 8>
9854 GatheredLoads;
9856 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
9857 TreeEntry &E = *TE;
9858 if (E.isGather() &&
9859 (E.getOpcode() == Instruction::Load ||
9860 (!E.getOpcode() && any_of(E.Scalars,
9861 [&](Value *V) {
9862 return isa<LoadInst>(V) &&
9863 !isVectorized(V) &&
9864 !isDeleted(cast<Instruction>(V));
9865 }))) &&
9866 !isSplat(E.Scalars)) {
9867 for (Value *V : E.Scalars) {
9868 auto *LI = dyn_cast<LoadInst>(V);
9869 if (!LI)
9870 continue;
9871 if (isDeleted(LI) || isVectorized(LI) || !LI->isSimple())
9872 continue;
9873 gatherPossiblyVectorizableLoads(
9874 *this, V, *DL, *SE, *TTI,
9875 GatheredLoads[std::make_tuple(
9876 LI->getParent(),
9877 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth),
9878 LI->getType())]);
9882 // Try to vectorize gathered loads if this is not just a gather of loads.
9883 if (!GatheredLoads.empty())
9884 tryToVectorizeGatheredLoads(GatheredLoads);
9887 /// Merges shuffle masks and emits final shuffle instruction, if required. It
9888 /// supports shuffling of 2 input vectors. It implements lazy shuffle emission,
9889 /// where the actual shuffle instruction is generated only if it is really
9890 /// required. Otherwise, the shuffle instruction emission is delayed till the
9891 /// end of the process, to reduce the number of emitted instructions and further
9892 /// analysis/transformations.
9893 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
9894 bool IsFinalized = false;
9895 SmallVector<int> CommonMask;
9896 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors;
9897 const TargetTransformInfo &TTI;
9898 InstructionCost Cost = 0;
9899 SmallDenseSet<Value *> VectorizedVals;
9900 BoUpSLP &R;
9901 SmallPtrSetImpl<Value *> &CheckedExtracts;
9902 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9903 /// While set, we are still trying to estimate the cost for the same nodes and
9904 /// can delay the actual cost estimation (virtual shuffle instruction emission).
9905 /// This may help to better estimate the cost if the same nodes must be permuted
9906 /// and allows moving most of the long shuffle cost estimation to TTI.
9907 bool SameNodesEstimated = true;
9909 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) {
9910 if (Ty->getScalarType()->isPointerTy()) {
9911 Constant *Res = ConstantExpr::getIntToPtr(
9912 ConstantInt::getAllOnesValue(
9913 IntegerType::get(Ty->getContext(),
9914 DL.getTypeStoreSizeInBits(Ty->getScalarType()))),
9915 Ty->getScalarType());
9916 if (auto *VTy = dyn_cast<VectorType>(Ty))
9917 Res = ConstantVector::getSplat(VTy->getElementCount(), Res);
9918 return Res;
9920 return Constant::getAllOnesValue(Ty);
9923 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) {
9924 if ((!Root && allConstant(VL)) || all_of(VL, IsaPred<UndefValue>))
9925 return TTI::TCC_Free;
9926 auto *VecTy = getWidenedType(ScalarTy, VL.size());
9927 InstructionCost GatherCost = 0;
9928 SmallVector<Value *> Gathers(VL);
9929 if (!Root && isSplat(VL)) {
9930 // Found a broadcast of a single scalar, calculate the cost as
9931 // the broadcast.
9932 const auto *It = find_if_not(VL, IsaPred<UndefValue>);
9933 assert(It != VL.end() && "Expected at least one non-undef value.");
9934 // Add broadcast for non-identity shuffle only.
9935 bool NeedShuffle =
9936 count(VL, *It) > 1 &&
9937 (VL.front() != *It || !all_of(VL.drop_front(), IsaPred<UndefValue>));
9938 if (!NeedShuffle) {
9939 if (isa<FixedVectorType>(ScalarTy)) {
9940 assert(SLPReVec && "FixedVectorType is not expected.");
9941 return TTI.getShuffleCost(
9942 TTI::SK_InsertSubvector, VecTy, {}, CostKind,
9943 std::distance(VL.begin(), It) * getNumElements(ScalarTy),
9944 cast<FixedVectorType>(ScalarTy));
9946 return TTI.getVectorInstrCost(Instruction::InsertElement, VecTy,
9947 CostKind, std::distance(VL.begin(), It),
9948 PoisonValue::get(VecTy), *It);
9951 SmallVector<int> ShuffleMask(VL.size(), PoisonMaskElem);
9952 transform(VL, ShuffleMask.begin(), [](Value *V) {
9953 return isa<PoisonValue>(V) ? PoisonMaskElem : 0;
9955 InstructionCost InsertCost =
9956 TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 0,
9957 PoisonValue::get(VecTy), *It);
9958 return InsertCost + ::getShuffleCost(TTI,
9959 TargetTransformInfo::SK_Broadcast,
9960 VecTy, ShuffleMask, CostKind,
9961 /*Index=*/0, /*SubTp=*/nullptr,
9962 /*Args=*/*It);
9964 return GatherCost +
9965 (all_of(Gathers, IsaPred<UndefValue>)
9966 ? TTI::TCC_Free
9967 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers),
9968 ScalarTy));
9971 /// Compute the cost of creating a vector containing the extracted values from
9972 /// \p VL.
9973 InstructionCost
9974 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask,
9975 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
9976 unsigned NumParts) {
9977 assert(VL.size() > NumParts && "Unexpected scalarized shuffle.");
9978 unsigned NumElts =
9979 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) {
9980 auto *EE = dyn_cast<ExtractElementInst>(V);
9981 if (!EE)
9982 return Sz;
9983 auto *VecTy = dyn_cast<FixedVectorType>(EE->getVectorOperandType());
9984 if (!VecTy)
9985 return Sz;
9986 return std::max(Sz, VecTy->getNumElements());
9988 // FIXME: this must be moved to TTI for better estimation.
9989 unsigned EltsPerVector = getPartNumElems(VL.size(), NumParts);
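// Sketch of the logic below: the mask is processed in chunks of
// EltsPerVector lanes (one chunk per vector register). For each chunk,
// CheckPerRegistersShuffle tries to classify it as a single- or two-register
// shuffle; if that fails, the chunk is costed as a shuffle of the full
// source vector instead.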
9990 auto CheckPerRegistersShuffle = [&](MutableArrayRef<int> Mask,
9991 SmallVectorImpl<unsigned> &Indices)
9992 -> std::optional<TTI::ShuffleKind> {
9993 if (NumElts <= EltsPerVector)
9994 return std::nullopt;
9995 int OffsetReg0 =
9996 alignDown(std::accumulate(Mask.begin(), Mask.end(), INT_MAX,
9997 [](int S, int I) {
9998 if (I == PoisonMaskElem)
9999 return S;
10000 return std::min(S, I);
10002 EltsPerVector);
10003 int OffsetReg1 = OffsetReg0;
10004 DenseSet<int> RegIndices;
10005 // Check if we are trying to permute the same single or 2 input vectors.
10006 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc;
10007 int FirstRegId = -1;
10008 Indices.assign(1, OffsetReg0);
10009 for (auto [Pos, I] : enumerate(Mask)) {
10010 if (I == PoisonMaskElem)
10011 continue;
10012 int Idx = I - OffsetReg0;
10013 int RegId =
10014 (Idx / NumElts) * NumParts + (Idx % NumElts) / EltsPerVector;
10015 if (FirstRegId < 0)
10016 FirstRegId = RegId;
10017 RegIndices.insert(RegId);
10018 if (RegIndices.size() > 2)
10019 return std::nullopt;
10020 if (RegIndices.size() == 2) {
10021 ShuffleKind = TTI::SK_PermuteTwoSrc;
10022 if (Indices.size() == 1) {
10023 OffsetReg1 = alignDown(
10024 std::accumulate(
10025 std::next(Mask.begin(), Pos), Mask.end(), INT_MAX,
10026 [&](int S, int I) {
10027 if (I == PoisonMaskElem)
10028 return S;
10029 int RegId = ((I - OffsetReg0) / NumElts) * NumParts +
10030 ((I - OffsetReg0) % NumElts) / EltsPerVector;
10031 if (RegId == FirstRegId)
10032 return S;
10033 return std::min(S, I);
10035 EltsPerVector);
10036 Indices.push_back(OffsetReg1 % NumElts);
10038 Idx = I - OffsetReg1;
10040 I = (Idx % NumElts) % EltsPerVector +
10041 (RegId == FirstRegId ? 0 : EltsPerVector);
10043 return ShuffleKind;
10045 InstructionCost Cost = 0;
10047 // Process extracts in blocks of EltsPerVector to check if the source vector
10048 // operand can be re-used directly. If not, add the cost of creating a
10049 // shuffle to extract the values into a vector register.
10050 for (unsigned Part : seq<unsigned>(NumParts)) {
10051 if (!ShuffleKinds[Part])
10052 continue;
10053 ArrayRef<int> MaskSlice = Mask.slice(
10054 Part * EltsPerVector, getNumElems(Mask.size(), EltsPerVector, Part));
10055 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem);
10056 copy(MaskSlice, SubMask.begin());
10057 SmallVector<unsigned, 2> Indices;
10058 std::optional<TTI::ShuffleKind> RegShuffleKind =
10059 CheckPerRegistersShuffle(SubMask, Indices);
10060 if (!RegShuffleKind) {
10061 if (*ShuffleKinds[Part] != TTI::SK_PermuteSingleSrc ||
10062 !ShuffleVectorInst::isIdentityMask(
10063 MaskSlice, std::max<unsigned>(NumElts, MaskSlice.size())))
10064 Cost +=
10065 ::getShuffleCost(TTI, *ShuffleKinds[Part],
10066 getWidenedType(ScalarTy, NumElts), MaskSlice);
10067 continue;
10069 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc ||
10070 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) {
10071 Cost +=
10072 ::getShuffleCost(TTI, *RegShuffleKind,
10073 getWidenedType(ScalarTy, EltsPerVector), SubMask);
10075 const unsigned BaseVF = getFullVectorNumberOfElements(
10076 *R.TTI, VL.front()->getType(), alignTo(NumElts, EltsPerVector));
10077 for (unsigned Idx : Indices) {
10078 assert((Idx + EltsPerVector) <= BaseVF &&
10079 "SK_ExtractSubvector index out of range");
10080 Cost += ::getShuffleCost(TTI, TTI::SK_ExtractSubvector,
10081 getWidenedType(ScalarTy, BaseVF), {}, CostKind,
10082 Idx, getWidenedType(ScalarTy, EltsPerVector));
10084 // Second attempt to check if just a permute is estimated to be cheaper
10085 // than a subvector extract.
10086 SubMask.assign(NumElts, PoisonMaskElem);
10087 copy(MaskSlice, SubMask.begin());
10088 InstructionCost OriginalCost = ::getShuffleCost(
10089 TTI, *ShuffleKinds[Part], getWidenedType(ScalarTy, NumElts), SubMask);
10090 if (OriginalCost < Cost)
10091 Cost = OriginalCost;
10093 return Cost;
10095 /// Transforms the mask \p CommonMask per the given \p Mask so that it is set
10096 /// up properly after shuffle emission.
10097 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
10098 ArrayRef<int> Mask) {
10099 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10100 if (Mask[Idx] != PoisonMaskElem)
10101 CommonMask[Idx] = Idx;
10103 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using the
10104 /// given mask \p Mask, for register number \p Part, which includes
10105 /// \p SliceSize elements.
10106 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2,
10107 ArrayRef<int> Mask, unsigned Part,
10108 unsigned SliceSize) {
10109 if (SameNodesEstimated) {
10110 // Delay the cost estimation if the same nodes are being reshuffled.
10111 // If we already requested the cost of reshuffling of E1 and E2 before, no
10112 // need to estimate another cost with the sub-Mask, instead include this
10113 // sub-Mask into the CommonMask to estimate it later and avoid double cost
10114 // estimation.
10115 if ((InVectors.size() == 2 &&
10116 InVectors.front().get<const TreeEntry *>() == &E1 &&
10117 InVectors.back().get<const TreeEntry *>() == E2) ||
10118 (!E2 && InVectors.front().get<const TreeEntry *>() == &E1)) {
10119 unsigned Limit = getNumElems(Mask.size(), SliceSize, Part);
10120 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, Limit),
10121 [](int Idx) { return Idx == PoisonMaskElem; }) &&
10122 "Expected all poisoned elements.");
10123 ArrayRef<int> SubMask = ArrayRef(Mask).slice(Part * SliceSize, Limit);
10124 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part));
10125 return;
10127 // Found non-matching nodes - need to estimate the cost for the matched
10128 // nodes and transform the mask.
10129 Cost += createShuffle(InVectors.front(),
10130 InVectors.size() == 1 ? nullptr : InVectors.back(),
10131 CommonMask);
10132 transformMaskAfterShuffle(CommonMask, CommonMask);
10134 SameNodesEstimated = false;
10135 if (!E2 && InVectors.size() == 1) {
10136 unsigned VF = E1.getVectorFactor();
10137 if (Value *V1 = InVectors.front().dyn_cast<Value *>()) {
10138 VF = std::max(VF,
10139 cast<FixedVectorType>(V1->getType())->getNumElements());
10140 } else {
10141 const auto *E = InVectors.front().get<const TreeEntry *>();
10142 VF = std::max(VF, E->getVectorFactor());
10144 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10145 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
10146 CommonMask[Idx] = Mask[Idx] + VF;
10147 Cost += createShuffle(InVectors.front(), &E1, CommonMask);
10148 transformMaskAfterShuffle(CommonMask, CommonMask);
10149 } else {
10150 Cost += createShuffle(&E1, E2, Mask);
10151 transformMaskAfterShuffle(CommonMask, Mask);
10155 class ShuffleCostBuilder {
10156 const TargetTransformInfo &TTI;
10158 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) {
10159 int Index = -1;
10160 return Mask.empty() ||
10161 (VF == Mask.size() &&
10162 ShuffleVectorInst::isIdentityMask(Mask, VF)) ||
10163 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) &&
10164 Index == 0);
10167 public:
10168 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {}
10169 ~ShuffleCostBuilder() = default;
10170 InstructionCost createShuffleVector(Value *V1, Value *,
10171 ArrayRef<int> Mask) const {
10172 // Empty mask or identity mask are free.
10173 unsigned VF =
10174 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
10175 if (isEmptyOrIdentity(Mask, VF))
10176 return TTI::TCC_Free;
10177 return ::getShuffleCost(TTI, TTI::SK_PermuteTwoSrc,
10178 cast<VectorType>(V1->getType()), Mask);
10180 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const {
10181 // Empty mask or identity mask are free.
10182 unsigned VF =
10183 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
10184 if (isEmptyOrIdentity(Mask, VF))
10185 return TTI::TCC_Free;
10186 return ::getShuffleCost(TTI, TTI::SK_PermuteSingleSrc,
10187 cast<VectorType>(V1->getType()), Mask);
10189 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; }
10190 InstructionCost createPoison(Type *Ty, unsigned VF) const {
10191 return TTI::TCC_Free;
10193 void resizeToMatch(Value *&, Value *&) const {}
10196 /// Smart shuffle instruction emission, walks through shuffles trees and
10197 /// tries to find the best matching vector for the actual shuffle
10198 /// instruction.
10199 InstructionCost
10200 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1,
10201 const PointerUnion<Value *, const TreeEntry *> &P2,
10202 ArrayRef<int> Mask) {
10203 ShuffleCostBuilder Builder(TTI);
10204 SmallVector<int> CommonMask(Mask);
10205 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>();
10206 unsigned CommonVF = Mask.size();
10207 InstructionCost ExtraCost = 0;
10208 auto GetNodeMinBWAffectedCost = [&](const TreeEntry &E,
10209 unsigned VF) -> InstructionCost {
10210 if (E.isGather() && allConstant(E.Scalars))
10211 return TTI::TCC_Free;
10212 Type *EScalarTy = E.Scalars.front()->getType();
10213 bool IsSigned = true;
10214 if (auto It = R.MinBWs.find(&E); It != R.MinBWs.end()) {
10215 EScalarTy = IntegerType::get(EScalarTy->getContext(), It->second.first);
10216 IsSigned = It->second.second;
10218 if (EScalarTy != ScalarTy) {
10219 unsigned CastOpcode = Instruction::Trunc;
10220 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10221 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10222 if (DstSz > SrcSz)
10223 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10224 return TTI.getCastInstrCost(CastOpcode, getWidenedType(ScalarTy, VF),
10225 getWidenedType(EScalarTy, VF),
10226 TTI::CastContextHint::None, CostKind);
10228 return TTI::TCC_Free;
10230 auto GetValueMinBWAffectedCost = [&](const Value *V) -> InstructionCost {
10231 if (isa<Constant>(V))
10232 return TTI::TCC_Free;
10233 auto *VecTy = cast<VectorType>(V->getType());
10234 Type *EScalarTy = VecTy->getElementType();
10235 if (EScalarTy != ScalarTy) {
10236 bool IsSigned = !isKnownNonNegative(V, SimplifyQuery(*R.DL));
10237 unsigned CastOpcode = Instruction::Trunc;
10238 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10239 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10240 if (DstSz > SrcSz)
10241 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10242 return TTI.getCastInstrCost(
10243 CastOpcode, VectorType::get(ScalarTy, VecTy->getElementCount()),
10244 VecTy, TTI::CastContextHint::None, CostKind);
10246 return TTI::TCC_Free;
10248 if (!V1 && !V2 && !P2.isNull()) {
10249 // Shuffle 2 entry nodes.
10250 const TreeEntry *E = P1.get<const TreeEntry *>();
10251 unsigned VF = E->getVectorFactor();
10252 const TreeEntry *E2 = P2.get<const TreeEntry *>();
10253 CommonVF = std::max(VF, E2->getVectorFactor());
10254 assert(all_of(Mask,
10255 [=](int Idx) {
10256 return Idx < 2 * static_cast<int>(CommonVF);
10257 }) &&
10258 "All elements in mask must be less than 2 * CommonVF.");
10259 if (E->Scalars.size() == E2->Scalars.size()) {
10260 SmallVector<int> EMask = E->getCommonMask();
10261 SmallVector<int> E2Mask = E2->getCommonMask();
10262 if (!EMask.empty() || !E2Mask.empty()) {
10263 for (int &Idx : CommonMask) {
10264 if (Idx == PoisonMaskElem)
10265 continue;
10266 if (Idx < static_cast<int>(CommonVF) && !EMask.empty())
10267 Idx = EMask[Idx];
10268 else if (Idx >= static_cast<int>(CommonVF))
10269 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) +
10270 E->Scalars.size();
10273 CommonVF = E->Scalars.size();
10274 ExtraCost += GetNodeMinBWAffectedCost(*E, CommonVF) +
10275 GetNodeMinBWAffectedCost(*E2, CommonVF);
10276 } else {
10277 ExtraCost += GetNodeMinBWAffectedCost(*E, E->getVectorFactor()) +
10278 GetNodeMinBWAffectedCost(*E2, E2->getVectorFactor());
10280 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10281 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10282 } else if (!V1 && P2.isNull()) {
10283 // Shuffle single entry node.
10284 const TreeEntry *E = P1.get<const TreeEntry *>();
10285 unsigned VF = E->getVectorFactor();
10286 CommonVF = VF;
10287 assert(
10288 all_of(Mask,
10289 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) &&
10290 "All elements in mask must be less than CommonVF.");
10291 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) {
10292 SmallVector<int> EMask = E->getCommonMask();
10293 assert(!EMask.empty() && "Expected non-empty common mask.");
10294 for (int &Idx : CommonMask) {
10295 if (Idx != PoisonMaskElem)
10296 Idx = EMask[Idx];
10298 CommonVF = E->Scalars.size();
10299 } else if (unsigned Factor = E->getInterleaveFactor();
10300 Factor > 0 && E->Scalars.size() != Mask.size() &&
10301 ShuffleVectorInst::isDeInterleaveMaskOfFactor(CommonMask,
10302 Factor)) {
10303 // Deinterleaved nodes are free.
10304 std::iota(CommonMask.begin(), CommonMask.end(), 0);
10306 ExtraCost += GetNodeMinBWAffectedCost(*E, CommonVF);
10307 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10308 // Not identity/broadcast? Try to see if the original vector is better.
10309 if (!E->ReorderIndices.empty() && CommonVF == E->ReorderIndices.size() &&
10310 CommonVF == CommonMask.size() &&
10311 any_of(enumerate(CommonMask),
10312 [](const auto &&P) {
10313 return P.value() != PoisonMaskElem &&
10314 static_cast<unsigned>(P.value()) != P.index();
10315 }) &&
10316 any_of(CommonMask,
10317 [](int Idx) { return Idx != PoisonMaskElem && Idx != 0; })) {
10318 SmallVector<int> ReorderMask;
10319 inversePermutation(E->ReorderIndices, ReorderMask);
10320 ::addMask(CommonMask, ReorderMask);
10322 } else if (V1 && P2.isNull()) {
10323 // Shuffle single vector.
10324 ExtraCost += GetValueMinBWAffectedCost(V1);
10325 CommonVF = getVF(V1);
10326 assert(
10327 all_of(Mask,
10328 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) &&
10329 "All elements in mask must be less than CommonVF.");
10330 } else if (V1 && !V2) {
10331 // Shuffle vector and tree node.
10332 unsigned VF = getVF(V1);
10333 const TreeEntry *E2 = P2.get<const TreeEntry *>();
10334 CommonVF = std::max(VF, E2->getVectorFactor());
10335 assert(all_of(Mask,
10336 [=](int Idx) {
10337 return Idx < 2 * static_cast<int>(CommonVF);
10338 }) &&
10339 "All elements in mask must be less than 2 * CommonVF.");
10340 if (E2->Scalars.size() == VF && VF != CommonVF) {
10341 SmallVector<int> E2Mask = E2->getCommonMask();
10342 assert(!E2Mask.empty() && "Expected non-empty common mask.");
10343 for (int &Idx : CommonMask) {
10344 if (Idx == PoisonMaskElem)
10345 continue;
10346 if (Idx >= static_cast<int>(CommonVF))
10347 Idx = E2Mask[Idx - CommonVF] + VF;
10349 CommonVF = VF;
10351 ExtraCost += GetValueMinBWAffectedCost(V1);
10352 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10353 ExtraCost += GetNodeMinBWAffectedCost(
10354 *E2, std::min(CommonVF, E2->getVectorFactor()));
10355 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10356 } else if (!V1 && V2) {
10357 // Shuffle vector and tree node.
10358 unsigned VF = getVF(V2);
10359 const TreeEntry *E1 = P1.get<const TreeEntry *>();
10360 CommonVF = std::max(VF, E1->getVectorFactor());
10361 assert(all_of(Mask,
10362 [=](int Idx) {
10363 return Idx < 2 * static_cast<int>(CommonVF);
10364 }) &&
10365 "All elements in mask must be less than 2 * CommonVF.");
10366 if (E1->Scalars.size() == VF && VF != CommonVF) {
10367 SmallVector<int> E1Mask = E1->getCommonMask();
10368 assert(!E1Mask.empty() && "Expected non-empty common mask.");
10369 for (int &Idx : CommonMask) {
10370 if (Idx == PoisonMaskElem)
10371 continue;
10372 if (Idx >= static_cast<int>(CommonVF))
10373 Idx = E1Mask[Idx - CommonVF] + VF;
10374 else
10375 Idx = E1Mask[Idx];
10377 CommonVF = VF;
10379 ExtraCost += GetNodeMinBWAffectedCost(
10380 *E1, std::min(CommonVF, E1->getVectorFactor()));
10381 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10382 ExtraCost += GetValueMinBWAffectedCost(V2);
10383 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10384 } else {
10385 assert(V1 && V2 && "Expected both vectors.");
10386 unsigned VF = getVF(V1);
10387 CommonVF = std::max(VF, getVF(V2));
10388 assert(all_of(Mask,
10389 [=](int Idx) {
10390 return Idx < 2 * static_cast<int>(CommonVF);
10391 }) &&
10392 "All elements in mask must be less than 2 * CommonVF.");
10393 ExtraCost +=
10394 GetValueMinBWAffectedCost(V1) + GetValueMinBWAffectedCost(V2);
10395 if (V1->getType() != V2->getType()) {
10396 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10397 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10398 } else {
10399 if (cast<VectorType>(V1->getType())->getElementType() != ScalarTy)
10400 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10401 if (cast<VectorType>(V2->getType())->getElementType() != ScalarTy)
10402 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10405 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
10406 assert(SLPReVec && "FixedVectorType is not expected.");
10407 transformScalarShuffleIndiciesToVector(VecTy->getNumElements(),
10408 CommonMask);
10410 InVectors.front() =
10411 Constant::getNullValue(getWidenedType(ScalarTy, CommonMask.size()));
10412 if (InVectors.size() == 2)
10413 InVectors.pop_back();
10414 return ExtraCost + BaseShuffleAnalysis::createShuffle<InstructionCost>(
10415 V1, V2, CommonMask, Builder);
10418 public:
10419 ShuffleCostEstimator(Type *ScalarTy, TargetTransformInfo &TTI,
10420 ArrayRef<Value *> VectorizedVals, BoUpSLP &R,
10421 SmallPtrSetImpl<Value *> &CheckedExtracts)
10422 : BaseShuffleAnalysis(ScalarTy), TTI(TTI),
10423 VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), R(R),
10424 CheckedExtracts(CheckedExtracts) {}
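/// Gives cost credit for extractelement instructions that become dead after
/// vectorization and returns the vector the scalars are extracted from, or a
/// widened placeholder when the extracts use several different source vectors
/// across parts.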
10425 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
10426 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
10427 unsigned NumParts, bool &UseVecBaseAsInput) {
10428 UseVecBaseAsInput = false;
10429 if (Mask.empty())
10430 return nullptr;
10431 Value *VecBase = nullptr;
10432 SmallVector<Value *> VL(E->Scalars.begin(), E->Scalars.end());
10433 if (!E->ReorderIndices.empty()) {
10434 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
10435 E->ReorderIndices.end());
10436 reorderScalars(VL, ReorderMask);
10438 // Check if the extracts can be considered reused if the same
10439 // extractelements were already vectorized.
10440 bool PrevNodeFound = any_of(
10441 ArrayRef(R.VectorizableTree).take_front(E->Idx),
10442 [&](const std::unique_ptr<TreeEntry> &TE) {
10443 return ((!TE->isAltShuffle() &&
10444 TE->getOpcode() == Instruction::ExtractElement) ||
10445 TE->isGather()) &&
10446 all_of(enumerate(TE->Scalars), [&](auto &&Data) {
10447 return VL.size() > Data.index() &&
10448 (Mask[Data.index()] == PoisonMaskElem ||
10449 isa<UndefValue>(VL[Data.index()]) ||
10450 Data.value() == VL[Data.index()]);
10453 SmallPtrSet<Value *, 4> UniqueBases;
10454 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
10455 for (unsigned Part : seq<unsigned>(NumParts)) {
10456 unsigned Limit = getNumElems(VL.size(), SliceSize, Part);
10457 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
10458 for (auto [I, V] :
10459 enumerate(ArrayRef(VL).slice(Part * SliceSize, Limit))) {
10460 // Ignore non-extractelement scalars.
10461 if (isa<UndefValue>(V) ||
10462 (!SubMask.empty() && SubMask[I] == PoisonMaskElem))
10463 continue;
10464 // If all users of the instruction are going to be vectorized and this
10465 // instruction itself is not going to be vectorized, consider this
10466 // instruction as dead and remove its cost from the final cost of the
10467 // vectorized tree.
10468 // Also, avoid adjusting the cost for extractelements with multiple uses
10469 // in different graph entries.
10470 auto *EE = cast<ExtractElementInst>(V);
10471 VecBase = EE->getVectorOperand();
10472 UniqueBases.insert(VecBase);
10473 const TreeEntry *VE = R.getTreeEntry(V);
10474 if (!CheckedExtracts.insert(V).second ||
10475 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) ||
10476 any_of(EE->users(),
10477 [&](User *U) {
10478 return isa<GetElementPtrInst>(U) &&
10479 !R.areAllUsersVectorized(cast<Instruction>(U),
10480 &VectorizedVals);
10481 }) ||
10482 (VE && VE != E))
10483 continue;
10484 std::optional<unsigned> EEIdx = getExtractIndex(EE);
10485 if (!EEIdx)
10486 continue;
10487 unsigned Idx = *EEIdx;
10488 // Take credit for the instruction that will become dead.
10489 if (EE->hasOneUse() || !PrevNodeFound) {
10490 Instruction *Ext = EE->user_back();
10491 if (isa<SExtInst, ZExtInst>(Ext) &&
10492 all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
10493 // Use getExtractWithExtendCost() to calculate the cost of
10494 // extractelement/ext pair.
10495 Cost -=
10496 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
10497 EE->getVectorOperandType(), Idx);
10498 // Add back the cost of s|zext which is subtracted separately.
10499 Cost += TTI.getCastInstrCost(
10500 Ext->getOpcode(), Ext->getType(), EE->getType(),
10501 TTI::getCastContextHint(Ext), CostKind, Ext);
10502 continue;
10505 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(),
10506 CostKind, Idx);
10509 // Check that the gather of extractelements can be represented as just a
10510 // shuffle of the single/two vectors the scalars are extracted from.
10511 // We found a bunch of extractelement instructions that must be gathered
10512 // into a vector and can be represented as a permutation of elements of a
10513 // single input vector or of 2 input vectors.
10514 // The cost is skipped for reused nodes whose extractelements were already vectorized.
10515 if (!PrevNodeFound)
10516 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts);
10517 InVectors.assign(1, E);
10518 CommonMask.assign(Mask.begin(), Mask.end());
10519 transformMaskAfterShuffle(CommonMask, CommonMask);
10520 SameNodesEstimated = false;
10521 if (NumParts != 1 && UniqueBases.size() != 1) {
10522 UseVecBaseAsInput = true;
10523 VecBase =
10524 Constant::getNullValue(getWidenedType(ScalarTy, CommonMask.size()));
10526 return VecBase;
10528 /// Checks if the specified entry \p E needs to be delayed because of its
10529 /// dependency nodes.
10530 std::optional<InstructionCost>
10531 needToDelay(const TreeEntry *,
10532 ArrayRef<SmallVector<const TreeEntry *>>) const {
10533 // No need to delay the cost estimation during analysis.
10534 return std::nullopt;
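/// Adds 2 input tree entries and the mask for their shuffling.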
10536 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) {
10537 if (&E1 == &E2) {
10538 assert(all_of(Mask,
10539 [&](int Idx) {
10540 return Idx < static_cast<int>(E1.getVectorFactor());
10541 }) &&
10542 "Expected single vector shuffle mask.");
10543 add(E1, Mask);
10544 return;
10546 if (InVectors.empty()) {
10547 CommonMask.assign(Mask.begin(), Mask.end());
10548 InVectors.assign({&E1, &E2});
10549 return;
10551 assert(!CommonMask.empty() && "Expected non-empty common mask.");
10552 auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
10553 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
10554 if (NumParts == 0 || NumParts >= Mask.size() ||
10555 MaskVecTy->getNumElements() % NumParts != 0 ||
10556 !hasFullVectorsOrPowerOf2(TTI, MaskVecTy->getElementType(),
10557 MaskVecTy->getNumElements() / NumParts))
10558 NumParts = 1;
10559 unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
10560 const auto *It =
10561 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
10562 unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
10563 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize);
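/// Adds a single input tree entry and the mask for its shuffling.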
10565 void add(const TreeEntry &E1, ArrayRef<int> Mask) {
10566 if (InVectors.empty()) {
10567 CommonMask.assign(Mask.begin(), Mask.end());
10568 InVectors.assign(1, &E1);
10569 return;
10571 assert(!CommonMask.empty() && "Expected non-empty common mask.");
10572 auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
10573 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
10574 if (NumParts == 0 || NumParts >= Mask.size() ||
10575 MaskVecTy->getNumElements() % NumParts != 0 ||
10576 !hasFullVectorsOrPowerOf2(TTI, MaskVecTy->getElementType(),
10577 MaskVecTy->getNumElements() / NumParts))
10578 NumParts = 1;
10579 unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
10580 const auto *It =
10581 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
10582 unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
10583 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize);
10584 if (!SameNodesEstimated && InVectors.size() == 1)
10585 InVectors.emplace_back(&E1);
10587 /// Adds 2 input vectors and the mask for their shuffling.
10588 void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
10589 // Reached only when shuffling 2 vectors of extractelements that were
10590 // already handled in adjustExtracts.
10591 assert(InVectors.size() == 1 &&
10592 all_of(enumerate(CommonMask),
10593 [&](auto P) {
10594 if (P.value() == PoisonMaskElem)
10595 return Mask[P.index()] == PoisonMaskElem;
10596 auto *EI = cast<ExtractElementInst>(
10597 InVectors.front().get<const TreeEntry *>()->getOrdered(
10598 P.index()));
10599 return EI->getVectorOperand() == V1 ||
10600 EI->getVectorOperand() == V2;
10601 }) &&
10602 "Expected extractelement vectors.");
10604 /// Adds one more input vector and the mask for the shuffling.
10605 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) {
10606 if (InVectors.empty()) {
10607 assert(CommonMask.empty() && !ForExtracts &&
10608 "Expected empty input mask/vectors.");
10609 CommonMask.assign(Mask.begin(), Mask.end());
10610 InVectors.assign(1, V1);
10611 return;
10613 if (ForExtracts) {
10614 // No need to add vectors here, they were already handled in adjustExtracts.
10615 assert(
10616 InVectors.size() == 1 && InVectors.front().is<const TreeEntry *>() &&
10617 !CommonMask.empty() &&
10618 all_of(enumerate(CommonMask),
10619 [&](auto P) {
10620 Value *Scalar =
10621 InVectors.front().get<const TreeEntry *>()->getOrdered(
10622 P.index());
10623 if (P.value() == PoisonMaskElem)
10624 return P.value() == Mask[P.index()] ||
10625 isa<UndefValue>(Scalar);
10626 if (isa<Constant>(V1))
10627 return true;
10628 auto *EI = cast<ExtractElementInst>(Scalar);
10629 return EI->getVectorOperand() == V1;
10630 }) &&
10631 "Expected only tree entry for extractelement vectors.");
10632 return;
10634 assert(!InVectors.empty() && !CommonMask.empty() &&
10635 "Expected only tree entries from extracts/reused buildvectors.");
10636 unsigned VF = getVF(V1);
10637 if (InVectors.size() == 2) {
10638 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
10639 transformMaskAfterShuffle(CommonMask, CommonMask);
10640 VF = std::max<unsigned>(VF, CommonMask.size());
10641 } else if (const auto *InTE =
10642 InVectors.front().dyn_cast<const TreeEntry *>()) {
10643 VF = std::max(VF, InTE->getVectorFactor());
10644 } else {
10645 VF = std::max(
10646 VF, cast<FixedVectorType>(InVectors.front().get<Value *>()->getType())
10647 ->getNumElements());
10649 InVectors.push_back(V1);
10650 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10651 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
10652 CommonMask[Idx] = Mask[Idx] + VF;
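/// Accounts for the cost of gathering the scalars in \p VL and returns a
/// placeholder constant vector that stands in for the gathered value in the
/// remaining shuffle-cost analysis.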
10654 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
10655 Value *Root = nullptr) {
10656 Cost += getBuildVectorCost(VL, Root);
10657 if (!Root) {
10658 // FIXME: Need to find a way to avoid use of getNullValue here.
10659 SmallVector<Constant *> Vals;
10660 unsigned VF = VL.size();
10661 if (MaskVF != 0)
10662 VF = std::min(VF, MaskVF);
10663 for (Value *V : VL.take_front(VF)) {
10664 if (isa<UndefValue>(V)) {
10665 Vals.push_back(cast<Constant>(V));
10666 continue;
10668 Vals.push_back(Constant::getNullValue(V->getType()));
10670 if (auto *VecTy = dyn_cast<FixedVectorType>(Vals.front()->getType())) {
10671 assert(SLPReVec && "FixedVectorType is not expected.");
10672 // When REVEC is enabled, we need to expand vector types into scalar
10673 // types.
10674 unsigned VecTyNumElements = VecTy->getNumElements();
10675 SmallVector<Constant *> NewVals(VF * VecTyNumElements, nullptr);
10676 for (auto [I, V] : enumerate(Vals)) {
10677 Type *ScalarTy = V->getType()->getScalarType();
10678 Constant *NewVal;
10679 if (isa<PoisonValue>(V))
10680 NewVal = PoisonValue::get(ScalarTy);
10681 else if (isa<UndefValue>(V))
10682 NewVal = UndefValue::get(ScalarTy);
10683 else
10684 NewVal = Constant::getNullValue(ScalarTy);
10685 std::fill_n(NewVals.begin() + I * VecTyNumElements, VecTyNumElements,
10686 NewVal);
10688 Vals.swap(NewVals);
10690 return ConstantVector::get(Vals);
10692 return ConstantVector::getSplat(
10693 ElementCount::getFixed(
10694 cast<FixedVectorType>(Root->getType())->getNumElements()),
10695 getAllOnesValue(*R.DL, ScalarTy->getScalarType()));
10697 InstructionCost createFreeze(InstructionCost Cost) { return Cost; }
10698 /// Finalize emission of the shuffles.
10699 InstructionCost
10700 finalize(ArrayRef<int> ExtMask,
10701 ArrayRef<std::pair<const TreeEntry *, unsigned>> SubVectors,
10702 ArrayRef<int> SubVectorsMask, unsigned VF = 0,
10703 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
10704 IsFinalized = true;
10705 if (Action) {
10706 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front();
10707 if (InVectors.size() == 2)
10708 Cost += createShuffle(Vec, InVectors.back(), CommonMask);
10709 else
10710 Cost += createShuffle(Vec, nullptr, CommonMask);
10711 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10712 if (CommonMask[Idx] != PoisonMaskElem)
10713 CommonMask[Idx] = Idx;
10714 assert(VF > 0 &&
10715 "Expected vector length for the final value before action.");
10716 Value *V = Vec.get<Value *>();
10717 Action(V, CommonMask);
10718 InVectors.front() = V;
10720 if (!SubVectors.empty()) {
10721 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front();
10722 if (InVectors.size() == 2)
10723 Cost += createShuffle(Vec, InVectors.back(), CommonMask);
10724 else
10725 Cost += createShuffle(Vec, nullptr, CommonMask);
10726 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10727 if (CommonMask[Idx] != PoisonMaskElem)
10728 CommonMask[Idx] = Idx;
10729 // Add subvectors permutation cost.
10730 if (!SubVectorsMask.empty()) {
10731 assert(SubVectorsMask.size() == CommonMask.size() &&
10732 "Expected same size of masks for subvectors and common mask.");
10733 SmallVector<int> SVMask(SubVectorsMask.begin(), SubVectorsMask.end());
10734 for (auto [I1, I2] : zip(SVMask, CommonMask)) {
10735 if (I2 != PoisonMaskElem) {
10736 assert(I1 == PoisonMaskElem && "Expected unused subvectors mask");
10737 I1 = I2 + CommonMask.size();
10740 Cost += ::getShuffleCost(TTI, TTI::SK_PermuteTwoSrc,
10741 getWidenedType(ScalarTy, CommonMask.size()),
10742 SVMask, CostKind);
10744 for (auto [E, Idx] : SubVectors) {
10745 Type *EScalarTy = E->Scalars.front()->getType();
10746 bool IsSigned = true;
10747 if (auto It = R.MinBWs.find(E); It != R.MinBWs.end()) {
10748 EScalarTy =
10749 IntegerType::get(EScalarTy->getContext(), It->second.first);
10750 IsSigned = It->second.second;
10752 if (ScalarTy != EScalarTy) {
10753 unsigned CastOpcode = Instruction::Trunc;
10754 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10755 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10756 if (DstSz > SrcSz)
10757 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10758 Cost += TTI.getCastInstrCost(
10759 CastOpcode, getWidenedType(ScalarTy, E->getVectorFactor()),
10760 getWidenedType(EScalarTy, E->getVectorFactor()),
10761 TTI::CastContextHint::Normal, CostKind);
10763 Cost += ::getShuffleCost(
10764 TTI, TTI::SK_InsertSubvector,
10765 getWidenedType(ScalarTy, CommonMask.size()), {}, CostKind, Idx,
10766 getWidenedType(ScalarTy, E->getVectorFactor()));
10767 if (!CommonMask.empty()) {
10768 std::iota(std::next(CommonMask.begin(), Idx),
10769 std::next(CommonMask.begin(), Idx + E->getVectorFactor()),
10770 Idx);
10775 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true);
10776 if (CommonMask.empty()) {
10777 assert(InVectors.size() == 1 && "Expected only one vector with no mask");
10778 return Cost;
10780 return Cost +
10781 createShuffle(InVectors.front(),
10782 InVectors.size() == 2 ? InVectors.back() : nullptr,
10783 CommonMask);
10786 ~ShuffleCostEstimator() {
10787 assert((IsFinalized || CommonMask.empty()) &&
10788 "Shuffle construction must be finalized.");
10792 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E,
10793 unsigned Idx) const {
10794 if (const TreeEntry *VE = getMatchedVectorizedOperand(E, Idx))
10795 return VE;
10796 const auto *It =
10797 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
10798 return TE->isGather() &&
10799 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
10800 return EI.EdgeIdx == Idx && EI.UserTE == E;
10801 }) != TE->UserTreeIndices.end();
10803 assert(It != VectorizableTree.end() && "Expected vectorizable entry.");
10804 return It->get();
10807 TTI::CastContextHint BoUpSLP::getCastContextHint(const TreeEntry &TE) const {
10808 if (TE.State == TreeEntry::ScatterVectorize ||
10809 TE.State == TreeEntry::StridedVectorize)
10810 return TTI::CastContextHint::GatherScatter;
10811 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::Load &&
10812 !TE.isAltShuffle()) {
10813 if (TE.ReorderIndices.empty())
10814 return TTI::CastContextHint::Normal;
10815 SmallVector<int> Mask;
10816 inversePermutation(TE.ReorderIndices, Mask);
10817 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size()))
10818 return TTI::CastContextHint::Reversed;
10820 return TTI::CastContextHint::None;
10823 /// Builds the argument types vector for the given call instruction with the
10824 /// given \p ID for the specified vector factor.
10825 static SmallVector<Type *> buildIntrinsicArgTypes(const CallInst *CI,
10826 const Intrinsic::ID ID,
10827 const unsigned VF,
10828 unsigned MinBW) {
10829 SmallVector<Type *> ArgTys;
10830 for (auto [Idx, Arg] : enumerate(CI->args())) {
10831 if (ID != Intrinsic::not_intrinsic) {
10832 if (isVectorIntrinsicWithScalarOpAtArg(ID, Idx)) {
10833 ArgTys.push_back(Arg->getType());
10834 continue;
10836 if (MinBW > 0) {
10837 ArgTys.push_back(
10838 getWidenedType(IntegerType::get(CI->getContext(), MinBW), VF));
10839 continue;
10842 ArgTys.push_back(getWidenedType(Arg->getType(), VF));
10844 return ArgTys;
10847 InstructionCost
10848 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
10849 SmallPtrSetImpl<Value *> &CheckedExtracts) {
10850 ArrayRef<Value *> VL = E->Scalars;
10852 Type *ScalarTy = getValueType(VL[0]);
10853 if (!isValidElementType(ScalarTy))
10854 return InstructionCost::getInvalid();
10855 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
10857 // If we have computed a smaller type for the expression, update VecTy so
10858 // that the costs will be accurate.
10859 auto It = MinBWs.find(E);
10860 Type *OrigScalarTy = ScalarTy;
10861 if (It != MinBWs.end()) {
10862 auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy);
10863 ScalarTy = IntegerType::get(F->getContext(), It->second.first);
10864 if (VecTy)
10865 ScalarTy = getWidenedType(ScalarTy, VecTy->getNumElements());
10867 auto *VecTy = getWidenedType(ScalarTy, VL.size());
10868 unsigned EntryVF = E->getVectorFactor();
10869 auto *FinalVecTy = getWidenedType(ScalarTy, EntryVF);
10871 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
10872 if (E->isGather()) {
10873 if (allConstant(VL))
10874 return 0;
10875 if (isa<InsertElementInst>(VL[0]))
10876 return InstructionCost::getInvalid();
10877 if (isa<CmpInst>(VL.front()))
10878 ScalarTy = VL.front()->getType();
10879 return processBuildVector<ShuffleCostEstimator, InstructionCost>(
10880 E, ScalarTy, *TTI, VectorizedVals, *this, CheckedExtracts);
10882 InstructionCost CommonCost = 0;
10883 SmallVector<int> Mask;
10884 bool IsReverseOrder = isReverseOrder(E->ReorderIndices);
10885 if (!E->ReorderIndices.empty() &&
10886 (E->State != TreeEntry::StridedVectorize || !IsReverseOrder)) {
10887 SmallVector<int> NewMask;
10888 if (E->getOpcode() == Instruction::Store) {
10889 // For stores the order is actually a mask.
10890 NewMask.resize(E->ReorderIndices.size());
10891 copy(E->ReorderIndices, NewMask.begin());
10892 } else {
10893 inversePermutation(E->ReorderIndices, NewMask);
10895 ::addMask(Mask, NewMask);
10897 if (NeedToShuffleReuses)
10898 ::addMask(Mask, E->ReuseShuffleIndices);
10899 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
10900 CommonCost =
10901 ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
10902 assert((E->State == TreeEntry::Vectorize ||
10903 E->State == TreeEntry::ScatterVectorize ||
10904 E->State == TreeEntry::StridedVectorize) &&
10905 "Unhandled state");
10906 assert(E->getOpcode() &&
10907 ((allSameType(VL) && allSameBlock(VL)) ||
10908 (E->getOpcode() == Instruction::GetElementPtr &&
10909 E->getMainOp()->getType()->isPointerTy())) &&
10910 "Invalid VL");
10911 Instruction *VL0 = E->getMainOp();
10912 unsigned ShuffleOrOp =
10913 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
10914 if (E->CombinedOp != TreeEntry::NotCombinedOp)
10915 ShuffleOrOp = E->CombinedOp;
10916 SmallSetVector<Value *, 16> UniqueValues(VL.begin(), VL.end());
10917 const unsigned Sz = UniqueValues.size();
10918 SmallBitVector UsedScalars(Sz, false);
10919 for (unsigned I = 0; I < Sz; ++I) {
10920 if (getTreeEntry(UniqueValues[I]) == E)
10921 continue;
10922 UsedScalars.set(I);
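// Scalars owned by a different tree entry are not re-counted in the scalar
// cost below.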
10924 auto GetCastContextHint = [&](Value *V) {
10925 if (const TreeEntry *OpTE = getTreeEntry(V))
10926 return getCastContextHint(*OpTE);
10927 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI);
10928 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle())
10929 return TTI::CastContextHint::GatherScatter;
10930 return TTI::CastContextHint::None;
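// Generic helper: returns (vector cost) - (scalar cost) for this node, adding
// a cast when the node's minimized type differs from the type its user
// expects.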
10932 auto GetCostDiff =
10933 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost,
10934 function_ref<InstructionCost(InstructionCost)> VectorCost) {
10935 // Calculate the cost of this instruction.
10936 InstructionCost ScalarCost = 0;
10937 if (isa<CastInst, CallInst>(VL0)) {
10938 // For some of the instructions there is no need to calculate the cost for
10939 // each particular instruction; we can use the cost of a single
10940 // instruction x the total number of scalar instructions.
10941 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0);
10942 } else {
10943 for (unsigned I = 0; I < Sz; ++I) {
10944 if (UsedScalars.test(I))
10945 continue;
10946 ScalarCost += ScalarEltCost(I);
10950 InstructionCost VecCost = VectorCost(CommonCost);
10951 // Check if the current node must be resized, if the parent node is not
10952 // resized.
10953 if (It != MinBWs.end() && !UnaryInstruction::isCast(E->getOpcode()) &&
10954 E->Idx != 0 &&
10955 (E->getOpcode() != Instruction::Load ||
10956 !E->UserTreeIndices.empty())) {
10957 const EdgeInfo &EI =
10958 *find_if(E->UserTreeIndices, [](const EdgeInfo &EI) {
10959 return !EI.UserTE->isGather() || EI.EdgeIdx != UINT_MAX;
10961 if (EI.UserTE->getOpcode() != Instruction::Select ||
10962 EI.EdgeIdx != 0) {
10963 auto UserBWIt = MinBWs.find(EI.UserTE);
10964 Type *UserScalarTy =
10965 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType();
10966 if (UserBWIt != MinBWs.end())
10967 UserScalarTy = IntegerType::get(ScalarTy->getContext(),
10968 UserBWIt->second.first);
10969 if (ScalarTy != UserScalarTy) {
10970 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
10971 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy);
10972 unsigned VecOpcode;
10973 auto *UserVecTy = getWidenedType(UserScalarTy, E->Scalars.size());
10974 if (BWSz > SrcBWSz)
10975 VecOpcode = Instruction::Trunc;
10976 else
10977 VecOpcode =
10978 It->second.second ? Instruction::SExt : Instruction::ZExt;
10979 TTI::CastContextHint CCH = GetCastContextHint(VL0);
10980 VecCost += TTI->getCastInstrCost(VecOpcode, UserVecTy, VecTy, CCH,
10981 CostKind);
10985 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost,
10986 ScalarCost, "Calculated costs for Tree"));
10987 return VecCost - ScalarCost;
10989 // Calculate the cost difference from vectorizing a set of GEPs.
10990 // A negative value means vectorizing is profitable.
10991 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) {
10992 assert((E->State == TreeEntry::Vectorize ||
10993 E->State == TreeEntry::StridedVectorize) &&
10994 "Entry state expected to be Vectorize or StridedVectorize here.");
10995 InstructionCost ScalarCost = 0;
10996 InstructionCost VecCost = 0;
10997 std::tie(ScalarCost, VecCost) = getGEPCosts(
10998 *TTI, Ptrs, BasePtr, E->getOpcode(), CostKind, OrigScalarTy, VecTy);
10999 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost,
11000 "Calculated GEPs cost for Tree"));
11002 return VecCost - ScalarCost;
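// Helper: cost of replacing a cmp+select pair with the equivalent min/max
// intrinsic, when such a conversion is possible.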
11005 auto GetMinMaxCost = [&](Type *Ty, Instruction *VI = nullptr) {
11006 auto [MinMaxID, SelectOnly] = canConvertToMinOrMaxIntrinsic(VI ? VI : VL);
11007 if (MinMaxID == Intrinsic::not_intrinsic)
11008 return InstructionCost::getInvalid();
11009 Type *CanonicalType = Ty;
11010 if (CanonicalType->isPtrOrPtrVectorTy())
11011 CanonicalType = CanonicalType->getWithNewType(IntegerType::get(
11012 CanonicalType->getContext(),
11013 DL->getTypeSizeInBits(CanonicalType->getScalarType())));
11015 IntrinsicCostAttributes CostAttrs(MinMaxID, CanonicalType,
11016 {CanonicalType, CanonicalType});
11017 InstructionCost IntrinsicCost =
11018 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
11019 // If the selects are the only users of the compares, the compares will be
11020 // dead and we can adjust the cost by subtracting the compare cost.
11021 if (VI && SelectOnly) {
11022 assert((!Ty->isVectorTy() || SLPReVec) &&
11023 "Expected only for scalar type.");
11024 auto *CI = cast<CmpInst>(VI->getOperand(0));
11025 IntrinsicCost -= TTI->getCmpSelInstrCost(
11026 CI->getOpcode(), Ty, Builder.getInt1Ty(), CI->getPredicate(),
11027 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
11028 {TTI::OK_AnyValue, TTI::OP_None}, CI);
11030 return IntrinsicCost;
11032 switch (ShuffleOrOp) {
11033 case Instruction::PHI: {
11034 // Count reused scalars.
11035 InstructionCost ScalarCost = 0;
11036 SmallPtrSet<const TreeEntry *, 4> CountedOps;
11037 for (Value *V : UniqueValues) {
11038 auto *PHI = dyn_cast<PHINode>(V);
11039 if (!PHI)
11040 continue;
11042 ValueList Operands(PHI->getNumIncomingValues(), nullptr);
11043 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) {
11044 Value *Op = PHI->getIncomingValue(I);
11045 Operands[I] = Op;
11047 if (const TreeEntry *OpTE = getTreeEntry(Operands.front()))
11048 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second)
11049 if (!OpTE->ReuseShuffleIndices.empty())
11050 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() -
11051 OpTE->Scalars.size());
11054 return CommonCost - ScalarCost;
11056 case Instruction::ExtractValue:
11057 case Instruction::ExtractElement: {
11058 auto GetScalarCost = [&](unsigned Idx) {
11059 auto *I = cast<Instruction>(UniqueValues[Idx]);
11060 VectorType *SrcVecTy;
11061 if (ShuffleOrOp == Instruction::ExtractElement) {
11062 auto *EE = cast<ExtractElementInst>(I);
11063 SrcVecTy = EE->getVectorOperandType();
11064 } else {
11065 auto *EV = cast<ExtractValueInst>(I);
11066 Type *AggregateTy = EV->getAggregateOperand()->getType();
11067 unsigned NumElts;
11068 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy))
11069 NumElts = ATy->getNumElements();
11070 else
11071 NumElts = AggregateTy->getStructNumElements();
11072 SrcVecTy = getWidenedType(OrigScalarTy, NumElts);
11074 if (I->hasOneUse()) {
11075 Instruction *Ext = I->user_back();
11076 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
11077 all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
11078 // Use getExtractWithExtendCost() to calculate the cost of
11079 // extractelement/ext pair.
11080 InstructionCost Cost = TTI->getExtractWithExtendCost(
11081 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I));
11082 // Subtract the cost of s|zext which is subtracted separately.
11083 Cost -= TTI->getCastInstrCost(
11084 Ext->getOpcode(), Ext->getType(), I->getType(),
11085 TTI::getCastContextHint(Ext), CostKind, Ext);
11086 return Cost;
11089 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy,
11090 CostKind, *getExtractIndex(I));
11092 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; };
11093 return GetCostDiff(GetScalarCost, GetVectorCost);
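// Cost of building a (sub)vector from insertelement instructions and, if
// needed, blending it with the original destination vector.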
11095 case Instruction::InsertElement: {
11096 assert(E->ReuseShuffleIndices.empty() &&
11097 "Unique insertelements only are expected.");
11098 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
11099 unsigned const NumElts = SrcVecTy->getNumElements();
11100 unsigned const NumScalars = VL.size();
11102 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
11104 SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
11105 unsigned OffsetBeg = *getElementIndex(VL.front());
11106 unsigned OffsetEnd = OffsetBeg;
11107 InsertMask[OffsetBeg] = 0;
11108 for (auto [I, V] : enumerate(VL.drop_front())) {
11109 unsigned Idx = *getElementIndex(V);
11110 if (OffsetBeg > Idx)
11111 OffsetBeg = Idx;
11112 else if (OffsetEnd < Idx)
11113 OffsetEnd = Idx;
11114 InsertMask[Idx] = I + 1;
11116 unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
11117 if (NumOfParts > 0 && NumOfParts < NumElts)
11118 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
11119 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
11120 VecScalarsSz;
11121 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
11122 unsigned InsertVecSz = std::min<unsigned>(
11123 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
11124 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz);
11125 bool IsWholeSubvector =
11126 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
11127 // Check if we can safely insert a subvector. If it is not possible, just
11128 // generate a whole-sized vector and shuffle the source vector and the new
11129 // subvector.
11130 if (OffsetBeg + InsertVecSz > VecSz) {
11131 // Align OffsetBeg to generate correct mask.
11132 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
11133 InsertVecSz = VecSz;
11136 APInt DemandedElts = APInt::getZero(NumElts);
11137 // TODO: Add support for Instruction::InsertValue.
11138 SmallVector<int> Mask;
11139 if (!E->ReorderIndices.empty()) {
11140 inversePermutation(E->ReorderIndices, Mask);
11141 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem);
11142 } else {
11143 Mask.assign(VecSz, PoisonMaskElem);
11144 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
11146 bool IsIdentity = true;
11147 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem);
11148 Mask.swap(PrevMask);
11149 for (unsigned I = 0; I < NumScalars; ++I) {
11150 unsigned InsertIdx = *getElementIndex(VL[PrevMask[I]]);
11151 DemandedElts.setBit(InsertIdx);
11152 IsIdentity &= InsertIdx - OffsetBeg == I;
11153 Mask[InsertIdx - OffsetBeg] = I;
11155 assert(Offset < NumElts && "Failed to find vector index offset");
11157 InstructionCost Cost = 0;
11158 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
11159 /*Insert*/ true, /*Extract*/ false,
11160 CostKind);
11162 // First cost - resize to actual vector size if not identity shuffle or
11163 // need to shift the vector.
11164 // Do not calculate the cost if the actual size is the register size and
11165 // we can merge this shuffle with the following SK_Select.
11166 auto *InsertVecTy = getWidenedType(ScalarTy, InsertVecSz);
11167 if (!IsIdentity)
11168 Cost += ::getShuffleCost(*TTI, TargetTransformInfo::SK_PermuteSingleSrc,
11169 InsertVecTy, Mask);
11170 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
11171 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
11172 }));
11173 // Second cost - permutation with subvector, if some elements are from the
11174 // initial vector or inserting a subvector.
11175 // TODO: Implement the analysis of the FirstInsert->getOperand(0)
11176 // subvector of ActualVecTy.
11177 SmallBitVector InMask =
11178 isUndefVector(FirstInsert->getOperand(0),
11179 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask));
11180 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) {
11181 if (InsertVecSz != VecSz) {
11182 auto *ActualVecTy = getWidenedType(ScalarTy, VecSz);
11183 Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, ActualVecTy, {},
11184 CostKind, OffsetBeg - Offset, InsertVecTy);
11185 } else {
11186 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
11187 Mask[I] = InMask.test(I) ? PoisonMaskElem : I;
11188 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
11189 I <= End; ++I)
11190 if (Mask[I] != PoisonMaskElem)
11191 Mask[I] = I + VecSz;
11192 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
11193 Mask[I] =
11194 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I;
11195 Cost +=
11196 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
11199 return Cost;
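// Casts: the minimum-bitwidth analysis may demote the operands, turning the
// vectorized cast into a truncation, an extension, or a free bitcast.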
11201 case Instruction::ZExt:
11202 case Instruction::SExt:
11203 case Instruction::FPToUI:
11204 case Instruction::FPToSI:
11205 case Instruction::FPExt:
11206 case Instruction::PtrToInt:
11207 case Instruction::IntToPtr:
11208 case Instruction::SIToFP:
11209 case Instruction::UIToFP:
11210 case Instruction::Trunc:
11211 case Instruction::FPTrunc:
11212 case Instruction::BitCast: {
11213 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
11214 Type *SrcScalarTy = VL0->getOperand(0)->getType();
11215 auto *SrcVecTy = getWidenedType(SrcScalarTy, VL.size());
11216 unsigned Opcode = ShuffleOrOp;
11217 unsigned VecOpcode = Opcode;
11218 if (!ScalarTy->isFPOrFPVectorTy() && !SrcScalarTy->isFPOrFPVectorTy() &&
11219 (SrcIt != MinBWs.end() || It != MinBWs.end())) {
11220 // Check if the values are candidates to demote.
11221 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy->getScalarType());
11222 if (SrcIt != MinBWs.end()) {
11223 SrcBWSz = SrcIt->second.first;
11224 unsigned SrcScalarTyNumElements = getNumElements(SrcScalarTy);
11225 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz);
11226 SrcVecTy =
11227 getWidenedType(SrcScalarTy, VL.size() * SrcScalarTyNumElements);
11229 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy->getScalarType());
11230 if (BWSz == SrcBWSz) {
11231 VecOpcode = Instruction::BitCast;
11232 } else if (BWSz < SrcBWSz) {
11233 VecOpcode = Instruction::Trunc;
11234 } else if (It != MinBWs.end()) {
11235 assert(BWSz > SrcBWSz && "Invalid cast!");
11236 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
11237 } else if (SrcIt != MinBWs.end()) {
11238 assert(BWSz > SrcBWSz && "Invalid cast!");
11239 VecOpcode =
11240 SrcIt->second.second ? Instruction::SExt : Instruction::ZExt;
11242 } else if (VecOpcode == Instruction::SIToFP && SrcIt != MinBWs.end() &&
11243 !SrcIt->second.second) {
11244 VecOpcode = Instruction::UIToFP;
11246 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost {
11247 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11248 return TTI->getCastInstrCost(Opcode, VL0->getType(),
11249 VL0->getOperand(0)->getType(),
11250 TTI::getCastContextHint(VI), CostKind, VI);
11252 auto GetVectorCost = [=](InstructionCost CommonCost) {
11253 // Do not count cost here if minimum bitwidth is in effect and it is just
11254 // a bitcast (here it is just a noop).
11255 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast)
11256 return CommonCost;
11257 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr;
11258 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0));
11259 return CommonCost +
11260 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind,
11261 VecOpcode == Opcode ? VI : nullptr);
11263 return GetCostDiff(GetScalarCost, GetVectorCost);
11265 case Instruction::FCmp:
11266 case Instruction::ICmp:
11267 case Instruction::Select: {
11268 CmpInst::Predicate VecPred, SwappedVecPred;
11269 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value());
11270 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) ||
11271 match(VL0, MatchCmp))
11272 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred);
11273 else
11274 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy()
11275 ? CmpInst::BAD_FCMP_PREDICATE
11276 : CmpInst::BAD_ICMP_PREDICATE;
11277 auto GetScalarCost = [&](unsigned Idx) {
11278 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11279 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy()
11280 ? CmpInst::BAD_FCMP_PREDICATE
11281 : CmpInst::BAD_ICMP_PREDICATE;
11282 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
11283 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) &&
11284 !match(VI, MatchCmp)) ||
11285 (CurrentPred != VecPred && CurrentPred != SwappedVecPred))
11286 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy()
11287 ? CmpInst::BAD_FCMP_PREDICATE
11288 : CmpInst::BAD_ICMP_PREDICATE;
11290 InstructionCost ScalarCost = TTI->getCmpSelInstrCost(
11291 E->getOpcode(), OrigScalarTy, Builder.getInt1Ty(), CurrentPred,
11292 CostKind, getOperandInfo(VI->getOperand(0)),
11293 getOperandInfo(VI->getOperand(1)), VI);
11294 InstructionCost IntrinsicCost = GetMinMaxCost(OrigScalarTy, VI);
11295 if (IntrinsicCost.isValid())
11296 ScalarCost = IntrinsicCost;
11298 return ScalarCost;
11300 auto GetVectorCost = [&](InstructionCost CommonCost) {
11301 auto *MaskTy = getWidenedType(Builder.getInt1Ty(), VL.size());
11303 InstructionCost VecCost =
11304 TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, VecPred,
11305 CostKind, getOperandInfo(E->getOperand(0)),
11306 getOperandInfo(E->getOperand(1)), VL0);
11307 if (auto *SI = dyn_cast<SelectInst>(VL0)) {
11308 auto *CondType =
11309 getWidenedType(SI->getCondition()->getType(), VL.size());
11310 unsigned CondNumElements = CondType->getNumElements();
11311 unsigned VecTyNumElements = getNumElements(VecTy);
11312 assert(VecTyNumElements >= CondNumElements &&
11313 VecTyNumElements % CondNumElements == 0 &&
11314 "Cannot vectorize Instruction::Select");
11315 if (CondNumElements != VecTyNumElements) {
11316 // When the return type is i1 but the source is fixed vector type, we
11317 // need to duplicate the condition value.
11318 VecCost += ::getShuffleCost(
11319 *TTI, TTI::SK_PermuteSingleSrc, CondType,
11320 createReplicatedMask(VecTyNumElements / CondNumElements,
11321 CondNumElements));
11324 return VecCost + CommonCost;
11326 return GetCostDiff(GetScalarCost, GetVectorCost);
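// Combined min/max node: compare the scalar min/max intrinsic cost with its
// vector counterpart.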
11328 case TreeEntry::MinMax: {
11329 auto GetScalarCost = [&](unsigned Idx) {
11330 return GetMinMaxCost(OrigScalarTy);
11332 auto GetVectorCost = [&](InstructionCost CommonCost) {
11333 InstructionCost VecCost = GetMinMaxCost(VecTy);
11334 return VecCost + CommonCost;
11336 return GetCostDiff(GetScalarCost, GetVectorCost);
11338 case Instruction::FNeg:
11339 case Instruction::Add:
11340 case Instruction::FAdd:
11341 case Instruction::Sub:
11342 case Instruction::FSub:
11343 case Instruction::Mul:
11344 case Instruction::FMul:
11345 case Instruction::UDiv:
11346 case Instruction::SDiv:
11347 case Instruction::FDiv:
11348 case Instruction::URem:
11349 case Instruction::SRem:
11350 case Instruction::FRem:
11351 case Instruction::Shl:
11352 case Instruction::LShr:
11353 case Instruction::AShr:
11354 case Instruction::And:
11355 case Instruction::Or:
11356 case Instruction::Xor: {
11357 auto GetScalarCost = [&](unsigned Idx) {
11358 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11359 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1;
11360 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0));
11361 TTI::OperandValueInfo Op2Info =
11362 TTI::getOperandInfo(VI->getOperand(OpIdx));
11363 SmallVector<const Value *> Operands(VI->operand_values());
11364 return TTI->getArithmeticInstrCost(ShuffleOrOp, OrigScalarTy, CostKind,
11365 Op1Info, Op2Info, Operands, VI);
11367 auto GetVectorCost = [=](InstructionCost CommonCost) {
11368 if (ShuffleOrOp == Instruction::And && It != MinBWs.end()) {
11369 for (unsigned I : seq<unsigned>(0, E->getNumOperands())) {
11370 ArrayRef<Value *> Ops = E->getOperand(I);
11371 if (all_of(Ops, [&](Value *Op) {
11372 auto *CI = dyn_cast<ConstantInt>(Op);
11373 return CI && CI->getValue().countr_one() >= It->second.first;
11375 return CommonCost;
11378 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1;
11379 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0));
11380 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx));
11381 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info,
11382 Op2Info, {}, nullptr, TLI) +
11383 CommonCost;
11385 return GetCostDiff(GetScalarCost, GetVectorCost);
11387 case Instruction::GetElementPtr: {
11388 return CommonCost + GetGEPCostDiff(VL, VL0);
11390 case Instruction::Load: {
11391 auto GetScalarCost = [&](unsigned Idx) {
11392 auto *VI = cast<LoadInst>(UniqueValues[Idx]);
11393 return TTI->getMemoryOpCost(Instruction::Load, OrigScalarTy,
11394 VI->getAlign(), VI->getPointerAddressSpace(),
11395 CostKind, TTI::OperandValueInfo(), VI);
11397 auto *LI0 = cast<LoadInst>(VL0);
11398 auto GetVectorCost = [&](InstructionCost CommonCost) {
11399 InstructionCost VecLdCost;
11400 switch (E->State) {
11401 case TreeEntry::Vectorize:
11402 if (unsigned Factor = E->getInterleaveFactor()) {
11403 VecLdCost = TTI->getInterleavedMemoryOpCost(
11404 Instruction::Load, VecTy, Factor, std::nullopt, LI0->getAlign(),
11405 LI0->getPointerAddressSpace(), CostKind);
11407 } else {
11408 VecLdCost = TTI->getMemoryOpCost(
11409 Instruction::Load, VecTy, LI0->getAlign(),
11410 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo());
11412 break;
11413 case TreeEntry::StridedVectorize: {
11414 Align CommonAlignment =
11415 computeCommonAlignment<LoadInst>(UniqueValues.getArrayRef());
11416 VecLdCost = TTI->getStridedMemoryOpCost(
11417 Instruction::Load, VecTy, LI0->getPointerOperand(),
11418 /*VariableMask=*/false, CommonAlignment, CostKind);
11419 break;
11421 case TreeEntry::ScatterVectorize: {
11422 Align CommonAlignment =
11423 computeCommonAlignment<LoadInst>(UniqueValues.getArrayRef());
11424 VecLdCost = TTI->getGatherScatterOpCost(
11425 Instruction::Load, VecTy, LI0->getPointerOperand(),
11426 /*VariableMask=*/false, CommonAlignment, CostKind);
11427 break;
11429 case TreeEntry::CombinedVectorize:
11430 case TreeEntry::NeedToGather:
11431 llvm_unreachable("Unexpected vectorization state.");
11433 return VecLdCost + CommonCost;
11436 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost);
11437 // If this node generates a masked gather load then it is not a terminal
11438 // node. Hence the address operand cost is estimated separately.
11439 if (E->State == TreeEntry::ScatterVectorize)
11440 return Cost;
11442 // Estimate the cost of the GEPs since this tree node is a terminal node.
11443 SmallVector<Value *> PointerOps(VL.size());
11444 for (auto [I, V] : enumerate(VL))
11445 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand();
11446 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand());
11448 case Instruction::Store: {
11449 bool IsReorder = !E->ReorderIndices.empty();
11450 auto GetScalarCost = [=](unsigned Idx) {
11451 auto *VI = cast<StoreInst>(VL[Idx]);
11452 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand());
11453 return TTI->getMemoryOpCost(Instruction::Store, OrigScalarTy,
11454 VI->getAlign(), VI->getPointerAddressSpace(),
11455 CostKind, OpInfo, VI);
11457 auto *BaseSI =
11458 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
11459 auto GetVectorCost = [=](InstructionCost CommonCost) {
11460 // We know that we can merge the stores. Calculate the cost.
11461 InstructionCost VecStCost;
11462 if (E->State == TreeEntry::StridedVectorize) {
11463 Align CommonAlignment =
11464 computeCommonAlignment<StoreInst>(UniqueValues.getArrayRef());
11465 VecStCost = TTI->getStridedMemoryOpCost(
11466 Instruction::Store, VecTy, BaseSI->getPointerOperand(),
11467 /*VariableMask=*/false, CommonAlignment, CostKind);
11468 } else {
11469 assert(E->State == TreeEntry::Vectorize &&
11470 "Expected either strided or consecutive stores.");
11471 if (unsigned Factor = E->getInterleaveFactor()) {
11472 assert(E->ReuseShuffleIndices.empty() && !E->ReorderIndices.empty() &&
11473 "No reused shuffles expected");
11474 CommonCost = 0;
11475 VecStCost = TTI->getInterleavedMemoryOpCost(
11476 Instruction::Store, VecTy, Factor, std::nullopt,
11477 BaseSI->getAlign(), BaseSI->getPointerAddressSpace(), CostKind);
11478 } else {
11479 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0));
11480 VecStCost = TTI->getMemoryOpCost(
11481 Instruction::Store, VecTy, BaseSI->getAlign(),
11482 BaseSI->getPointerAddressSpace(), CostKind, OpInfo);
11485 return VecStCost + CommonCost;
11487 SmallVector<Value *> PointerOps(VL.size());
11488 for (auto [I, V] : enumerate(VL)) {
11489 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I;
11490 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand();
11493 return GetCostDiff(GetScalarCost, GetVectorCost) +
11494 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand());
11496 case Instruction::Call: {
11497 auto GetScalarCost = [&](unsigned Idx) {
11498 auto *CI = cast<CallInst>(UniqueValues[Idx]);
11499 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
11500 if (ID != Intrinsic::not_intrinsic) {
11501 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
11502 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
11504 return TTI->getCallInstrCost(CI->getCalledFunction(),
11505 CI->getFunctionType()->getReturnType(),
11506 CI->getFunctionType()->params(), CostKind);
11508 auto GetVectorCost = [=](InstructionCost CommonCost) {
11509 auto *CI = cast<CallInst>(VL0);
11510 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
11511 SmallVector<Type *> ArgTys =
11512 buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
11513 It != MinBWs.end() ? It->second.first : 0);
11514 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
11515 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost;
11517 return GetCostDiff(GetScalarCost, GetVectorCost);
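// Alternate-opcode nodes (and, with REVEC, plain shufflevectors): cost of the
// two vector opcodes plus the shuffle that blends their results.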
11519 case Instruction::ShuffleVector: {
11520 if (!SLPReVec || E->isAltShuffle())
11521 assert(E->isAltShuffle() &&
11522 ((Instruction::isBinaryOp(E->getOpcode()) &&
11523 Instruction::isBinaryOp(E->getAltOpcode())) ||
11524 (Instruction::isCast(E->getOpcode()) &&
11525 Instruction::isCast(E->getAltOpcode())) ||
11526 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
11527 "Invalid Shuffle Vector Operand");
11528 // Try to find the previous shuffle node with the same operands and same
11529 // main/alternate ops.
11530 auto TryFindNodeWithEqualOperands = [=]() {
11531 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
11532 if (TE.get() == E)
11533 break;
11534 if (TE->isAltShuffle() &&
11535 ((TE->getOpcode() == E->getOpcode() &&
11536 TE->getAltOpcode() == E->getAltOpcode()) ||
11537 (TE->getOpcode() == E->getAltOpcode() &&
11538 TE->getAltOpcode() == E->getOpcode())) &&
11539 TE->hasEqualOperands(*E))
11540 return true;
11542 return false;
11544 auto GetScalarCost = [&](unsigned Idx) {
11545 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11546 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode");
11547 (void)E;
11548 return TTI->getInstructionCost(VI, CostKind);
11550 // Need to clear CommonCost since the final shuffle cost is included in the
11551 // vector cost.
11552 auto GetVectorCost = [&, &TTIRef = *TTI](InstructionCost) {
11553 // VecCost is equal to the sum of the cost of creating 2 vectors
11554 // and the cost of creating the shuffle.
11555 InstructionCost VecCost = 0;
11556 if (TryFindNodeWithEqualOperands()) {
11557 LLVM_DEBUG({
11558 dbgs() << "SLP: diamond match for alternate node found.\n";
11559 E->dump();
11561 // No need to add new vector costs here since we're going to reuse the
11562 // same main/alternate vector ops and just do different shuffling.
11563 } else if (Instruction::isBinaryOp(E->getOpcode())) {
11564 VecCost =
11565 TTIRef.getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
11566 VecCost +=
11567 TTIRef.getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind);
11568 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
11569 auto *MaskTy = getWidenedType(Builder.getInt1Ty(), VL.size());
11570 VecCost = TTIRef.getCmpSelInstrCost(
11571 E->getOpcode(), VecTy, MaskTy, CI0->getPredicate(), CostKind,
11572 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
11573 VL0);
11574 VecCost += TTIRef.getCmpSelInstrCost(
11575 E->getOpcode(), VecTy, MaskTy,
11576 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
11577 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
11578 E->getAltOp());
11579 } else {
11580 Type *SrcSclTy = E->getMainOp()->getOperand(0)->getType();
11581 auto *SrcTy = getWidenedType(SrcSclTy, VL.size());
11582 if (SrcSclTy->isIntegerTy() && ScalarTy->isIntegerTy()) {
11583 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
11584 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
11585 unsigned SrcBWSz =
11586 DL->getTypeSizeInBits(E->getMainOp()->getOperand(0)->getType());
11587 if (SrcIt != MinBWs.end()) {
11588 SrcBWSz = SrcIt->second.first;
11589 SrcSclTy = IntegerType::get(SrcSclTy->getContext(), SrcBWSz);
11590 SrcTy = getWidenedType(SrcSclTy, VL.size());
11592 if (BWSz <= SrcBWSz) {
11593 if (BWSz < SrcBWSz)
11594 VecCost =
11595 TTIRef.getCastInstrCost(Instruction::Trunc, VecTy, SrcTy,
11596 TTI::CastContextHint::None, CostKind);
11597 LLVM_DEBUG({
11598 dbgs()
11599 << "SLP: alternate extension, which should be truncated.\n";
11600 E->dump();
11602 return VecCost;
11605 VecCost = TTIRef.getCastInstrCost(E->getOpcode(), VecTy, SrcTy,
11606 TTI::CastContextHint::None, CostKind);
11607 VecCost +=
11608 TTIRef.getCastInstrCost(E->getAltOpcode(), VecTy, SrcTy,
11609 TTI::CastContextHint::None, CostKind);
11611 SmallVector<int> Mask;
11612 E->buildAltOpShuffleMask(
11613 [&](Instruction *I) {
11614 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
11615 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(),
11616 *TLI);
11618 Mask);
11619 VecCost += ::getShuffleCost(TTIRef, TargetTransformInfo::SK_PermuteTwoSrc,
11620 FinalVecTy, Mask, CostKind);
11621 // Patterns like [fadd,fsub] can be combined into a single instruction
11622 // on x86. Reordering them into [fsub,fadd] blocks this pattern. So we
11623 // need to take their order into account when looking for the most used
11624 // order.
11625 unsigned Opcode0 = E->getOpcode();
11626 unsigned Opcode1 = E->getAltOpcode();
11627 SmallBitVector OpcodeMask(getAltInstrMask(E->Scalars, Opcode0, Opcode1));
11628 // If this pattern is supported by the target then we consider the
11629 // order.
11630 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
11631 InstructionCost AltVecCost = TTIRef.getAltInstrCost(
11632 VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
11633 return AltVecCost < VecCost ? AltVecCost : VecCost;
11635 // TODO: Check the reverse order too.
11636 return VecCost;
11638 if (SLPReVec && !E->isAltShuffle())
11639 return GetCostDiff(
11640 GetScalarCost, [&](InstructionCost) -> InstructionCost {
11641 // If a group uses mask in order, the shufflevector can be
11642 // eliminated by instcombine. Then the cost is 0.
11643 assert(isa<ShuffleVectorInst>(VL.front()) &&
11644 "Not supported shufflevector usage.");
11645 auto *SV = cast<ShuffleVectorInst>(VL.front());
11646 unsigned SVNumElements =
11647 cast<FixedVectorType>(SV->getOperand(0)->getType())
11648 ->getNumElements();
11649 unsigned GroupSize = SVNumElements / SV->getShuffleMask().size();
11650 for (size_t I = 0, End = VL.size(); I != End; I += GroupSize) {
11651 ArrayRef<Value *> Group = VL.slice(I, GroupSize);
11652 int NextIndex = 0;
11653 if (!all_of(Group, [&](Value *V) {
11654 assert(isa<ShuffleVectorInst>(V) &&
11655 "Not supported shufflevector usage.");
11656 auto *SV = cast<ShuffleVectorInst>(V);
11657 int Index;
11658 [[maybe_unused]] bool IsExtractSubvectorMask =
11659 SV->isExtractSubvectorMask(Index);
11660 assert(IsExtractSubvectorMask &&
11661 "Not supported shufflevector usage.");
11662 if (NextIndex != Index)
11663 return false;
11664 NextIndex += SV->getShuffleMask().size();
11665 return true;
11667 return ::getShuffleCost(
11668 *TTI, TargetTransformInfo::SK_PermuteSingleSrc, VecTy,
11669 calculateShufflevectorMask(E->Scalars));
11671 return TTI::TCC_Free;
11673 return GetCostDiff(GetScalarCost, GetVectorCost);
11675 case Instruction::Freeze:
11676 return CommonCost;
11677 default:
11678 llvm_unreachable("Unknown instruction");
11682 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
11683 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
11684 << VectorizableTree.size() << " is fully vectorizable.\n");
11686 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
11687 SmallVector<int> Mask;
11688 return TE->isGather() &&
11689 !any_of(TE->Scalars,
11690 [this](Value *V) { return EphValues.contains(V); }) &&
11691 (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
11692 TE->Scalars.size() < Limit ||
11693 ((TE->getOpcode() == Instruction::ExtractElement ||
11694 all_of(TE->Scalars, IsaPred<ExtractElementInst, UndefValue>)) &&
11695 isFixedVectorShuffle(TE->Scalars, Mask)) ||
11696 (TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()) ||
11697 any_of(TE->Scalars, IsaPred<LoadInst>));
11700 // We only handle trees of heights 1 and 2.
11701 if (VectorizableTree.size() == 1 &&
11702 (VectorizableTree[0]->State == TreeEntry::Vectorize ||
11703 VectorizableTree[0]->State == TreeEntry::StridedVectorize ||
11704 (ForReduction &&
11705 AreVectorizableGathers(VectorizableTree[0].get(),
11706 VectorizableTree[0]->Scalars.size()) &&
11707 VectorizableTree[0]->getVectorFactor() > 2)))
11708 return true;
11710 if (VectorizableTree.size() != 2)
11711 return false;
11713 // Handle splat and all-constants stores. Also try to vectorize tiny trees
11714 // with the second gather nodes if they have fewer scalar operands than the
11715 // initial tree element (it may be profitable to shuffle the second gather)
11716 // or they are extractelements, which form a shuffle.
11717 SmallVector<int> Mask;
11718 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
11719 AreVectorizableGathers(VectorizableTree[1].get(),
11720 VectorizableTree[0]->Scalars.size()))
11721 return true;
11723 // Gathering cost would be too much for tiny trees.
11724 if (VectorizableTree[0]->isGather() ||
11725 (VectorizableTree[1]->isGather() &&
11726 VectorizableTree[0]->State != TreeEntry::ScatterVectorize &&
11727 VectorizableTree[0]->State != TreeEntry::StridedVectorize))
11728 return false;
11730 return true;
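/// Checks whether \p Root is the head of an or/shift chain over zero-extended
/// loads that the backend can likely fold into a single wide load (load
/// combining). The combined width of \p NumElts such loads must form a legal
/// integer type. If \p MustMatchOrInst is set, the chain must contain at
/// least one 'or' instruction.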
11733 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
11734 TargetTransformInfo *TTI,
11735 bool MustMatchOrInst) {
11736 // Look past the root to find a source value. Arbitrarily follow the
11737 // path through operand 0 of any 'or'. Also, peek through optional
11738 // shift-left-by-multiple-of-8-bits.
11739 Value *ZextLoad = Root;
11740 const APInt *ShAmtC;
11741 bool FoundOr = false;
11742 while (!isa<ConstantExpr>(ZextLoad) &&
11743 (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
11744 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
11745 ShAmtC->urem(8) == 0))) {
11746 auto *BinOp = cast<BinaryOperator>(ZextLoad);
11747 ZextLoad = BinOp->getOperand(0);
11748 if (BinOp->getOpcode() == Instruction::Or)
11749 FoundOr = true;
11751 // Check if the input is an extended load of the required or/shift expression.
11752 Value *Load;
11753 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
11754 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
11755 return false;
11757 // Require that the total load bit width is a legal integer type.
11758 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
11759 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
11760 Type *SrcTy = Load->getType();
11761 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
11762 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
11763 return false;
11765 // Everything matched - assume that we can fold the whole sequence using
11766 // load combining.
11767 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
11768 << *(cast<Instruction>(Root)) << "\n");
11770 return true;
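/// Returns true if the tree is an 'or' reduction whose reduced values match
/// the load-combine pattern checked by isLoadCombineCandidateImpl.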
11773 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
11774 if (RdxKind != RecurKind::Or)
11775 return false;
11777 unsigned NumElts = VectorizableTree[0]->Scalars.size();
11778 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
11779 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
11780 /* MatchOr */ false);
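/// Returns true if every store in \p Stores stores a value that matches the
/// load-combine pattern, i.e. the scalar code is likely to be folded by
/// backend load combining.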
11783 bool BoUpSLP::isLoadCombineCandidate(ArrayRef<Value *> Stores) const {
11784 // Peek through a final sequence of stores and check if all operations are
11785 // likely to be load-combined.
11786 unsigned NumElts = Stores.size();
11787 for (Value *Scalar : Stores) {
11788 Value *X;
11789 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
11790 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
11791 return false;
11793 return true;
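/// Returns true if the built tree is considered too small (and not provably
/// fully vectorizable) to be worth vectorizing: e.g. PHI/gather-only graphs
/// or trees below MinTreeSize that fail isFullyVectorizableTinyTree().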
11796 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
11797 if (!DebugCounter::shouldExecute(VectorizedGraphs))
11798 return true;
11800 // Graph is empty - do nothing.
11801 if (VectorizableTree.empty()) {
11802 assert(ExternalUses.empty() && "We shouldn't have any external users");
11804 return true;
11807 // No need to vectorize inserts of gathered values.
11808 if (VectorizableTree.size() == 2 &&
11809 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
11810 VectorizableTree[1]->isGather() &&
11811 (VectorizableTree[1]->getVectorFactor() <= 2 ||
11812 !(isSplat(VectorizableTree[1]->Scalars) ||
11813 allConstant(VectorizableTree[1]->Scalars))))
11814 return true;
11816 // If the graph includes only PHI nodes and gathers, it is definitely not
11817 // profitable for vectorization and we can skip it, if the cost threshold is
11818 // the default. The cost of vectorized PHI nodes is almost always 0 plus the
11819 // cost of the gathers/buildvectors.
11820 constexpr int Limit = 4;
11821 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
11822 !VectorizableTree.empty() &&
11823 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
11824 return (TE->isGather() &&
11825 TE->getOpcode() != Instruction::ExtractElement &&
11826 count_if(TE->Scalars, IsaPred<ExtractElementInst>) <= Limit) ||
11827 TE->getOpcode() == Instruction::PHI;
11829 return true;
11831 // We can vectorize the tree if its size is greater than or equal to the
11832 // minimum size specified by the MinTreeSize command line option.
11833 if (VectorizableTree.size() >= MinTreeSize)
11834 return false;
11836 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
11837 // can vectorize it if we can prove it fully vectorizable.
11838 if (isFullyVectorizableTinyTree(ForReduction))
11839 return false;
11841 // Check if any of the gather nodes forms an insertelement buildvector
11842 // somewhere.
11843 bool IsAllowedSingleBVNode =
11844 VectorizableTree.size() > 1 ||
11845 (VectorizableTree.size() == 1 && VectorizableTree.front()->getOpcode() &&
11846 !VectorizableTree.front()->isAltShuffle() &&
11847 VectorizableTree.front()->getOpcode() != Instruction::PHI &&
11848 VectorizableTree.front()->getOpcode() != Instruction::GetElementPtr &&
11849 allSameBlock(VectorizableTree.front()->Scalars));
11850 if (any_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
11851 return TE->isGather() && all_of(TE->Scalars, [&](Value *V) {
11852 return isa<ExtractElementInst, UndefValue>(V) ||
11853 (IsAllowedSingleBVNode &&
11854 !V->hasNUsesOrMore(UsesLimit) &&
11855 any_of(V->users(), IsaPred<InsertElementInst>));
11858 return false;
11860 if (VectorizableTree.back()->isGather() &&
11861 VectorizableTree.back()->isAltShuffle() &&
11862 VectorizableTree.back()->getVectorFactor() > 2 &&
11863 allSameBlock(VectorizableTree.back()->Scalars))
11864 return false;
11866 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
11867 // vectorizable.
11868 return true;
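/// Returns true if the graph is not worth extending with extra nodes: apart
/// from splats and constants, the remaining gather nodes contain only loads
/// or scalars with no common opcode.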
11871 bool BoUpSLP::isTreeNotExtendable() const {
11872 if (getCanonicalGraphSize() != getTreeSize()) {
11873 constexpr unsigned SmallTree = 3;
11874 if (VectorizableTree.front()->isNonPowOf2Vec() &&
11875 getCanonicalGraphSize() <= SmallTree &&
11876 count_if(ArrayRef(VectorizableTree).drop_front(getCanonicalGraphSize()),
11877 [](const std::unique_ptr<TreeEntry> &TE) {
11878 return TE->isGather() &&
11879 TE->getOpcode() == Instruction::Load &&
11880 !allSameBlock(TE->Scalars);
11881 }) == 1)
11882 return true;
11883 return false;
11885 bool Res = false;
11886 for (unsigned Idx : seq<unsigned>(getTreeSize())) {
11887 TreeEntry &E = *VectorizableTree[Idx];
11888 if (!E.isGather())
11889 continue;
11890 if (E.getOpcode() && E.getOpcode() != Instruction::Load)
11891 return false;
11892 if (isSplat(E.Scalars) || allConstant(E.Scalars))
11893 continue;
11894 Res = true;
11896 return Res;
11899 InstructionCost BoUpSLP::getSpillCost() const {
11900 // Walk from the bottom of the tree to the top, tracking which values are
11901 // live. When we see a call instruction that is not part of our tree,
11902 // query TTI to see if there is a cost to keeping values live over it
11903 // (for example, if spills and fills are required).
11904 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
11905 InstructionCost Cost = 0;
11907 SmallPtrSet<Instruction *, 4> LiveValues;
11908 Instruction *PrevInst = nullptr;
11910 // The entries in VectorizableTree are not necessarily ordered by their
11911 // position in basic blocks. Collect them and order them by dominance so later
11912 // instructions are guaranteed to be visited first. For instructions in
11913 // different basic blocks, we only scan to the beginning of the block, so
11914 // their order does not matter, as long as all instructions in a basic block
11915 // are grouped together. Using dominance ensures a deterministic order.
11916 SmallVector<Instruction *, 16> OrderedScalars;
11917 for (const auto &TEPtr : VectorizableTree) {
11918 if (TEPtr->State != TreeEntry::Vectorize)
11919 continue;
11920 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
11921 if (!Inst)
11922 continue;
11923 OrderedScalars.push_back(Inst);
11925 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
11926 auto *NodeA = DT->getNode(A->getParent());
11927 auto *NodeB = DT->getNode(B->getParent());
11928 assert(NodeA && "Should only process reachable instructions");
11929 assert(NodeB && "Should only process reachable instructions");
11930 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
11931 "Different nodes should have different DFS numbers");
11932 if (NodeA != NodeB)
11933 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn();
11934 return B->comesBefore(A);
11937 for (Instruction *Inst : OrderedScalars) {
11938 if (!PrevInst) {
11939 PrevInst = Inst;
11940 continue;
11943 // Update LiveValues.
11944 LiveValues.erase(PrevInst);
11945 for (auto &J : PrevInst->operands()) {
11946 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
11947 LiveValues.insert(cast<Instruction>(&*J));
11950 LLVM_DEBUG({
11951 dbgs() << "SLP: #LV: " << LiveValues.size();
11952 for (auto *X : LiveValues)
11953 dbgs() << " " << X->getName();
11954 dbgs() << ", Looking at ";
11955 Inst->dump();
11958 // Now find the sequence of instructions between PrevInst and Inst.
11959 unsigned NumCalls = 0;
11960 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
11961 PrevInstIt =
11962 PrevInst->getIterator().getReverse();
11963 while (InstIt != PrevInstIt) {
11964 if (PrevInstIt == PrevInst->getParent()->rend()) {
11965 PrevInstIt = Inst->getParent()->rbegin();
11966 continue;
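// Returns true if the intrinsic call is expected to be lowered inline (it is
// assume-like or its intrinsic cost is lower than the cost of a real call),
// so it does not force live values to be spilled.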
11969 auto NoCallIntrinsic = [this](Instruction *I) {
11970 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
11971 if (II->isAssumeLikeIntrinsic())
11972 return true;
11973 FastMathFlags FMF;
11974 SmallVector<Type *, 4> Tys;
11975 for (auto &ArgOp : II->args())
11976 Tys.push_back(ArgOp->getType());
11977 if (auto *FPMO = dyn_cast<FPMathOperator>(II))
11978 FMF = FPMO->getFastMathFlags();
11979 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys,
11980 FMF);
11981 InstructionCost IntrCost =
11982 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput);
11983 InstructionCost CallCost = TTI->getCallInstrCost(
11984 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput);
11985 if (IntrCost < CallCost)
11986 return true;
11988 return false;
11991 // Debug information does not impact spill cost.
11992 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
11993 &*PrevInstIt != PrevInst)
11994 NumCalls++;
11996 ++PrevInstIt;
11999 if (NumCalls) {
12000 SmallVector<Type *, 4> V;
12001 for (auto *II : LiveValues) {
12002 auto *ScalarTy = II->getType();
12003 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
12004 ScalarTy = VectorTy->getElementType();
12005 V.push_back(getWidenedType(ScalarTy, BundleWidth));
12007 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
12010 PrevInst = Inst;
12013 return Cost;
12016 /// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
12017 /// the buildvector sequence.
12018 static bool isFirstInsertElement(const InsertElementInst *IE1,
12019 const InsertElementInst *IE2) {
12020 if (IE1 == IE2)
12021 return false;
12022 const auto *I1 = IE1;
12023 const auto *I2 = IE2;
12024 const InsertElementInst *PrevI1;
12025 const InsertElementInst *PrevI2;
12026 unsigned Idx1 = *getElementIndex(IE1);
12027 unsigned Idx2 = *getElementIndex(IE2);
12028 do {
12029 if (I2 == IE1)
12030 return true;
12031 if (I1 == IE2)
12032 return false;
12033 PrevI1 = I1;
12034 PrevI2 = I2;
12035 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
12036 getElementIndex(I1).value_or(Idx2) != Idx2)
12037 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
12038 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
12039 getElementIndex(I2).value_or(Idx1) != Idx1)
12040 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
12041 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
12042 llvm_unreachable("Two different buildvectors not expected.");
12045 namespace {
12046 /// Returns the incoming Value * if the requested type is Value * too, or a
12047 /// default value otherwise.
12048 struct ValueSelect {
12049 template <typename U>
12050 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
12051 return V;
12053 template <typename U>
12054 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
12055 return U();
12058 } // namespace
12060 /// Does the analysis of the provided shuffle masks and performs the requested
12061 /// actions on the vectors with the given shuffle masks. It tries to do it in
12062 /// several steps:
12063 /// 1. If the Base vector is not an undef vector, resize the very first mask to
12064 /// have a common VF and perform the action for 2 input vectors (including the
12065 /// non-undef Base). Other shuffle masks are combined with the result of the
12066 /// first stage and processed as a shuffle of 2 elements.
12067 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
12068 /// the action only for 1 vector with the given mask, if it is not the identity
12069 /// mask.
12070 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
12071 /// vectors, combining the masks properly between the steps.
12072 template <typename T>
12073 static T *performExtractsShuffleAction(
12074 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
12075 function_ref<unsigned(T *)> GetVF,
12076 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
12077 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
12078 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
12079 SmallVector<int> Mask(ShuffleMask.begin()->second);
12080 auto VMIt = std::next(ShuffleMask.begin());
12081 T *Prev = nullptr;
12082 SmallBitVector UseMask =
12083 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
12084 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
12085 if (!IsBaseUndef.all()) {
12086 // Base is not undef, need to combine it with the next subvectors.
12087 std::pair<T *, bool> Res =
12088 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
12089 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
12090 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
12091 if (Mask[Idx] == PoisonMaskElem)
12092 Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx;
12093 else
12094 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
12096 auto *V = ValueSelect::get<T *>(Base);
12097 (void)V;
12098 assert((!V || GetVF(V) == Mask.size()) &&
12099 "Expected base vector of VF number of elements.");
12100 Prev = Action(Mask, {nullptr, Res.first});
12101 } else if (ShuffleMask.size() == 1) {
12102 // Base is undef and only 1 vector is shuffled - perform the action only for
12103 // a single vector, if the mask is not the identity mask.
12104 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask,
12105 /*ForSingleMask=*/true);
12106 if (Res.second)
12107 // Identity mask is found.
12108 Prev = Res.first;
12109 else
12110 Prev = Action(Mask, {ShuffleMask.begin()->first});
12111 } else {
12112 // Base is undef and at least 2 input vectors are shuffled - perform 2-vector
12113 // shuffles step by step, combining the shuffles between the steps.
12114 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
12115 unsigned Vec2VF = GetVF(VMIt->first);
12116 if (Vec1VF == Vec2VF) {
12117 // No need to resize the input vectors since they are of the same size; we
12118 // can shuffle them directly.
12119 ArrayRef<int> SecMask = VMIt->second;
12120 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12121 if (SecMask[I] != PoisonMaskElem) {
12122 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12123 Mask[I] = SecMask[I] + Vec1VF;
12126 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
12127 } else {
12128 // Vectors of different sizes - resize and reshuffle.
12129 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask,
12130 /*ForSingleMask=*/false);
12131 std::pair<T *, bool> Res2 =
12132 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
12133 ArrayRef<int> SecMask = VMIt->second;
12134 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12135 if (Mask[I] != PoisonMaskElem) {
12136 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12137 if (Res1.second)
12138 Mask[I] = I;
12139 } else if (SecMask[I] != PoisonMaskElem) {
12140 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12141 Mask[I] = (Res2.second ? I : SecMask[I]) + VF;
12144 Prev = Action(Mask, {Res1.first, Res2.first});
12146 VMIt = std::next(VMIt);
12148 bool IsBaseNotUndef = !IsBaseUndef.all();
12149 (void)IsBaseNotUndef;
12150 // Perform requested actions for the remaining masks/vectors.
12151 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) {
12152 // Shuffle other input vectors, if any.
12153 std::pair<T *, bool> Res =
12154 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
12155 ArrayRef<int> SecMask = VMIt->second;
12156 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12157 if (SecMask[I] != PoisonMaskElem) {
12158 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) &&
12159 "Multiple uses of scalars.");
12160 Mask[I] = (Res.second ? I : SecMask[I]) + VF;
12161 } else if (Mask[I] != PoisonMaskElem) {
12162 Mask[I] = I;
12165 Prev = Action(Mask, {Prev, Res.first});
12167 return Prev;
12170 namespace {
12171 /// Data type for handling buildvector sequences with the reused scalars from
12172 /// other tree entries.
12173 template <typename T> struct ShuffledInsertData {
12174 /// List of insertelements to be replaced by shuffles.
12175 SmallVector<InsertElementInst *> InsertElements;
12176 /// The parent vectors and shuffle mask for the given list of inserts.
12177 MapVector<T, SmallVector<int>> ValueMasks;
12179 } // namespace
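/// Computes the total cost of the vectorized tree relative to the scalar
/// code: per-node costs, the cost of extracting externally used scalars,
/// spill costs around calls, and adjustments for insertelement-based
/// buildvector users.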
12181 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
12182 InstructionCost Cost = 0;
12183 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
12184 << VectorizableTree.size() << ".\n");
12186 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
12188 SmallPtrSet<Value *, 4> CheckedExtracts;
12189 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
12190 TreeEntry &TE = *VectorizableTree[I];
12191 // No need to count the cost for combined entries, they are combined and
12192 // just skip their cost.
12193 if (TE.State == TreeEntry::CombinedVectorize) {
12194 LLVM_DEBUG(
12195 dbgs() << "SLP: Skipping cost for combined node that starts with "
12196 << *TE.Scalars[0] << ".\n";
12197 TE.dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
12198 continue;
12200 if (TE.isGather()) {
12201 if (const TreeEntry *E = getTreeEntry(TE.getMainOp());
12202 E && E->getVectorFactor() == TE.getVectorFactor() &&
12203 E->isSame(TE.Scalars)) {
12204 // Some gather nodes might be absolutely the same as some vectorizable
12205 // nodes after reordering, need to handle it.
12206 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle "
12207 << shortBundleName(TE.Scalars, TE.Idx) << ".\n"
12208 << "SLP: Current total cost = " << Cost << "\n");
12209 continue;
12213 // Exclude cost of gather loads nodes which are not used. These nodes were
12214 // built as part of the final attempt to vectorize gathered loads.
12215 assert((!TE.isGather() || TE.Idx == 0 || !TE.UserTreeIndices.empty()) &&
12216 "Expected gather nodes with users only.");
12218 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts);
12219 Cost += C;
12220 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle "
12221 << shortBundleName(TE.Scalars, TE.Idx) << ".\n"
12222 << "SLP: Current total cost = " << Cost << "\n");
12225 SmallPtrSet<Value *, 16> ExtractCostCalculated;
12226 InstructionCost ExtractCost = 0;
12227 SmallVector<ShuffledInsertData<const TreeEntry *>> ShuffledInserts;
12228 SmallVector<APInt> DemandedElts;
12229 SmallDenseSet<Value *, 4> UsedInserts;
12230 DenseSet<std::pair<const TreeEntry *, Type *>> VectorCasts;
12231 std::optional<DenseMap<Value *, unsigned>> ValueToExtUses;
12232 DenseMap<const TreeEntry *, DenseSet<Value *>> ExtractsCount;
12233 SmallPtrSet<Value *, 4> ScalarOpsFromCasts;
12234 // Keep track of each {Scalar, User, Index} tuple.
12235 // On AArch64, this helps in fusing a mov instruction, associated with
12236 // extractelement, with fmul in the backend so that extractelement is free.
12237 SmallVector<std::tuple<Value *, User *, int>, 4> ScalarUserAndIdx;
12238 for (ExternalUser &EU : ExternalUses) {
12239 ScalarUserAndIdx.emplace_back(EU.Scalar, EU.User, EU.Lane);
12241 for (ExternalUser &EU : ExternalUses) {
12242 // Uses by ephemeral values are free (because the ephemeral value will be
12243 // removed prior to code generation, and so the extraction will be
12244 // removed as well).
12245 if (EphValues.count(EU.User))
12246 continue;
12248 // Used in unreachable blocks or in EH pads (rarely executed) or is
12249 // terminated with unreachable instruction.
12250 if (BasicBlock *UserParent =
12251 EU.User ? cast<Instruction>(EU.User)->getParent() : nullptr;
12252 UserParent &&
12253 (!DT->isReachableFromEntry(UserParent) || UserParent->isEHPad() ||
12254 isa_and_present<UnreachableInst>(UserParent->getTerminator())))
12255 continue;
12257 // We only add extract cost once for the same scalar.
12258 if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
12259 !ExtractCostCalculated.insert(EU.Scalar).second)
12260 continue;
12262 // No extract cost for vector "scalar"
12263 if (isa<FixedVectorType>(EU.Scalar->getType()))
12264 continue;
12266 // If the found user is an insertelement, do not calculate extract cost but
12267 // try to detect it as a final shuffled/identity match.
12268 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User);
12269 VU && VU->getOperand(1) == EU.Scalar) {
12270 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
12271 if (!UsedInserts.insert(VU).second)
12272 continue;
12273 std::optional<unsigned> InsertIdx = getElementIndex(VU);
12274 if (InsertIdx) {
12275 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
12276 auto *It = find_if(
12277 ShuffledInserts,
12278 [this, VU](const ShuffledInsertData<const TreeEntry *> &Data) {
12279 // Checks if 2 insertelements are from the same buildvector.
12280 InsertElementInst *VecInsert = Data.InsertElements.front();
12281 return areTwoInsertFromSameBuildVector(
12282 VU, VecInsert, [this](InsertElementInst *II) -> Value * {
12283 Value *Op0 = II->getOperand(0);
12284 if (getTreeEntry(II) && !getTreeEntry(Op0))
12285 return nullptr;
12286 return Op0;
12289 int VecId = -1;
12290 if (It == ShuffledInserts.end()) {
12291 auto &Data = ShuffledInserts.emplace_back();
12292 Data.InsertElements.emplace_back(VU);
12293 DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
12294 VecId = ShuffledInserts.size() - 1;
12295 auto It = MinBWs.find(ScalarTE);
12296 if (It != MinBWs.end() &&
12297 VectorCasts
12298 .insert(std::make_pair(ScalarTE, FTy->getElementType()))
12299 .second) {
12300 unsigned BWSz = It->second.first;
12301 unsigned DstBWSz = DL->getTypeSizeInBits(FTy->getElementType());
12302 unsigned VecOpcode;
12303 if (DstBWSz < BWSz)
12304 VecOpcode = Instruction::Trunc;
12305 else
12306 VecOpcode =
12307 It->second.second ? Instruction::SExt : Instruction::ZExt;
12308 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
12309 InstructionCost C = TTI->getCastInstrCost(
12310 VecOpcode, FTy,
12311 getWidenedType(IntegerType::get(FTy->getContext(), BWSz),
12312 FTy->getNumElements()),
12313 TTI::CastContextHint::None, CostKind);
12314 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12315 << " for extending externally used vector with "
12316 "non-equal minimum bitwidth.\n");
12317 Cost += C;
12319 } else {
12320 if (isFirstInsertElement(VU, It->InsertElements.front()))
12321 It->InsertElements.front() = VU;
12322 VecId = std::distance(ShuffledInserts.begin(), It);
12324 int InIdx = *InsertIdx;
12325 SmallVectorImpl<int> &Mask =
12326 ShuffledInserts[VecId].ValueMasks[ScalarTE];
12327 if (Mask.empty())
12328 Mask.assign(FTy->getNumElements(), PoisonMaskElem);
12329 Mask[InIdx] = EU.Lane;
12330 DemandedElts[VecId].setBit(InIdx);
12331 continue;
12336 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
12337 // If we plan to rewrite the tree in a smaller type, we will need to sign
12338 // extend the extracted value back to the original type. Here, we account
12339 // for the extract and the added cost of the sign extend if needed.
12340 InstructionCost ExtraCost = TTI::TCC_Free;
12341 auto *VecTy = getWidenedType(EU.Scalar->getType(), BundleWidth);
12342 const TreeEntry *Entry = getTreeEntry(EU.Scalar);
12343 auto It = MinBWs.find(Entry);
12344 if (It != MinBWs.end()) {
12345 auto *MinTy = IntegerType::get(F->getContext(), It->second.first);
12346 unsigned Extend = isKnownNonNegative(EU.Scalar, SimplifyQuery(*DL))
12347 ? Instruction::ZExt
12348 : Instruction::SExt;
12349 VecTy = getWidenedType(MinTy, BundleWidth);
12350 ExtraCost = TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
12351 VecTy, EU.Lane);
12352 } else {
12353 ExtraCost =
12354 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, CostKind,
12355 EU.Lane, EU.Scalar, ScalarUserAndIdx);
12357 // Leave the scalar instructions as is if they are cheaper than extracts.
12358 if (Entry->Idx != 0 || Entry->getOpcode() == Instruction::GetElementPtr ||
12359 Entry->getOpcode() == Instruction::Load) {
12360 // Checks if the user of the external scalar is a phi in the loop body.
12361 auto IsPhiInLoop = [&](const ExternalUser &U) {
12362 if (auto *Phi = dyn_cast_if_present<PHINode>(U.User)) {
12363 auto *I = cast<Instruction>(U.Scalar);
12364 const Loop *L = LI->getLoopFor(Phi->getParent());
12365 return L && (Phi->getParent() == I->getParent() ||
12366 L == LI->getLoopFor(I->getParent()));
12368 return false;
12370 if (!ValueToExtUses) {
12371 ValueToExtUses.emplace();
12372 for_each(enumerate(ExternalUses), [&](const auto &P) {
12373 // Ignore phis in loops.
12374 if (IsPhiInLoop(P.value()))
12375 return;
12377 ValueToExtUses->try_emplace(P.value().Scalar, P.index());
12380 // Can use the original instruction if no operands are vectorized or they are
12381 // already marked as externally used.
12382 auto *Inst = cast<Instruction>(EU.Scalar);
12383 InstructionCost ScalarCost = TTI->getInstructionCost(Inst, CostKind);
12384 auto OperandIsScalar = [&](Value *V) {
12385 if (!getTreeEntry(V)) {
12386 // Some extractelements might not be vectorized, but
12387 // transformed into a shuffle and removed from the function,
12388 // so consider that here.
12389 if (auto *EE = dyn_cast<ExtractElementInst>(V))
12390 return !EE->hasOneUse() || !MustGather.contains(EE);
12391 return true;
12393 return ValueToExtUses->contains(V);
12395 bool CanBeUsedAsScalar = all_of(Inst->operands(), OperandIsScalar);
12396 bool CanBeUsedAsScalarCast = false;
12397 if (auto *CI = dyn_cast<CastInst>(Inst); CI && !CanBeUsedAsScalar) {
12398 if (auto *Op = dyn_cast<Instruction>(CI->getOperand(0));
12399 Op && all_of(Op->operands(), OperandIsScalar)) {
12400 InstructionCost OpCost =
12401 (getTreeEntry(Op) && !ValueToExtUses->contains(Op))
12402 ? TTI->getInstructionCost(Op, CostKind)
12403 : 0;
12404 if (ScalarCost + OpCost <= ExtraCost) {
12405 CanBeUsedAsScalar = CanBeUsedAsScalarCast = true;
12406 ScalarCost += OpCost;
12410 if (CanBeUsedAsScalar) {
12411 bool KeepScalar = ScalarCost <= ExtraCost;
12412 // Try to keep the original scalar if the user is a phi node from the same
12413 // block as the root phis currently being vectorized. This keeps better
12414 // ordering info for the PHIs being vectorized.
12415 bool IsProfitablePHIUser =
12416 (KeepScalar || (ScalarCost - ExtraCost <= TTI::TCC_Basic &&
12417 VectorizableTree.front()->Scalars.size() > 2)) &&
12418 VectorizableTree.front()->getOpcode() == Instruction::PHI &&
12419 !Inst->hasNUsesOrMore(UsesLimit) &&
12420 none_of(Inst->users(),
12421 [&](User *U) {
12422 auto *PHIUser = dyn_cast<PHINode>(U);
12423 return (!PHIUser ||
12424 PHIUser->getParent() !=
12425 cast<Instruction>(
12426 VectorizableTree.front()->getMainOp())
12427 ->getParent()) &&
12428 !getTreeEntry(U);
12429 }) &&
12430 count_if(Entry->Scalars, [&](Value *V) {
12431 return ValueToExtUses->contains(V);
12432 }) <= 2;
12433 if (IsProfitablePHIUser) {
12434 KeepScalar = true;
12435 } else if (KeepScalar && ScalarCost != TTI::TCC_Free &&
12436 ExtraCost - ScalarCost <= TTI::TCC_Basic &&
12437 (!GatheredLoadsEntriesFirst.has_value() ||
12438 Entry->Idx < *GatheredLoadsEntriesFirst)) {
12439 unsigned ScalarUsesCount = count_if(Entry->Scalars, [&](Value *V) {
12440 return ValueToExtUses->contains(V);
12442 auto It = ExtractsCount.find(Entry);
12443 if (It != ExtractsCount.end()) {
12444 assert(ScalarUsesCount >= It->getSecond().size() &&
12445 "Expected total number of external uses not less than "
12446 "number of scalar uses.");
12447 ScalarUsesCount -= It->getSecond().size();
12449 // Keep the original scalar if the number of externally used instructions in
12450 // the same entry is not a power of 2. It may help to do some extra
12451 // vectorization for now.
12452 KeepScalar = ScalarUsesCount <= 1 || !has_single_bit(ScalarUsesCount);
12454 if (KeepScalar) {
12455 ExternalUsesAsOriginalScalar.insert(EU.Scalar);
12456 for_each(Inst->operands(), [&](Value *V) {
12457 auto It = ValueToExtUses->find(V);
12458 if (It != ValueToExtUses->end()) {
12459 // Replace all uses to avoid compiler crash.
12460 ExternalUses[It->second].User = nullptr;
12463 ExtraCost = ScalarCost;
12464 if (!IsPhiInLoop(EU))
12465 ExtractsCount[Entry].insert(Inst);
12466 if (CanBeUsedAsScalarCast) {
12467 ScalarOpsFromCasts.insert(Inst->getOperand(0));
12468 // Update the users of the operands of the cast operand to avoid
12469 // compiler crash.
12470 if (auto *IOp = dyn_cast<Instruction>(Inst->getOperand(0))) {
12471 for_each(IOp->operands(), [&](Value *V) {
12472 auto It = ValueToExtUses->find(V);
12473 if (It != ValueToExtUses->end()) {
12474 // Replace all uses to avoid compiler crash.
12475 ExternalUses[It->second].User = nullptr;
12484 ExtractCost += ExtraCost;
12486 // Insert external uses for the operands of casts that are emitted as scalars
12487 // instead of extractelements.
12488 for (Value *V : ScalarOpsFromCasts) {
12489 ExternalUsesAsOriginalScalar.insert(V);
12490 if (const TreeEntry *E = getTreeEntry(V)) {
12491 ExternalUses.emplace_back(V, nullptr, E->findLaneForValue(V));
12494 // Add reduced value cost, if resized.
12495 if (!VectorizedVals.empty()) {
12496 const TreeEntry &Root = *VectorizableTree.front();
12497 auto BWIt = MinBWs.find(&Root);
12498 if (BWIt != MinBWs.end()) {
12499 Type *DstTy = Root.Scalars.front()->getType();
12500 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy->getScalarType());
12501 unsigned SrcSz =
12502 ReductionBitWidth == 0 ? BWIt->second.first : ReductionBitWidth;
12503 if (OriginalSz != SrcSz) {
12504 unsigned Opcode = Instruction::Trunc;
12505 if (OriginalSz > SrcSz)
12506 Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt;
12507 Type *SrcTy = IntegerType::get(DstTy->getContext(), SrcSz);
12508 if (auto *VecTy = dyn_cast<FixedVectorType>(DstTy)) {
12509 assert(SLPReVec && "Only supported by REVEC.");
12510 SrcTy = getWidenedType(SrcTy, VecTy->getNumElements());
12512 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy,
12513 TTI::CastContextHint::None,
12514 TTI::TCK_RecipThroughput);
12519 InstructionCost SpillCost = getSpillCost();
12520 Cost += SpillCost + ExtractCost;
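// Adds the cost of re-shuffling a tree entry's vector when its vector factor
// differs from the VF required by the insertelement users' mask.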
12521 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask,
12522 bool) {
12523 InstructionCost C = 0;
12524 unsigned VF = Mask.size();
12525 unsigned VecVF = TE->getVectorFactor();
12526 if (VF != VecVF &&
12527 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) ||
12528 !ShuffleVectorInst::isIdentityMask(Mask, VF))) {
12529 SmallVector<int> OrigMask(VecVF, PoisonMaskElem);
12530 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)),
12531 OrigMask.begin());
12532 C = ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc,
12533 getWidenedType(TE->getMainOp()->getType(), VecVF),
12534 OrigMask);
12535 LLVM_DEBUG(
12536 dbgs() << "SLP: Adding cost " << C
12537 << " for final shuffle of insertelement external users.\n";
12538 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
12539 Cost += C;
12540 return std::make_pair(TE, true);
12542 return std::make_pair(TE, false);
12544 // Calculate the cost of the reshuffled vectors, if any.
12545 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
12546 Value *Base = ShuffledInserts[I].InsertElements.front()->getOperand(0);
12547 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
12548 unsigned VF = 0;
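// Estimates the cost of the final shuffles that combine one or two tree
// entries into the vector consumed by the insertelement users.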
12549 auto EstimateShufflesCost = [&](ArrayRef<int> Mask,
12550 ArrayRef<const TreeEntry *> TEs) {
12551 assert((TEs.size() == 1 || TEs.size() == 2) &&
12552 "Expected exactly 1 or 2 tree entries.");
12553 if (TEs.size() == 1) {
12554 if (VF == 0)
12555 VF = TEs.front()->getVectorFactor();
12556 auto *FTy = getWidenedType(TEs.back()->Scalars.front()->getType(), VF);
12557 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) &&
12558 !all_of(enumerate(Mask), [=](const auto &Data) {
12559 return Data.value() == PoisonMaskElem ||
12560 (Data.index() < VF &&
12561 static_cast<int>(Data.index()) == Data.value());
12562 })) {
12563 InstructionCost C =
12564 ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, FTy, Mask);
12565 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12566 << " for final shuffle of insertelement "
12567 "external users.\n";
12568 TEs.front()->dump();
12569 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12570 Cost += C;
12572 } else {
12573 if (VF == 0) {
12574 if (TEs.front() &&
12575 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor())
12576 VF = TEs.front()->getVectorFactor();
12577 else
12578 VF = Mask.size();
12580 auto *FTy = getWidenedType(TEs.back()->Scalars.front()->getType(), VF);
12581 InstructionCost C =
12582 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, FTy, Mask);
12583 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12584 << " for final shuffle of vector node and external "
12585 "insertelement users.\n";
12586 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump();
12587 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12588 Cost += C;
12590 VF = Mask.size();
12591 return TEs.back();
12593 (void)performExtractsShuffleAction<const TreeEntry>(
12594 MutableArrayRef(Vector.data(), Vector.size()), Base,
12595 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF,
12596 EstimateShufflesCost);
12597 InstructionCost InsertCost = TTI->getScalarizationOverhead(
12598 cast<FixedVectorType>(
12599 ShuffledInserts[I].InsertElements.front()->getType()),
12600 DemandedElts[I],
12601 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput);
12602 Cost -= InsertCost;
12605 // Add the cost for reduced value resize (if required).
12606 if (ReductionBitWidth != 0) {
12607 assert(UserIgnoreList && "Expected reduction tree.");
12608 const TreeEntry &E = *VectorizableTree.front();
12609 auto It = MinBWs.find(&E);
12610 if (It != MinBWs.end() && It->second.first != ReductionBitWidth) {
12611 unsigned SrcSize = It->second.first;
12612 unsigned DstSize = ReductionBitWidth;
12613 unsigned Opcode = Instruction::Trunc;
12614 if (SrcSize < DstSize)
12615 Opcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
12616 auto *SrcVecTy =
12617 getWidenedType(Builder.getIntNTy(SrcSize), E.getVectorFactor());
12618 auto *DstVecTy =
12619 getWidenedType(Builder.getIntNTy(DstSize), E.getVectorFactor());
12620 TTI::CastContextHint CCH = getCastContextHint(E);
12621 InstructionCost CastCost;
12622 switch (E.getOpcode()) {
12623 case Instruction::SExt:
12624 case Instruction::ZExt:
12625 case Instruction::Trunc: {
12626 const TreeEntry *OpTE = getOperandEntry(&E, 0);
12627 CCH = getCastContextHint(*OpTE);
12628 break;
12630 default:
12631 break;
12633 CastCost += TTI->getCastInstrCost(Opcode, DstVecTy, SrcVecTy, CCH,
12634 TTI::TCK_RecipThroughput);
12635 Cost += CastCost;
12636 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << CastCost
12637 << " for final resize for reduction from " << SrcVecTy
12638 << " to " << DstVecTy << "\n";
12639 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12643 #ifndef NDEBUG
12644 SmallString<256> Str;
12646 raw_svector_ostream OS(Str);
12647 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
12648 << "SLP: Extract Cost = " << ExtractCost << ".\n"
12649 << "SLP: Total Cost = " << Cost << ".\n";
12651 LLVM_DEBUG(dbgs() << Str);
12652 if (ViewSLPTree)
12653 ViewGraph(this, "SLP" + F->getName(), false, Str);
12654 #endif
12656 return Cost;
12659 /// Tries to find extractelement instructions with constant indices from a
12660 /// fixed vector type and gathers such instructions into a group, which is
12661 /// highly likely to be detected as a shuffle of 1 or 2 input vectors. If this
12662 /// attempt was successful, the matched scalars are replaced by poison values
12663 /// in \p VL for future analysis.
12664 std::optional<TTI::ShuffleKind>
12665 BoUpSLP::tryToGatherSingleRegisterExtractElements(
12666 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const {
12667 // Scan list of gathered scalars for extractelements that can be represented
12668 // as shuffles.
12669 MapVector<Value *, SmallVector<int>> VectorOpToIdx;
12670 SmallVector<int> UndefVectorExtracts;
12671 for (int I = 0, E = VL.size(); I < E; ++I) {
12672 auto *EI = dyn_cast<ExtractElementInst>(VL[I]);
12673 if (!EI) {
12674 if (isa<UndefValue>(VL[I]))
12675 UndefVectorExtracts.push_back(I);
12676 continue;
12678 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType());
12679 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand()))
12680 continue;
12681 std::optional<unsigned> Idx = getExtractIndex(EI);
12682 // Undefined index.
12683 if (!Idx) {
12684 UndefVectorExtracts.push_back(I);
12685 continue;
12687 if (Idx >= VecTy->getNumElements()) {
12688 UndefVectorExtracts.push_back(I);
12689 continue;
12691 SmallBitVector ExtractMask(VecTy->getNumElements(), true);
12692 ExtractMask.reset(*Idx);
12693 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) {
12694 UndefVectorExtracts.push_back(I);
12695 continue;
12697 VectorOpToIdx[EI->getVectorOperand()].push_back(I);
12699 // Sort the vector operands by the maximum number of uses in extractelements.
12700 SmallVector<std::pair<Value *, SmallVector<int>>> Vectors =
12701 VectorOpToIdx.takeVector();
12702 stable_sort(Vectors, [](const auto &P1, const auto &P2) {
12703 return P1.second.size() > P2.second.size();
12705 // Find the best pair of the vectors or a single vector.
12706 const int UndefSz = UndefVectorExtracts.size();
12707 unsigned SingleMax = 0;
12708 unsigned PairMax = 0;
12709 if (!Vectors.empty()) {
12710 SingleMax = Vectors.front().second.size() + UndefSz;
12711 if (Vectors.size() > 1) {
12712 auto *ItNext = std::next(Vectors.begin());
12713 PairMax = SingleMax + ItNext->second.size();
12716 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0)
12717 return std::nullopt;
12718 // Check whether it is better to perform a shuffle of 2 vectors or just of a
12719 // single vector.
12720 SmallVector<Value *> SavedVL(VL.begin(), VL.end());
12721 SmallVector<Value *> GatheredExtracts(
12722 VL.size(), PoisonValue::get(VL.front()->getType()));
12723 if (SingleMax >= PairMax && SingleMax) {
12724 for (int Idx : Vectors.front().second)
12725 std::swap(GatheredExtracts[Idx], VL[Idx]);
12726 } else if (!Vectors.empty()) {
12727 for (unsigned Idx : {0, 1})
12728 for (int Idx : Vectors[Idx].second)
12729 std::swap(GatheredExtracts[Idx], VL[Idx]);
12731 // Add extracts from undefs too.
12732 for (int Idx : UndefVectorExtracts)
12733 std::swap(GatheredExtracts[Idx], VL[Idx]);
12734 // Check that the gather of extractelements can be represented as just a
12735 // shuffle of one or two vectors the scalars are extracted from.
12736 std::optional<TTI::ShuffleKind> Res =
12737 isFixedVectorShuffle(GatheredExtracts, Mask);
12738 if (!Res || all_of(Mask, [](int Idx) { return Idx == PoisonMaskElem; })) {
12739 // TODO: try to check other subsets if possible.
12740 // Restore the original VL if attempt was not successful.
12741 copy(SavedVL, VL.begin());
12742 return std::nullopt;
12744 // Restore unused scalars from mask, if some of the extractelements were not
12745 // selected for shuffle.
12746 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) {
12747 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) &&
12748 isa<UndefValue>(GatheredExtracts[I])) {
12749 std::swap(VL[I], GatheredExtracts[I]);
12750 continue;
12752 auto *EI = dyn_cast<ExtractElementInst>(VL[I]);
12753 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) ||
12754 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) ||
12755 is_contained(UndefVectorExtracts, I))
12756 continue;
12758 return Res;
12761 /// Tries to find extractelement instructions with constant indices from a
12762 /// fixed vector type and gathers such instructions into a group, which is
12763 /// highly likely to be detected as a shuffle of 1 or 2 input vectors. If this
12764 /// attempt was successful, the matched scalars are replaced by poison values
12765 /// in \p VL for future analysis.
12766 SmallVector<std::optional<TTI::ShuffleKind>>
12767 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
12768 SmallVectorImpl<int> &Mask,
12769 unsigned NumParts) const {
12770 assert(NumParts > 0 && "NumParts expected be greater than or equal to 1.");
12771 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts);
12772 Mask.assign(VL.size(), PoisonMaskElem);
12773 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
12774 for (unsigned Part : seq<unsigned>(NumParts)) {
12775 // Scan list of gathered scalars for extractelements that can be represented
12776 // as shuffles.
12777 MutableArrayRef<Value *> SubVL = MutableArrayRef(VL).slice(
12778 Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
12779 SmallVector<int> SubMask;
12780 std::optional<TTI::ShuffleKind> Res =
12781 tryToGatherSingleRegisterExtractElements(SubVL, SubMask);
12782 ShufflesRes[Part] = Res;
12783 copy(SubMask, std::next(Mask.begin(), Part * SliceSize));
12785 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) {
12786 return Res.has_value();
12788 ShufflesRes.clear();
12789 return ShufflesRes;
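/// Checks whether the gathered values in \p VL (slice \p Part of the gather
/// node \p TE) can be reused as a shuffle of one or two other tree entries
/// instead of being gathered from scratch. On success, fills \p Mask and
/// \p Entries and returns the kind of shuffle required.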
12792 std::optional<TargetTransformInfo::ShuffleKind>
12793 BoUpSLP::isGatherShuffledSingleRegisterEntry(
12794 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
12795 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part, bool ForOrder) {
12796 Entries.clear();
12797 // TODO: currently checking only for Scalars in the tree entry, need to count
12798 // reused elements too for better cost estimation.
12799 const EdgeInfo &TEUseEI = TE == VectorizableTree.front().get()
12800 ? EdgeInfo(const_cast<TreeEntry *>(TE), 0)
12801 : TE->UserTreeIndices.front();
12802 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE);
12803 const BasicBlock *TEInsertBlock = nullptr;
12804 // Main node of PHI entries keeps the correct order of operands/incoming
12805 // blocks.
12806 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) {
12807 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx);
12808 TEInsertPt = TEInsertBlock->getTerminator();
12809 } else {
12810 TEInsertBlock = TEInsertPt->getParent();
12812 if (!DT->isReachableFromEntry(TEInsertBlock))
12813 return std::nullopt;
12814 auto *NodeUI = DT->getNode(TEInsertBlock);
12815 assert(NodeUI && "Should only process reachable instructions");
12816 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end());
12817 auto CheckOrdering = [&](const Instruction *InsertPt) {
12818 // Argument InsertPt is an instruction where vector code for some other
12819 // tree entry (one that shares one or more scalars with TE) is going to be
12820 // generated. This lambda returns true if insertion point of vector code
12821 // for the TE dominates that point (otherwise dependency is the other way
12822 // around). The other node is not limited to be of a gather kind. Gather
12823 // nodes are not scheduled and their vector code is inserted before their
12824 // first user. If user is PHI, that is supposed to be at the end of a
12825 // predecessor block. Otherwise it is the last instruction among scalars of
12826 // the user node. So, instead of checking dependency between instructions
12827 // themselves, we check dependency between their insertion points for vector
12828 // code (since each scalar instruction ends up as a lane of a vector
12829 // instruction).
12830 const BasicBlock *InsertBlock = InsertPt->getParent();
12831 auto *NodeEUI = DT->getNode(InsertBlock);
12832 if (!NodeEUI)
12833 return false;
12834 assert((NodeUI == NodeEUI) ==
12835 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) &&
12836 "Different nodes should have different DFS numbers");
12837 // Check the order of the gather nodes' users.
12838 if (TEInsertPt->getParent() != InsertBlock &&
12839 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI)))
12840 return false;
12841 if (TEInsertPt->getParent() == InsertBlock &&
12842 TEInsertPt->comesBefore(InsertPt))
12843 return false;
12844 return true;
12846 // Find all tree entries used by the gathered values. If no common entries
12847 // found - not a shuffle.
12848 // Here we build a set of tree nodes for each gathered value and try to
12849 // find the intersection between these sets. If we have at least one common
12850 // tree node for each gathered value - we have just a permutation of a
12851 // single vector. If we have 2 different sets, we're in a situation where we
12852 // have a permutation of 2 input vectors.
12853 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
12854 DenseMap<Value *, int> UsedValuesEntry;
12855 for (Value *V : VL) {
12856 if (isConstant(V))
12857 continue;
12858 // Build a list of tree entries where V is used.
12859 SmallPtrSet<const TreeEntry *, 4> VToTEs;
12860 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) {
12861 if (TEPtr == TE || TEPtr->Idx == 0)
12862 continue;
12863 assert(any_of(TEPtr->Scalars,
12864 [&](Value *V) { return GatheredScalars.contains(V); }) &&
12865 "Must contain at least single gathered value.");
12866 assert(TEPtr->UserTreeIndices.size() == 1 &&
12867 "Expected only single user of a gather node.");
12868 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front();
12870 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp());
12871 const Instruction *InsertPt =
12872 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator()
12873 : &getLastInstructionInBundle(UseEI.UserTE);
12874 if (TEInsertPt == InsertPt) {
12875 // If 2 gathers are operands of the same entry (regardless of whether
12876 // the user is a PHI or not), compare operand indices and use the earlier
12877 // one as the base.
12878 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx)
12879 continue;
12880 // If the user instruction is used for some reason in different
12881 // vectorized nodes - make it depend on index.
12882 if (TEUseEI.UserTE != UseEI.UserTE &&
12883 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx)
12884 continue;
12887 // Check if the user node of the TE comes after user node of TEPtr,
12888 // otherwise TEPtr depends on TE.
12889 if ((TEInsertBlock != InsertPt->getParent() ||
12890 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) &&
12891 !CheckOrdering(InsertPt))
12892 continue;
12893 VToTEs.insert(TEPtr);
12895 if (const TreeEntry *VTE = getTreeEntry(V)) {
12896 if (ForOrder && VTE->Idx < GatheredLoadsEntriesFirst.value_or(0)) {
12897 if (VTE->State != TreeEntry::Vectorize) {
12898 auto It = MultiNodeScalars.find(V);
12899 if (It == MultiNodeScalars.end())
12900 continue;
12901 VTE = *It->getSecond().begin();
12902 // Iterate through all vectorized nodes.
12903 auto *MIt = find_if(It->getSecond(), [](const TreeEntry *MTE) {
12904 return MTE->State == TreeEntry::Vectorize;
12906 if (MIt == It->getSecond().end())
12907 continue;
12908 VTE = *MIt;
12911 Instruction &LastBundleInst = getLastInstructionInBundle(VTE);
12912 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst))
12913 continue;
12914 VToTEs.insert(VTE);
12916 if (VToTEs.empty())
12917 continue;
12918 if (UsedTEs.empty()) {
12919 // On the first iteration, just insert the list of nodes into the vector.
12920 UsedTEs.push_back(VToTEs);
12921 UsedValuesEntry.try_emplace(V, 0);
12922 } else {
12923 // Need to check if there are any previously used tree nodes which use V.
12924 // If there are no such nodes, consider that we have one more input
12925 // vector.
12926 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
12927 unsigned Idx = 0;
12928 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
12929 // Do we have a non-empty intersection of previously listed tree entries
12930 // and tree entries using current V?
12931 set_intersect(VToTEs, Set);
12932 if (!VToTEs.empty()) {
12933 // Yes, write the new subset and continue analysis for the next
12934 // scalar.
12935 Set.swap(VToTEs);
12936 break;
12938 VToTEs = SavedVToTEs;
12939 ++Idx;
12941 // No non-empty intersection found - need to add a second set of possible
12942 // source vectors.
12943 if (Idx == UsedTEs.size()) {
12944 // If the number of input vectors is greater than 2 - not a permutation,
12945 // fall back to the regular gather.
12946 // TODO: support multiple reshuffled nodes.
12947 if (UsedTEs.size() == 2)
12948 continue;
12949 UsedTEs.push_back(SavedVToTEs);
12950 Idx = UsedTEs.size() - 1;
12952 UsedValuesEntry.try_emplace(V, Idx);
12956 if (UsedTEs.empty()) {
12957 Entries.clear();
12958 return std::nullopt;
12961 unsigned VF = 0;
12962 if (UsedTEs.size() == 1) {
12963 // Keep the order to avoid non-determinism.
12964 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(),
12965 UsedTEs.front().end());
12966 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
12967 return TE1->Idx < TE2->Idx;
12969 // Try to find a perfect match in another gather node first.
12970 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) {
12971 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars);
12973 if (It != FirstEntries.end() &&
12974 ((*It)->getVectorFactor() == VL.size() ||
12975 ((*It)->getVectorFactor() == TE->Scalars.size() &&
12976 TE->ReuseShuffleIndices.size() == VL.size() &&
12977 (*It)->isSame(TE->Scalars)))) {
12978 Entries.push_back(*It);
12979 if ((*It)->getVectorFactor() == VL.size()) {
12980 std::iota(std::next(Mask.begin(), Part * VL.size()),
12981 std::next(Mask.begin(), (Part + 1) * VL.size()), 0);
12982 } else {
12983 SmallVector<int> CommonMask = TE->getCommonMask();
12984 copy(CommonMask, Mask.begin());
12986 // Clear undef scalars.
12987 for (int I = 0, Sz = VL.size(); I < Sz; ++I)
12988 if (isa<PoisonValue>(VL[I]))
12989 Mask[I] = PoisonMaskElem;
12990 return TargetTransformInfo::SK_PermuteSingleSrc;
12992 // No perfect match, just shuffle, so choose the first tree node from the
12993 // tree.
12994 Entries.push_back(FirstEntries.front());
12995 } else {
12996 // Try to find nodes with the same vector factor.
12997 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
12998 // Keep the order of tree nodes to avoid non-determinism.
12999 DenseMap<int, const TreeEntry *> VFToTE;
13000 for (const TreeEntry *TE : UsedTEs.front()) {
13001 unsigned VF = TE->getVectorFactor();
13002 auto It = VFToTE.find(VF);
13003 if (It != VFToTE.end()) {
13004 if (It->second->Idx > TE->Idx)
13005 It->getSecond() = TE;
13006 continue;
13008 VFToTE.try_emplace(VF, TE);
13010 // Same, keep the order to avoid non-determinism.
13011 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(),
13012 UsedTEs.back().end());
13013 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
13014 return TE1->Idx < TE2->Idx;
13016 for (const TreeEntry *TE : SecondEntries) {
13017 auto It = VFToTE.find(TE->getVectorFactor());
13018 if (It != VFToTE.end()) {
13019 VF = It->first;
13020 Entries.push_back(It->second);
13021 Entries.push_back(TE);
13022 break;
13025 // No 2 source vectors with the same vector factor - just choose 2 with max
13026 // index.
13027 if (Entries.empty()) {
13028 Entries.push_back(*llvm::max_element(
13029 UsedTEs.front(), [](const TreeEntry *TE1, const TreeEntry *TE2) {
13030 return TE1->Idx < TE2->Idx;
13031 }));
13032 Entries.push_back(SecondEntries.front());
13033 VF = std::max(Entries.front()->getVectorFactor(),
13034 Entries.back()->getVectorFactor());
13038 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, IsaPred<UndefValue>);
13039 // Checks if the 2 PHIs are compatible in terms of their likelihood of being
13040 // vectorized.
13041 auto AreCompatiblePHIs = [&](Value *V, Value *V1) {
13042 auto *PHI = cast<PHINode>(V);
13043 auto *PHI1 = cast<PHINode>(V1);
13044 // Check that all incoming values are compatible/from the same parent (if
13045 // they are instructions).
13046 // The incoming values are compatible if they are all constants, or
13047 // instructions with the same/alternate opcodes from the same basic block.
13048 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
13049 Value *In = PHI->getIncomingValue(I);
13050 Value *In1 = PHI1->getIncomingValue(I);
13051 if (isConstant(In) && isConstant(In1))
13052 continue;
13053 if (!getSameOpcode({In, In1}, *TLI).getOpcode())
13054 return false;
13055 if (cast<Instruction>(In)->getParent() !=
13056 cast<Instruction>(In1)->getParent())
13057 return false;
13059 return true;
13061 // Check if the value can be ignored during analysis for shuffled gathers.
13062 // We suppose it is better to ignore instructions which do not form splats,
13063 // are not vectorized/not extractelements (these instructions will be handled
13064 // by extractelements processing) or may form a vector node in the future.
13065 auto MightBeIgnored = [=](Value *V) {
13066 auto *I = dyn_cast<Instruction>(V);
13067 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) &&
13068 !isVectorLikeInstWithConstOps(I) &&
13069 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I);
13071 // Check that the neighbor instruction may form a full vector node with the
13072 // current instruction V. This is possible if they have the same/alternate
13073 // opcode and the same parent basic block.
13074 auto NeighborMightBeIgnored = [&](Value *V, int Idx) {
13075 Value *V1 = VL[Idx];
13076 bool UsedInSameVTE = false;
13077 auto It = UsedValuesEntry.find(V1);
13078 if (It != UsedValuesEntry.end())
13079 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second;
13080 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE &&
13081 getSameOpcode({V, V1}, *TLI).getOpcode() &&
13082 cast<Instruction>(V)->getParent() ==
13083 cast<Instruction>(V1)->getParent() &&
13084 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1));
13086 // Build a shuffle mask for better cost estimation and vector emission.
13087 SmallBitVector UsedIdxs(Entries.size());
13088 SmallVector<std::pair<unsigned, int>> EntryLanes;
13089 for (int I = 0, E = VL.size(); I < E; ++I) {
13090 Value *V = VL[I];
13091 auto It = UsedValuesEntry.find(V);
13092 if (It == UsedValuesEntry.end())
13093 continue;
13094 // Do not try to shuffle scalars if they are constants, or instructions
13095 // that are likely to be vectorized as a result of the subsequent
13096 // buildvector vectorization.
13097 if (isConstant(V) || (MightBeIgnored(V) &&
13098 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) ||
13099 (I != E - 1 && NeighborMightBeIgnored(V, I + 1)))))
13100 continue;
13101 unsigned Idx = It->second;
13102 EntryLanes.emplace_back(Idx, I);
13103 UsedIdxs.set(Idx);
13105 // Iterate through all shuffled scalars and select entries, which can be used
13106 // for final shuffle.
13107 SmallVector<const TreeEntry *> TempEntries;
13108 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) {
13109 if (!UsedIdxs.test(I))
13110 continue;
13111 // Fix the entry number for the given scalar. If it is the first entry, set
13112 // Pair.first to 0, otherwise to 1 (currently select at most 2 nodes).
13113 // These indices are used as the vector offset when calculating the final
13114 // shuffle mask.
13115 for (std::pair<unsigned, int> &Pair : EntryLanes)
13116 if (Pair.first == I)
13117 Pair.first = TempEntries.size();
13118 TempEntries.push_back(Entries[I]);
13120 Entries.swap(TempEntries);
13121 if (EntryLanes.size() == Entries.size() &&
13122 !VL.equals(ArrayRef(TE->Scalars)
13123 .slice(Part * VL.size(),
13124 std::min<int>(VL.size(), TE->Scalars.size())))) {
13125 // We may have only 1 or 2 entries here. If the number of scalars is equal
13126 // to the number of entries, there is no need to do the analysis, it is not
13127 // very profitable. Since VL is not the same as TE->Scalars, it means we
13128 // already have some shuffles before. Cut off the unprofitable case.
13129 Entries.clear();
13130 return std::nullopt;
13132 // Build the final mask, check for the identity shuffle, if possible.
13133 bool IsIdentity = Entries.size() == 1;
13134 // Pair.first is the offset to the vector, while Pair.second is the index of
13135 // scalar in the list.
13136 for (const std::pair<unsigned, int> &Pair : EntryLanes) {
13137 unsigned Idx = Part * VL.size() + Pair.second;
13138 Mask[Idx] =
13139 Pair.first * VF +
13140 (ForOrder ? std::distance(
13141 Entries[Pair.first]->Scalars.begin(),
13142 find(Entries[Pair.first]->Scalars, VL[Pair.second]))
13143 : Entries[Pair.first]->findLaneForValue(VL[Pair.second]));
13144 IsIdentity &= Mask[Idx] == Pair.second;
13146 switch (Entries.size()) {
13147 case 1:
13148 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2)
13149 return TargetTransformInfo::SK_PermuteSingleSrc;
13150 break;
13151 case 2:
13152 if (EntryLanes.size() > 2 || VL.size() <= 2)
13153 return TargetTransformInfo::SK_PermuteTwoSrc;
13154 break;
13155 default:
13156 break;
13158 Entries.clear();
13159 // Clear the corresponding mask elements.
13160 std::fill(std::next(Mask.begin(), Part * VL.size()),
13161 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem);
13162 return std::nullopt;
13165 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
13166 BoUpSLP::isGatherShuffledEntry(
13167 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
13168 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, unsigned NumParts,
13169 bool ForOrder) {
13170 assert(NumParts > 0 && NumParts < VL.size() &&
13171 "Expected positive number of registers.");
13172 Entries.clear();
13173 // No need to check for the topmost gather node.
13174 if (TE == VectorizableTree.front().get() &&
13175 (!GatheredLoadsEntriesFirst.has_value() ||
13176 none_of(ArrayRef(VectorizableTree).drop_front(),
13177 [](const std::unique_ptr<TreeEntry> &TE) {
13178 return !TE->isGather();
13179 })))
13180 return {};
13181 // FIXME: Gathering for non-power-of-2 nodes not implemented yet.
13182 if (TE->isNonPowOf2Vec())
13183 return {};
13184 Mask.assign(VL.size(), PoisonMaskElem);
13185 assert((TE->UserTreeIndices.size() == 1 ||
13186 TE == VectorizableTree.front().get()) &&
13187 "Expected only single user of the gather node.");
13188 assert(VL.size() % NumParts == 0 &&
13189 "Number of scalars must be divisible by NumParts.");
13190 if (!TE->UserTreeIndices.empty() &&
13191 TE->UserTreeIndices.front().UserTE->isGather() &&
13192 TE->UserTreeIndices.front().EdgeIdx == UINT_MAX) {
13193 assert((TE->Idx == 0 || TE->getOpcode() == Instruction::ExtractElement ||
13194 isSplat(TE->Scalars)) &&
13195 "Expected splat or extractelements only node.");
13196 return {};
13198 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
13199 SmallVector<std::optional<TTI::ShuffleKind>> Res;
13200 for (unsigned Part : seq<unsigned>(NumParts)) {
13201 ArrayRef<Value *> SubVL =
13202 VL.slice(Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
13203 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back();
13204 std::optional<TTI::ShuffleKind> SubRes =
13205 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part,
13206 ForOrder);
13207 if (!SubRes)
13208 SubEntries.clear();
13209 Res.push_back(SubRes);
13210 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc &&
13211 SubEntries.front()->getVectorFactor() == VL.size() &&
13212 (SubEntries.front()->isSame(TE->Scalars) ||
13213 SubEntries.front()->isSame(VL))) {
13214 SmallVector<const TreeEntry *> LocalSubEntries;
13215 LocalSubEntries.swap(SubEntries);
13216 Entries.clear();
13217 Res.clear();
13218 std::iota(Mask.begin(), Mask.end(), 0);
13219 // Clear undef scalars.
13220 for (int I = 0, Sz = VL.size(); I < Sz; ++I)
13221 if (isa<PoisonValue>(VL[I]))
13222 Mask[I] = PoisonMaskElem;
13223 Entries.emplace_back(1, LocalSubEntries.front());
13224 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc);
13225 return Res;
13228 if (all_of(Res,
13229 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) {
13230 Entries.clear();
13231 return {};
13233 return Res;
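// An illustrative example (hypothetical IR, added for clarity) of what a
// successful match above enables: if the gathered scalars {%b, %a, %d, %c}
// all come from an already vectorized entry producing
// %vec = <4 x i32> {%a, %b, %c, %d}, the analysis reports
// SK_PermuteSingleSrc with the mask below, so the gather can later be emitted
// as a single shuffle of the existing vector:
// \code
//   %gather = shufflevector <4 x i32> %vec, <4 x i32> poison,
//                           <4 x i32> <i32 1, i32 0, i32 3, i32 2>
// \endcode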
13236 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
13237 Type *ScalarTy) const {
13238 auto *VecTy = getWidenedType(ScalarTy, VL.size());
13239 bool DuplicateNonConst = false;
13240 // Find the cost of inserting/extracting values from the vector.
13241 // Check if the same elements are inserted several times and count them as
13242 // shuffle candidates.
13243 APInt ShuffledElements = APInt::getZero(VL.size());
13244 DenseMap<Value *, unsigned> UniqueElements;
13245 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
13246 InstructionCost Cost;
13247 auto EstimateInsertCost = [&](unsigned I, Value *V) {
13248 if (V->getType() != ScalarTy) {
13249 Cost += TTI->getCastInstrCost(Instruction::Trunc, ScalarTy, V->getType(),
13250 TTI::CastContextHint::None, CostKind);
13251 V = nullptr;
13253 if (!ForPoisonSrc)
13254 Cost +=
13255 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind,
13256 I, Constant::getNullValue(VecTy), V);
13258 SmallVector<int> ShuffleMask(VL.size(), PoisonMaskElem);
13259 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
13260 Value *V = VL[I];
13261 // No need to shuffle duplicates for constants.
13262 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) {
13263 ShuffledElements.setBit(I);
13264 ShuffleMask[I] = isa<PoisonValue>(V) ? PoisonMaskElem : I;
13265 continue;
13268 auto Res = UniqueElements.try_emplace(V, I);
13269 if (Res.second) {
13270 EstimateInsertCost(I, V);
13271 ShuffleMask[I] = I;
13272 continue;
13275 DuplicateNonConst = true;
13276 ShuffledElements.setBit(I);
13277 ShuffleMask[I] = Res.first->second;
13279 if (ForPoisonSrc) {
13280 if (isa<FixedVectorType>(ScalarTy)) {
13281 assert(SLPReVec && "Only supported by REVEC.");
13282 // We don't need to insert elements one by one. Instead, we can insert the
13283 // entire vector into the destination.
13284 Cost = 0;
13285 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
13286 for (unsigned I : seq<unsigned>(VL.size()))
13287 if (!ShuffledElements[I])
13288 Cost += TTI->getShuffleCost(
13289 TTI::SK_InsertSubvector, VecTy, std::nullopt, CostKind,
13290 I * ScalarTyNumElements, cast<FixedVectorType>(ScalarTy));
13291 } else {
13292 Cost = TTI->getScalarizationOverhead(VecTy, ~ShuffledElements,
13293 /*Insert*/ true,
13294 /*Extract*/ false, CostKind);
13297 if (DuplicateNonConst)
13298 Cost += ::getShuffleCost(*TTI, TargetTransformInfo::SK_PermuteSingleSrc,
13299 VecTy, ShuffleMask);
13300 return Cost;
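// An illustrative cost sketch (hypothetical values): for VL = {%a, %b, %a, 7}
// with ForPoisonSrc == true, lane 3 is a constant and lane 2 duplicates lane 0,
// so both are marked as shuffled. The estimate is then roughly the
// scalarization overhead of inserting the unique scalars {%a, %b} plus one
// SK_PermuteSingleSrc permute using ShuffleMask = <0, 1, 0, 3>, e.g.:
// \code
//   %gathered = shufflevector <4 x i32> %unique, <4 x i32> poison,
//                             <4 x i32> <i32 0, i32 1, i32 0, i32 3>
// \endcode
// where %unique stands for the vector built from the non-duplicate inserts.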
13303 // Perform operand reordering on the instructions in VL and return the reordered
13304 // operands in Left and Right.
13305 void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
13306 SmallVectorImpl<Value *> &Left,
13307 SmallVectorImpl<Value *> &Right,
13308 const BoUpSLP &R) {
13309 if (VL.empty())
13310 return;
13311 VLOperands Ops(VL, R);
13312 // Reorder the operands in place.
13313 Ops.reorder();
13314 Left = Ops.getVL(0);
13315 Right = Ops.getVL(1);
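// An illustrative sketch (hypothetical IR): for
// \code
//   %x = add i32 %a, %b
//   %y = add i32 %c, %a
// \endcode
// the reordering may exploit commutativity and return Left = {%a, %a} (a
// splat/broadcast candidate) and Right = {%b, %c}, which is usually cheaper
// to vectorize than keeping the original operand order.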
13318 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) {
13319 auto &Res = EntryToLastInstruction.try_emplace(E).first->second;
13320 if (Res)
13321 return *Res;
13322 // Get the basic block this bundle is in. All instructions in the bundle
13323 // should be in this block (except for extractelement-like instructions with
13324 // constant indices or gathered loads).
13325 auto *Front = E->getMainOp();
13326 auto *BB = Front->getParent();
13327 assert(((GatheredLoadsEntriesFirst.has_value() &&
13328 E->getOpcode() == Instruction::Load && E->isGather() &&
13329 E->Idx < *GatheredLoadsEntriesFirst) ||
13330 all_of(E->Scalars,
13331 [=](Value *V) -> bool {
13332 if (E->getOpcode() == Instruction::GetElementPtr &&
13333 !isa<GetElementPtrInst>(V))
13334 return true;
13335 auto *I = cast<Instruction>(V);
13336 return !E->isOpcodeOrAlt(I) || I->getParent() == BB ||
13337 isVectorLikeInstWithConstOps(I);
13338 })) &&
13339 "Expected gathered loads or GEPs or instructions from same basic "
13340 "block.");
13342 auto FindLastInst = [&]() {
13343 Instruction *LastInst = Front;
13344 for (Value *V : E->Scalars) {
13345 auto *I = dyn_cast<Instruction>(V);
13346 if (!I)
13347 continue;
13348 if (LastInst->getParent() == I->getParent()) {
13349 if (LastInst->comesBefore(I))
13350 LastInst = I;
13351 continue;
13353 assert(((E->getOpcode() == Instruction::GetElementPtr &&
13354 !isa<GetElementPtrInst>(I)) ||
13355 (isVectorLikeInstWithConstOps(LastInst) &&
13356 isVectorLikeInstWithConstOps(I)) ||
13357 (GatheredLoadsEntriesFirst.has_value() &&
13358 E->getOpcode() == Instruction::Load && E->isGather() &&
13359 E->Idx < *GatheredLoadsEntriesFirst)) &&
13360 "Expected vector-like or non-GEP in GEP node insts only.");
13361 if (!DT->isReachableFromEntry(LastInst->getParent())) {
13362 LastInst = I;
13363 continue;
13365 if (!DT->isReachableFromEntry(I->getParent()))
13366 continue;
13367 auto *NodeA = DT->getNode(LastInst->getParent());
13368 auto *NodeB = DT->getNode(I->getParent());
13369 assert(NodeA && "Should only process reachable instructions");
13370 assert(NodeB && "Should only process reachable instructions");
13371 assert((NodeA == NodeB) ==
13372 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
13373 "Different nodes should have different DFS numbers");
13374 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn())
13375 LastInst = I;
13377 BB = LastInst->getParent();
13378 return LastInst;
13381 auto FindFirstInst = [&]() {
13382 Instruction *FirstInst = Front;
13383 for (Value *V : E->Scalars) {
13384 auto *I = dyn_cast<Instruction>(V);
13385 if (!I)
13386 continue;
13387 if (FirstInst->getParent() == I->getParent()) {
13388 if (I->comesBefore(FirstInst))
13389 FirstInst = I;
13390 continue;
13392 assert(((E->getOpcode() == Instruction::GetElementPtr &&
13393 !isa<GetElementPtrInst>(I)) ||
13394 (isVectorLikeInstWithConstOps(FirstInst) &&
13395 isVectorLikeInstWithConstOps(I))) &&
13396 "Expected vector-like or non-GEP in GEP node insts only.");
13397 if (!DT->isReachableFromEntry(FirstInst->getParent())) {
13398 FirstInst = I;
13399 continue;
13401 if (!DT->isReachableFromEntry(I->getParent()))
13402 continue;
13403 auto *NodeA = DT->getNode(FirstInst->getParent());
13404 auto *NodeB = DT->getNode(I->getParent());
13405 assert(NodeA && "Should only process reachable instructions");
13406 assert(NodeB && "Should only process reachable instructions");
13407 assert((NodeA == NodeB) ==
13408 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
13409 "Different nodes should have different DFS numbers");
13410 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn())
13411 FirstInst = I;
13413 return FirstInst;
13416 // Set insertpoint for gathered loads to the very first load.
13417 if (GatheredLoadsEntriesFirst.has_value() &&
13418 E->Idx >= *GatheredLoadsEntriesFirst && !E->isGather() &&
13419 E->getOpcode() == Instruction::Load) {
13420 Res = FindFirstInst();
13421 return *Res;
13424 // Set the insert point to the beginning of the basic block if the entry
13425 // should not be scheduled.
13426 if (doesNotNeedToSchedule(E->Scalars) ||
13427 (!E->isGather() && all_of(E->Scalars, isVectorLikeInstWithConstOps))) {
13428 if ((E->getOpcode() == Instruction::GetElementPtr &&
13429 any_of(E->Scalars,
13430 [](Value *V) {
13431 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V);
13432 })) ||
13433 all_of(E->Scalars,
13434 [](Value *V) {
13435 return !isVectorLikeInstWithConstOps(V) &&
13436 isUsedOutsideBlock(V);
13437 }) ||
13438 (E->isGather() && E->Idx == 0 && all_of(E->Scalars, [](Value *V) {
13439 return isa<ExtractElementInst, UndefValue>(V) ||
13440 areAllOperandsNonInsts(V);
13441 })))
13442 Res = FindLastInst();
13443 else
13444 Res = FindFirstInst();
13445 return *Res;
13448 // Find the last instruction. The common case should be that BB has been
13449 // scheduled, and the last instruction is VL.back(). So we start with
13450 // VL.back() and iterate over schedule data until we reach the end of the
13451 // bundle. The end of the bundle is marked by null ScheduleData.
13452 if (BlocksSchedules.count(BB) && !E->isGather()) {
13453 Value *V = E->isOneOf(E->Scalars.back());
13454 if (doesNotNeedToBeScheduled(V))
13455 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
13456 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
13457 if (Bundle && Bundle->isPartOfBundle())
13458 for (; Bundle; Bundle = Bundle->NextInBundle)
13459 Res = Bundle->Inst;
13462 // LastInst can still be null at this point if there's either not an entry
13463 // for BB in BlocksSchedules or there's no ScheduleData available for
13464 // VL.back(). This can be the case if buildTree_rec aborts for various
13465 // reasons (e.g., the maximum recursion depth is reached, the maximum region
13466 // size is reached, etc.). ScheduleData is initialized in the scheduling
13467 // "dry-run".
13469 // If this happens, we can still find the last instruction by brute force. We
13470 // iterate forwards from Front (inclusive) until we either see all
13471 // instructions in the bundle or reach the end of the block. If Front is the
13472 // last instruction in program order, LastInst will be set to Front, and we
13473 // will visit all the remaining instructions in the block.
13475 // One of the reasons we exit early from buildTree_rec is to place an upper
13476 // bound on compile-time. Thus, taking an additional compile-time hit here is
13477 // not ideal. However, this should be exceedingly rare since it requires that
13478 // we both exit early from buildTree_rec and that the bundle be out-of-order
13479 // (causing us to iterate all the way to the end of the block).
13480 if (!Res)
13481 Res = FindLastInst();
13482 assert(Res && "Failed to find last instruction in bundle");
13483 return *Res;
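// An illustrative example (hypothetical IR): for the bundle {%x, %y} in
// \code
//   %x = add i32 %a, %b
//   %t = mul i32 %x, 3
//   %y = add i32 %c, %d
// \endcode
// the last instruction in program order is %y, so the vectorized code for the
// bundle is inserted after %y, where both scalars are already available.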
13486 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
13487 auto *Front = E->getMainOp();
13488 Instruction *LastInst = &getLastInstructionInBundle(E);
13489 assert(LastInst && "Failed to find last instruction in bundle");
13490 BasicBlock::iterator LastInstIt = LastInst->getIterator();
13491 // If the instruction is PHI, set the insert point after all the PHIs.
13492 bool IsPHI = isa<PHINode>(LastInst);
13493 if (IsPHI)
13494 LastInstIt = LastInst->getParent()->getFirstNonPHIIt();
13495 if (IsPHI || (!E->isGather() && doesNotNeedToSchedule(E->Scalars))) {
13496 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt);
13497 } else {
13498 // Set the insertion point after the last instruction in the bundle. Set the
13499 // debug location to Front.
13500 Builder.SetInsertPoint(
13501 LastInst->getParent(),
13502 LastInst->getNextNonDebugInstruction()->getIterator());
13504 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
13507 Value *BoUpSLP::gather(
13508 ArrayRef<Value *> VL, Value *Root, Type *ScalarTy,
13509 function_ref<Value *(Value *, Value *, ArrayRef<int>)> CreateShuffle) {
13510 // List of instructions/lanes from the current block and/or the blocks which
13511 // are part of the current loop. These instructions will be inserted at the end
13512 // to make it possible to optimize loops and hoist invariant instructions out
13513 // of the loop's body with better chances for success.
13514 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
13515 SmallSet<int, 4> PostponedIndices;
13516 Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
13517 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
13518 SmallPtrSet<BasicBlock *, 4> Visited;
13519 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
13520 InsertBB = InsertBB->getSinglePredecessor();
13521 return InsertBB && InsertBB == InstBB;
13523 for (int I = 0, E = VL.size(); I < E; ++I) {
13524 if (auto *Inst = dyn_cast<Instruction>(VL[I]))
13525 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
13526 getTreeEntry(Inst) ||
13527 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) &&
13528 PostponedIndices.insert(I).second)
13529 PostponedInsts.emplace_back(Inst, I);
13532 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos,
13533 Type *Ty) {
13534 Value *Scalar = V;
13535 if (Scalar->getType() != Ty) {
13536 assert(Scalar->getType()->isIntOrIntVectorTy() &&
13537 Ty->isIntOrIntVectorTy() && "Expected integer types only.");
13538 Value *V = Scalar;
13539 if (auto *CI = dyn_cast<CastInst>(Scalar);
13540 isa_and_nonnull<SExtInst, ZExtInst>(CI)) {
13541 Value *Op = CI->getOperand(0);
13542 if (auto *IOp = dyn_cast<Instruction>(Op);
13543 !IOp || !(isDeleted(IOp) || getTreeEntry(IOp)))
13544 V = Op;
13546 Scalar = Builder.CreateIntCast(
13547 V, Ty, !isKnownNonNegative(Scalar, SimplifyQuery(*DL)));
13550 Instruction *InsElt;
13551 if (auto *VecTy = dyn_cast<FixedVectorType>(Scalar->getType())) {
13552 assert(SLPReVec && "FixedVectorType is not expected.");
13553 Vec = InsElt = Builder.CreateInsertVector(
13554 Vec->getType(), Vec, Scalar,
13555 Builder.getInt64(Pos * VecTy->getNumElements()));
13556 auto *II = dyn_cast<IntrinsicInst>(InsElt);
13557 if (!II || II->getIntrinsicID() != Intrinsic::vector_insert)
13558 return Vec;
13559 } else {
13560 Vec = Builder.CreateInsertElement(Vec, Scalar, Builder.getInt32(Pos));
13561 InsElt = dyn_cast<InsertElementInst>(Vec);
13562 if (!InsElt)
13563 return Vec;
13565 GatherShuffleExtractSeq.insert(InsElt);
13566 CSEBlocks.insert(InsElt->getParent());
13567 // Add to our 'need-to-extract' list.
13568 if (isa<Instruction>(V)) {
13569 if (TreeEntry *Entry = getTreeEntry(V)) {
13570 // Find which lane we need to extract.
13571 User *UserOp = nullptr;
13572 if (Scalar != V) {
13573 if (auto *SI = dyn_cast<Instruction>(Scalar))
13574 UserOp = SI;
13575 } else {
13576 UserOp = InsElt;
13578 if (UserOp) {
13579 unsigned FoundLane = Entry->findLaneForValue(V);
13580 ExternalUses.emplace_back(V, UserOp, FoundLane);
13584 return Vec;
13586 auto *VecTy = getWidenedType(ScalarTy, VL.size());
13587 Value *Vec = PoisonValue::get(VecTy);
13588 SmallVector<int> NonConsts;
13589 SmallVector<int> Mask(VL.size());
13590 std::iota(Mask.begin(), Mask.end(), 0);
13591 Value *OriginalRoot = Root;
13592 if (auto *SV = dyn_cast_or_null<ShuffleVectorInst>(Root);
13593 SV && isa<PoisonValue>(SV->getOperand(1)) &&
13594 SV->getOperand(0)->getType() == VecTy) {
13595 Root = SV->getOperand(0);
13596 Mask.assign(SV->getShuffleMask().begin(), SV->getShuffleMask().end());
13598 // Insert constant values first.
13599 for (int I = 0, E = VL.size(); I < E; ++I) {
13600 if (PostponedIndices.contains(I))
13601 continue;
13602 if (!isConstant(VL[I])) {
13603 NonConsts.push_back(I);
13604 continue;
13606 if (isa<PoisonValue>(VL[I]))
13607 continue;
13608 Vec = CreateInsertElement(Vec, VL[I], I, ScalarTy);
13609 Mask[I] = I + E;
13611 if (Root) {
13612 if (isa<PoisonValue>(Vec)) {
13613 Vec = OriginalRoot;
13614 } else {
13615 Vec = CreateShuffle(Root, Vec, Mask);
13616 if (auto *OI = dyn_cast<Instruction>(OriginalRoot);
13617 OI && OI->hasNUses(0) &&
13618 none_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
13619 return TE->VectorizedValue == OI;
13621 eraseInstruction(OI);
13624 // Insert non-constant values.
13625 for (int I : NonConsts)
13626 Vec = CreateInsertElement(Vec, VL[I], I, ScalarTy);
13627 // Append instructions which are/may be part of the loop at the end to make
13628 // it possible to hoist non-loop-based instructions.
13629 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
13630 Vec = CreateInsertElement(Vec, Pair.first, Pair.second, ScalarTy);
13632 return Vec;
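// An illustrative example (hypothetical IR): gathering VL = {%a, 5, poison, %b}
// without a root vector typically produces an insertelement chain such as
// \code
//   %v0 = insertelement <4 x i32> poison, i32 5, i32 1
//   %v1 = insertelement <4 x i32> %v0, i32 %a, i32 0
//   %v2 = insertelement <4 x i32> %v1, i32 %b, i32 3
// \endcode
// with constants inserted first, the poison lane skipped, and any scalars
// defined in the current block/loop postponed to the end of the chain so the
// loop-invariant part can be hoisted more easily.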
13635 /// Merges shuffle masks and emits the final shuffle instruction, if required.
13636 /// It supports shuffling of 2 input vectors. It implements lazy shuffle
13637 /// emission: the actual shuffle instruction is generated only if it is really
13638 /// required. Otherwise, the shuffle instruction emission is delayed till the
13639 /// end of the process, to reduce the number of emitted instructions and
13640 /// simplify further analysis/transformations.
13641 /// The class will also look through the previously emitted shuffle
13642 /// instructions and properly mark indices in the mask as undef.
13643 /// For example, given the code
13644 /// \code
13645 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
13646 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
13647 /// \endcode
13648 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will
13649 /// look through %s1 and %s2 and emit
13650 /// \code
13651 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
13652 /// \endcode
13653 /// instead.
13654 /// If the 2 operands are of different sizes, the smaller one will be resized
13655 /// and the mask recalculated properly.
13656 /// For example, given the code
13657 /// \code
13658 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
13659 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
13660 /// \endcode
13661 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will
13662 /// look through %s1 and %s2 and emit
13663 /// \code
13664 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
13665 /// \endcode
13666 /// instead.
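/// A minimal usage sketch (illustrative only; the names are placeholders and
/// the entries are assumed to be vectorized already):
/// \code
///   ShuffleInstructionBuilder ShuffleBuilder(ScalarTy, Builder, R);
///   ShuffleBuilder.add(*E1, Mask1);   // first input and its mask
///   ShuffleBuilder.add(*E2, Mask2);   // second input, masks are merged lazily
///   Value *Res = ShuffleBuilder.finalize(ExtMask, /*SubVectors=*/{},
///                                        /*SubVectorsMask=*/{});
/// \endcode
/// The actual shufflevector (if any is needed) is emitted only in finalize(),
/// once the combined mask is known.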
13667 class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
13668 bool IsFinalized = false;
13669 /// Combined mask for all applied operands and masks. It is built during
13670 /// analysis and actual emission of shuffle vector instructions.
13671 SmallVector<int> CommonMask;
13672 /// List of operands for the shuffle vector instruction. It holds at most 2
13673 /// operands; if a 3rd one is going to be added, the first 2 are combined into
13674 /// a shuffle with the \p CommonMask mask, the first operand is set to the
13675 /// resulting shuffle and the second operand is set to the newly added
13676 /// operand. The \p CommonMask is transformed in the proper way after that.
13677 SmallVector<Value *, 2> InVectors;
13678 IRBuilderBase &Builder;
13679 BoUpSLP &R;
13681 class ShuffleIRBuilder {
13682 IRBuilderBase &Builder;
13683 /// Holds all of the instructions that we gathered.
13684 SetVector<Instruction *> &GatherShuffleExtractSeq;
13685 /// A list of blocks that we are going to CSE.
13686 DenseSet<BasicBlock *> &CSEBlocks;
13687 /// Data layout.
13688 const DataLayout &DL;
13690 public:
13691 ShuffleIRBuilder(IRBuilderBase &Builder,
13692 SetVector<Instruction *> &GatherShuffleExtractSeq,
13693 DenseSet<BasicBlock *> &CSEBlocks, const DataLayout &DL)
13694 : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
13695 CSEBlocks(CSEBlocks), DL(DL) {}
13696 ~ShuffleIRBuilder() = default;
13697 /// Creates shufflevector for the 2 operands with the given mask.
13698 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) {
13699 if (V1->getType() != V2->getType()) {
13700 assert(V1->getType()->isIntOrIntVectorTy() &&
13701 V2->getType()->isIntOrIntVectorTy() &&
13702 "Expected integer vector types only.");
13703 if (V1->getType() != V2->getType()) {
13704 if (cast<VectorType>(V2->getType())
13705 ->getElementType()
13706 ->getIntegerBitWidth() < cast<VectorType>(V1->getType())
13707 ->getElementType()
13708 ->getIntegerBitWidth())
13709 V2 = Builder.CreateIntCast(
13710 V2, V1->getType(), !isKnownNonNegative(V2, SimplifyQuery(DL)));
13711 else
13712 V1 = Builder.CreateIntCast(
13713 V1, V2->getType(), !isKnownNonNegative(V1, SimplifyQuery(DL)));
13716 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask);
13717 if (auto *I = dyn_cast<Instruction>(Vec)) {
13718 GatherShuffleExtractSeq.insert(I);
13719 CSEBlocks.insert(I->getParent());
13721 return Vec;
13723 /// Creates a permutation of the single vector operand with the given mask,
13724 /// if it is not an identity mask.
13725 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) {
13726 if (Mask.empty())
13727 return V1;
13728 unsigned VF = Mask.size();
13729 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements();
13730 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF))
13731 return V1;
13732 Value *Vec = Builder.CreateShuffleVector(V1, Mask);
13733 if (auto *I = dyn_cast<Instruction>(Vec)) {
13734 GatherShuffleExtractSeq.insert(I);
13735 CSEBlocks.insert(I->getParent());
13737 return Vec;
13739 Value *createIdentity(Value *V) { return V; }
13740 Value *createPoison(Type *Ty, unsigned VF) {
13741 return PoisonValue::get(getWidenedType(Ty, VF));
13743 /// Resizes the 2 input vectors to match in size, if they are not equal
13744 /// yet. The smaller vector is resized to the size of the larger vector.
13745 void resizeToMatch(Value *&V1, Value *&V2) {
13746 if (V1->getType() == V2->getType())
13747 return;
13748 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements();
13749 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements();
13750 int VF = std::max(V1VF, V2VF);
13751 int MinVF = std::min(V1VF, V2VF);
13752 SmallVector<int> IdentityMask(VF, PoisonMaskElem);
13753 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF),
13755 Value *&Op = MinVF == V1VF ? V1 : V2;
13756 Op = Builder.CreateShuffleVector(Op, IdentityMask);
13757 if (auto *I = dyn_cast<Instruction>(Op)) {
13758 GatherShuffleExtractSeq.insert(I);
13759 CSEBlocks.insert(I->getParent());
13761 if (MinVF == V1VF)
13762 V1 = Op;
13763 else
13764 V2 = Op;
13768 /// Smart shuffle instruction emission: walks through the shuffle trees and
13769 /// tries to find the best matching vector for the actual shuffle
13770 /// instruction.
13771 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) {
13772 assert(V1 && "Expected at least one vector value.");
13773 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq,
13774 R.CSEBlocks, *R.DL);
13775 return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask,
13776 ShuffleBuilder);
13779 /// Transforms the \p CommonMask mask according to the given \p Mask so that
13780 /// it stays consistent after shuffle emission.
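/// For example (illustrative): if the emitted shuffle used
/// Mask = <1, 0, poison, 2>, the shuffled values now live in lanes 0, 1 and 3
/// of the produced vector, so those lanes of \p CommonMask are rewritten to
/// the identity values:
/// \code
///   Mask       = <1, 0, poison, 2>
///   CommonMask = <0, 1, <kept>, 3>   ; the poison lane keeps its old value
/// \endcode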
13781 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
13782 ArrayRef<int> Mask) {
13783 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
13784 if (Mask[Idx] != PoisonMaskElem)
13785 CommonMask[Idx] = Idx;
13788 /// Cast value \p V to the vector type with the same number of elements, but
13789 /// the base type \p ScalarTy.
13790 Value *castToScalarTyElem(Value *V,
13791 std::optional<bool> IsSigned = std::nullopt) {
13792 auto *VecTy = cast<VectorType>(V->getType());
13793 assert(getNumElements(VecTy) % getNumElements(ScalarTy) == 0);
13794 if (VecTy->getElementType() == ScalarTy->getScalarType())
13795 return V;
13796 return Builder.CreateIntCast(
13797 V, VectorType::get(ScalarTy->getScalarType(), VecTy->getElementCount()),
13798 IsSigned.value_or(!isKnownNonNegative(V, SimplifyQuery(*R.DL))));
13801 public:
13802 ShuffleInstructionBuilder(Type *ScalarTy, IRBuilderBase &Builder, BoUpSLP &R)
13803 : BaseShuffleAnalysis(ScalarTy), Builder(Builder), R(R) {}
13805 /// Adjusts extractelements after reusing them.
13806 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
13807 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
13808 unsigned NumParts, bool &UseVecBaseAsInput) {
13809 UseVecBaseAsInput = false;
13810 SmallPtrSet<Value *, 4> UniqueBases;
13811 Value *VecBase = nullptr;
13812 SmallVector<Value *> VL(E->Scalars.begin(), E->Scalars.end());
13813 if (!E->ReorderIndices.empty()) {
13814 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
13815 E->ReorderIndices.end());
13816 reorderScalars(VL, ReorderMask);
13818 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
13819 int Idx = Mask[I];
13820 if (Idx == PoisonMaskElem)
13821 continue;
13822 auto *EI = cast<ExtractElementInst>(VL[I]);
13823 VecBase = EI->getVectorOperand();
13824 if (const TreeEntry *TE = R.getTreeEntry(VecBase))
13825 VecBase = TE->VectorizedValue;
13826 assert(VecBase && "Expected vectorized value.");
13827 UniqueBases.insert(VecBase);
13828 // If the only use is vectorized - the extractelement itself can be
13829 // deleted.
13830 if (!EI->hasOneUse() || R.ExternalUsesAsOriginalScalar.contains(EI) ||
13831 (NumParts != 1 && count(VL, EI) > 1) ||
13832 any_of(EI->users(), [&](User *U) {
13833 const TreeEntry *UTE = R.getTreeEntry(U);
13834 return !UTE || R.MultiNodeScalars.contains(U) ||
13835 (isa<GetElementPtrInst>(U) &&
13836 !R.areAllUsersVectorized(cast<Instruction>(U))) ||
13837 count_if(R.VectorizableTree,
13838 [&](const std::unique_ptr<TreeEntry> &TE) {
13839 return any_of(TE->UserTreeIndices,
13840 [&](const EdgeInfo &Edge) {
13841 return Edge.UserTE == UTE;
13842 }) &&
13843 is_contained(VL, EI);
13844 }) != 1;
13846 continue;
13847 R.eraseInstruction(EI);
13849 if (NumParts == 1 || UniqueBases.size() == 1) {
13850 assert(VecBase && "Expected vectorized value.");
13851 return castToScalarTyElem(VecBase);
13853 UseVecBaseAsInput = true;
13854 auto TransformToIdentity = [](MutableArrayRef<int> Mask) {
13855 for (auto [I, Idx] : enumerate(Mask))
13856 if (Idx != PoisonMaskElem)
13857 Idx = I;
13859 // Perform a multi-register vector shuffle, joining the parts into a single
13860 // virtual long vector.
13861 // Need to shuffle each part independently and then insert all these parts
13862 // into a long virtual vector register, forming the original vector.
13863 Value *Vec = nullptr;
13864 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
13865 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
13866 for (unsigned Part : seq<unsigned>(NumParts)) {
13867 unsigned Limit = getNumElems(VL.size(), SliceSize, Part);
13868 ArrayRef<Value *> SubVL = ArrayRef(VL).slice(Part * SliceSize, Limit);
13869 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
13870 constexpr int MaxBases = 2;
13871 SmallVector<Value *, MaxBases> Bases(MaxBases);
13872 auto VLMask = zip(SubVL, SubMask);
13873 const unsigned VF = std::accumulate(
13874 VLMask.begin(), VLMask.end(), 0U, [&](unsigned S, const auto &D) {
13875 if (std::get<1>(D) == PoisonMaskElem)
13876 return S;
13877 Value *VecOp =
13878 cast<ExtractElementInst>(std::get<0>(D))->getVectorOperand();
13879 if (const TreeEntry *TE = R.getTreeEntry(VecOp))
13880 VecOp = TE->VectorizedValue;
13881 assert(VecOp && "Expected vectorized value.");
13882 const unsigned Size =
13883 cast<FixedVectorType>(VecOp->getType())->getNumElements();
13884 return std::max(S, Size);
13886 for (const auto [V, I] : VLMask) {
13887 if (I == PoisonMaskElem)
13888 continue;
13889 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand();
13890 if (const TreeEntry *TE = R.getTreeEntry(VecOp))
13891 VecOp = TE->VectorizedValue;
13892 assert(VecOp && "Expected vectorized value.");
13893 VecOp = castToScalarTyElem(VecOp);
13894 Bases[I / VF] = VecOp;
13896 if (!Bases.front())
13897 continue;
13898 Value *SubVec;
13899 if (Bases.back()) {
13900 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask);
13901 TransformToIdentity(SubMask);
13902 } else {
13903 SubVec = Bases.front();
13905 if (!Vec) {
13906 Vec = SubVec;
13907 assert((Part == 0 || all_of(seq<unsigned>(0, Part),
13908 [&](unsigned P) {
13909 ArrayRef<int> SubMask =
13910 Mask.slice(P * SliceSize,
13911 getNumElems(Mask.size(),
13912 SliceSize, P));
13913 return all_of(SubMask, [](int Idx) {
13914 return Idx == PoisonMaskElem;
13916 })) &&
13917 "Expected first part or all previous parts masked.");
13918 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize));
13919 } else {
13920 unsigned NewVF =
13921 cast<FixedVectorType>(Vec->getType())->getNumElements();
13922 if (Vec->getType() != SubVec->getType()) {
13923 unsigned SubVecVF =
13924 cast<FixedVectorType>(SubVec->getType())->getNumElements();
13925 NewVF = std::max(NewVF, SubVecVF);
13927 // Adjust SubMask.
13928 for (int &Idx : SubMask)
13929 if (Idx != PoisonMaskElem)
13930 Idx += NewVF;
13931 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize));
13932 Vec = createShuffle(Vec, SubVec, VecMask);
13933 TransformToIdentity(VecMask);
13936 copy(VecMask, Mask.begin());
13937 return Vec;
13939 /// Checks if the specified entry \p E needs to be delayed because of its
13940 /// dependency nodes.
13941 std::optional<Value *>
13942 needToDelay(const TreeEntry *E,
13943 ArrayRef<SmallVector<const TreeEntry *>> Deps) const {
13944 // No need to delay emission if all deps are ready.
13945 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) {
13946 return all_of(
13947 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; });
13949 return std::nullopt;
13950 // Postpone gather emission, will be emitted after the end of the
13951 // process to keep correct order.
13952 auto *ResVecTy = getWidenedType(ScalarTy, E->getVectorFactor());
13953 return Builder.CreateAlignedLoad(
13954 ResVecTy,
13955 PoisonValue::get(PointerType::getUnqual(ScalarTy->getContext())),
13956 MaybeAlign());
13958 /// Adds 2 input vectors (in the form of tree entries) and the mask for their
13959 /// shuffling.
13960 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) {
13961 Value *V1 = E1.VectorizedValue;
13962 if (V1->getType()->isIntOrIntVectorTy())
13963 V1 = castToScalarTyElem(V1, any_of(E1.Scalars, [&](Value *V) {
13964 return !isKnownNonNegative(
13965 V, SimplifyQuery(*R.DL));
13966 }));
13967 Value *V2 = E2.VectorizedValue;
13968 if (V2->getType()->isIntOrIntVectorTy())
13969 V2 = castToScalarTyElem(V2, any_of(E2.Scalars, [&](Value *V) {
13970 return !isKnownNonNegative(
13971 V, SimplifyQuery(*R.DL));
13972 }));
13973 add(V1, V2, Mask);
13975 /// Adds a single input vector (in the form of a tree entry) and the mask for
13976 /// its shuffling.
13977 void add(const TreeEntry &E1, ArrayRef<int> Mask) {
13978 Value *V1 = E1.VectorizedValue;
13979 if (V1->getType()->isIntOrIntVectorTy())
13980 V1 = castToScalarTyElem(V1, any_of(E1.Scalars, [&](Value *V) {
13981 return !isKnownNonNegative(
13982 V, SimplifyQuery(*R.DL));
13983 }));
13984 add(V1, Mask);
13986 /// Adds 2 input vectors and the mask for their shuffling.
13987 void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
13988 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors.");
13989 assert(isa<FixedVectorType>(V1->getType()) &&
13990 isa<FixedVectorType>(V2->getType()) &&
13991 "castToScalarTyElem expects V1 and V2 to be FixedVectorType");
13992 V1 = castToScalarTyElem(V1);
13993 V2 = castToScalarTyElem(V2);
13994 if (InVectors.empty()) {
13995 InVectors.push_back(V1);
13996 InVectors.push_back(V2);
13997 CommonMask.assign(Mask.begin(), Mask.end());
13998 return;
14000 Value *Vec = InVectors.front();
14001 if (InVectors.size() == 2) {
14002 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14003 transformMaskAfterShuffle(CommonMask, CommonMask);
14004 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() !=
14005 Mask.size()) {
14006 Vec = createShuffle(Vec, nullptr, CommonMask);
14007 transformMaskAfterShuffle(CommonMask, CommonMask);
14009 V1 = createShuffle(V1, V2, Mask);
14010 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14011 if (Mask[Idx] != PoisonMaskElem)
14012 CommonMask[Idx] = Idx + Sz;
14013 InVectors.front() = Vec;
14014 if (InVectors.size() == 2)
14015 InVectors.back() = V1;
14016 else
14017 InVectors.push_back(V1);
14019 /// Adds one more input vector and the mask for the shuffling.
14020 void add(Value *V1, ArrayRef<int> Mask, bool = false) {
14021 assert(isa<FixedVectorType>(V1->getType()) &&
14022 "castToScalarTyElem expects V1 to be FixedVectorType");
14023 V1 = castToScalarTyElem(V1);
14024 if (InVectors.empty()) {
14025 InVectors.push_back(V1);
14026 CommonMask.assign(Mask.begin(), Mask.end());
14027 return;
14029 const auto *It = find(InVectors, V1);
14030 if (It == InVectors.end()) {
14031 if (InVectors.size() == 2 ||
14032 InVectors.front()->getType() != V1->getType()) {
14033 Value *V = InVectors.front();
14034 if (InVectors.size() == 2) {
14035 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask);
14036 transformMaskAfterShuffle(CommonMask, CommonMask);
14037 } else if (cast<FixedVectorType>(V->getType())->getNumElements() !=
14038 CommonMask.size()) {
14039 V = createShuffle(InVectors.front(), nullptr, CommonMask);
14040 transformMaskAfterShuffle(CommonMask, CommonMask);
14042 unsigned VF = std::max(CommonMask.size(), Mask.size());
14043 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14044 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem)
14045 CommonMask[Idx] =
14046 V->getType() != V1->getType()
14047 ? Idx + VF
14048 : Mask[Idx] + cast<FixedVectorType>(V1->getType())
14049 ->getNumElements();
14050 if (V->getType() != V1->getType())
14051 V1 = createShuffle(V1, nullptr, Mask);
14052 InVectors.front() = V;
14053 if (InVectors.size() == 2)
14054 InVectors.back() = V1;
14055 else
14056 InVectors.push_back(V1);
14057 return;
14059 // Check if the second vector is required, i.e. if some of its elements are
14060 // not already covered by the first one.
14061 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14062 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) {
14063 InVectors.push_back(V1);
14064 break;
14067 int VF = getVF(V1);
14068 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14069 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
14070 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF);
14072 /// Adds one more input vector and the reordering to apply for its shuffling.
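/// A small worked example (illustrative): Order = {2, 0, 1} is converted by
/// inversePermutation() into the mask below before being added:
/// \code
///   NewMask = <1, 2, 0>   ; NewMask[Order[I]] == I
/// \endcode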
14073 void addOrdered(Value *V1, ArrayRef<unsigned> Order) {
14074 SmallVector<int> NewMask;
14075 inversePermutation(Order, NewMask);
14076 add(V1, NewMask);
14078 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
14079 Value *Root = nullptr) {
14080 return R.gather(VL, Root, ScalarTy,
14081 [&](Value *V1, Value *V2, ArrayRef<int> Mask) {
14082 return createShuffle(V1, V2, Mask);
14085 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); }
14086 /// Finalize emission of the shuffles.
14087 /// \param Action the action (if any) to be performed before final applying of
14088 /// the \p ExtMask mask.
14089 Value *
14090 finalize(ArrayRef<int> ExtMask,
14091 ArrayRef<std::pair<const TreeEntry *, unsigned>> SubVectors,
14092 ArrayRef<int> SubVectorsMask, unsigned VF = 0,
14093 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
14094 IsFinalized = true;
14095 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
14096 SmallVector<int> NewExtMask(ExtMask);
14097 if (ScalarTyNumElements != 1) {
14098 assert(SLPReVec && "FixedVectorType is not expected.");
14099 transformScalarShuffleIndiciesToVector(ScalarTyNumElements, CommonMask);
14100 transformScalarShuffleIndiciesToVector(ScalarTyNumElements, NewExtMask);
14101 ExtMask = NewExtMask;
14103 if (Action) {
14104 Value *Vec = InVectors.front();
14105 if (InVectors.size() == 2) {
14106 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14107 InVectors.pop_back();
14108 } else {
14109 Vec = createShuffle(Vec, nullptr, CommonMask);
14111 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14112 if (CommonMask[Idx] != PoisonMaskElem)
14113 CommonMask[Idx] = Idx;
14114 assert(VF > 0 &&
14115 "Expected vector length for the final value before action.");
14116 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
14117 if (VecVF < VF) {
14118 SmallVector<int> ResizeMask(VF, PoisonMaskElem);
14119 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0);
14120 Vec = createShuffle(Vec, nullptr, ResizeMask);
14122 Action(Vec, CommonMask);
14123 InVectors.front() = Vec;
14125 if (!SubVectors.empty()) {
14126 Value *Vec = InVectors.front();
14127 if (InVectors.size() == 2) {
14128 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14129 InVectors.pop_back();
14130 } else {
14131 Vec = createShuffle(Vec, nullptr, CommonMask);
14133 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14134 if (CommonMask[Idx] != PoisonMaskElem)
14135 CommonMask[Idx] = Idx;
14136 auto CreateSubVectors = [&](Value *Vec,
14137 SmallVectorImpl<int> &CommonMask) {
14138 for (auto [E, Idx] : SubVectors) {
14139 Value *V = E->VectorizedValue;
14140 if (V->getType()->isIntOrIntVectorTy())
14141 V = castToScalarTyElem(V, any_of(E->Scalars, [&](Value *V) {
14142 return !isKnownNonNegative(
14143 V, SimplifyQuery(*R.DL));
14144 }));
14145 unsigned InsertionIndex = Idx * ScalarTyNumElements;
14146 const unsigned SubVecVF =
14147 cast<FixedVectorType>(V->getType())->getNumElements();
14148 if (InsertionIndex % SubVecVF == 0) {
14149 Vec = Builder.CreateInsertVector(Vec->getType(), Vec, V,
14150 Builder.getInt64(InsertionIndex));
14151 } else {
14152 // Create a shuffle; insertvector requires that the index is a multiple
14153 // of the subvector's length.
14154 const unsigned VecVF =
14155 cast<FixedVectorType>(Vec->getType())->getNumElements();
14156 SmallVector<int> Mask(VecVF, PoisonMaskElem);
14157 std::iota(Mask.begin(), Mask.end(), 0);
14158 for (unsigned I : seq<unsigned>(
14159 InsertionIndex, (Idx + SubVecVF) * ScalarTyNumElements))
14160 Mask[I] = I - Idx + VecVF;
14161 Vec = createShuffle(Vec, V, Mask);
14163 if (!CommonMask.empty()) {
14164 std::iota(
14165 std::next(CommonMask.begin(), InsertionIndex),
14166 std::next(CommonMask.begin(),
14167 (Idx + E->getVectorFactor()) * ScalarTyNumElements),
14168 InsertionIndex);
14171 return Vec;
14173 if (SubVectorsMask.empty()) {
14174 Vec = CreateSubVectors(Vec, CommonMask);
14175 } else {
14176 SmallVector<int> SVMask(SubVectorsMask.begin(), SubVectorsMask.end());
14177 for (auto [I1, I2] : zip(SVMask, CommonMask)) {
14178 if (I2 != PoisonMaskElem) {
14179 assert(I1 == PoisonMaskElem && "Expected unused subvectors mask");
14180 I1 = I2 + CommonMask.size();
14183 Value *InsertVec =
14184 CreateSubVectors(PoisonValue::get(Vec->getType()), CommonMask);
14185 Vec = createShuffle(InsertVec, Vec, SVMask);
14186 for (unsigned I : seq<unsigned>(CommonMask.size())) {
14187 if (SVMask[I] != PoisonMaskElem)
14188 CommonMask[I] = I;
14191 InVectors.front() = Vec;
14194 if (!ExtMask.empty()) {
14195 if (CommonMask.empty()) {
14196 CommonMask.assign(ExtMask.begin(), ExtMask.end());
14197 } else {
14198 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem);
14199 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
14200 if (ExtMask[I] == PoisonMaskElem)
14201 continue;
14202 NewMask[I] = CommonMask[ExtMask[I]];
14204 CommonMask.swap(NewMask);
14207 if (CommonMask.empty()) {
14208 assert(InVectors.size() == 1 && "Expected only one vector with no mask");
14209 return InVectors.front();
14211 if (InVectors.size() == 2)
14212 return createShuffle(InVectors.front(), InVectors.back(), CommonMask);
14213 return createShuffle(InVectors.front(), nullptr, CommonMask);
14216 ~ShuffleInstructionBuilder() {
14217 assert((IsFinalized || CommonMask.empty()) &&
14218 "Shuffle construction must be finalized.");
14222 BoUpSLP::TreeEntry *BoUpSLP::getMatchedVectorizedOperand(const TreeEntry *E,
14223 unsigned NodeIdx) {
14224 ArrayRef<Value *> VL = E->getOperand(NodeIdx);
14225 InstructionsState S = getSameOpcode(VL, *TLI);
14226 // Special processing for GEPs bundle, which may include non-gep values.
14227 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
14228 const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
14229 if (It != VL.end())
14230 S = getSameOpcode(*It, *TLI);
14232 if (!S.getOpcode())
14233 return nullptr;
14234 auto CheckSameVE = [&](const TreeEntry *VE) {
14235 return VE->isSame(VL) &&
14236 (any_of(VE->UserTreeIndices,
14237 [E, NodeIdx](const EdgeInfo &EI) {
14238 return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
14239 }) ||
14240 any_of(VectorizableTree,
14241 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) {
14242 return TE->isOperandGatherNode(
14243 {const_cast<TreeEntry *>(E), NodeIdx}) &&
14244 VE->isSame(TE->Scalars);
14245 }));
14247 TreeEntry *VE = getTreeEntry(S.OpValue);
14248 if (VE && CheckSameVE(VE))
14249 return VE;
14250 auto It = MultiNodeScalars.find(S.OpValue);
14251 if (It != MultiNodeScalars.end()) {
14252 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) {
14253 return TE != VE && CheckSameVE(TE);
14255 if (I != It->getSecond().end())
14256 return *I;
14258 return nullptr;
14261 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
14262 bool PostponedPHIs) {
14263 ValueList &VL = E->getOperand(NodeIdx);
14264 const unsigned VF = VL.size();
14265 if (TreeEntry *VE = getMatchedVectorizedOperand(E, NodeIdx)) {
14266 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) {
14267 // V may be affected by MinBWs.
14268 // We want ShuffleInstructionBuilder to correctly support REVEC. The key
14269 // factor is the number of elements, not their type.
14270 Type *ScalarTy = cast<VectorType>(V->getType())->getElementType();
14271 unsigned NumElements = getNumElements(VL.front()->getType());
14272 ShuffleInstructionBuilder ShuffleBuilder(
14273 NumElements != 1 ? FixedVectorType::get(ScalarTy, NumElements)
14274 : ScalarTy,
14275 Builder, *this);
14276 ShuffleBuilder.add(V, Mask);
14277 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
14278 E->CombinedEntriesWithIndices.size());
14279 transform(E->CombinedEntriesWithIndices, SubVectors.begin(),
14280 [&](const auto &P) {
14281 return std::make_pair(VectorizableTree[P.first].get(),
14282 P.second);
14284 assert((E->CombinedEntriesWithIndices.empty() ||
14285 E->ReorderIndices.empty()) &&
14286 "Expected either combined subnodes or reordering");
14287 return ShuffleBuilder.finalize({}, SubVectors, {});
14289 Value *V = vectorizeTree(VE, PostponedPHIs);
14290 if (VF * getNumElements(VL[0]->getType()) !=
14291 cast<FixedVectorType>(V->getType())->getNumElements()) {
14292 if (!VE->ReuseShuffleIndices.empty()) {
14293 // Reshuffle to get only unique values.
14294 // If some of the scalars are duplicated in the vectorization
14295 // tree entry, we do not vectorize them but instead generate a
14296 // mask for the reuses. But if there are several users of the
14297 // same entry, they may have different vectorization factors.
14298 // This is especially important for PHI nodes. In this case, we
14299 // need to adapt the resulting instruction for the user
14300 // vectorization factor and have to reshuffle it again to take
14301 // only unique elements of the vector. Without this code the
14302 // function incorrectly returns reduced vector instruction with
14303 // the same elements, not with the unique ones.
14305 // block:
14306 // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
14307 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
14308 // ... (use %2)
14309 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
14310 // br %block
14311 SmallVector<int> Mask(VF, PoisonMaskElem);
14312 for (auto [I, V] : enumerate(VL)) {
14313 if (isa<PoisonValue>(V))
14314 continue;
14315 Mask[I] = VE->findLaneForValue(V);
14317 V = FinalShuffle(V, Mask);
14318 } else {
14319 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
14320 "Expected vectorization factor less "
14321 "than original vector size.");
14322 SmallVector<int> UniformMask(VF, 0);
14323 std::iota(UniformMask.begin(), UniformMask.end(), 0);
14324 V = FinalShuffle(V, UniformMask);
14327 // Need to update the operand gather node if the operand is actually not a
14328 // vectorized node, but a buildvector/gather node which matches one of
14329 // the vectorized nodes.
14330 if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) {
14331 return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
14332 }) == VE->UserTreeIndices.end()) {
14333 auto *It =
14334 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
14335 return TE->isGather() && TE->UserTreeIndices.front().UserTE == E &&
14336 TE->UserTreeIndices.front().EdgeIdx == NodeIdx;
14338 assert(It != VectorizableTree.end() && "Expected gather node operand.");
14339 (*It)->VectorizedValue = V;
14341 return V;
14344 // Find the corresponding gather entry and vectorize it.
14345 // This allows being more accurate with the tree/graph transformations and,
14346 // in many cases, checks the correctness of the transformations.
14347 auto *I = find_if(VectorizableTree,
14348 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) {
14349 return TE->isOperandGatherNode({E, NodeIdx});
14351 assert(I != VectorizableTree.end() && "Gather node is not in the graph.");
14352 assert(I->get()->UserTreeIndices.size() == 1 &&
14353 "Expected only single user for the gather node.");
14354 assert(I->get()->isSame(VL) && "Expected same list of scalars.");
14355 return vectorizeTree(I->get(), PostponedPHIs);
14358 template <typename BVTy, typename ResTy, typename... Args>
14359 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
14360 Args &...Params) {
14361 assert(E->isGather() && "Expected gather node.");
14362 unsigned VF = E->getVectorFactor();
14364 bool NeedFreeze = false;
14365 SmallVector<int> ReuseShuffleIndices(E->ReuseShuffleIndices.begin(),
14366 E->ReuseShuffleIndices.end());
14367 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
14368 // Clear values, to be replaced by insertvector instructions.
14369 for (auto [EIdx, Idx] : E->CombinedEntriesWithIndices)
14370 for_each(MutableArrayRef(GatheredScalars)
14371 .slice(Idx, VectorizableTree[EIdx]->getVectorFactor()),
14372 [&](Value *&V) { V = PoisonValue::get(V->getType()); });
14373 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
14374 E->CombinedEntriesWithIndices.size());
14375 transform(E->CombinedEntriesWithIndices, SubVectors.begin(),
14376 [&](const auto &P) {
14377 return std::make_pair(VectorizableTree[P.first].get(), P.second);
14379 // Build a mask out of the reorder indices and reorder scalars per this
14380 // mask.
14381 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
14382 E->ReorderIndices.end());
14383 if (!ReorderMask.empty())
14384 reorderScalars(GatheredScalars, ReorderMask);
14385 SmallVector<int> SubVectorsMask;
14386 inversePermutation(E->ReorderIndices, SubVectorsMask);
14387 // Transform non-clustered elements in the mask to poison (-1).
14388 // "Clustered" operations will be reordered using this mask later.
14389 if (!SubVectors.empty() && !SubVectorsMask.empty()) {
14390 for (unsigned I : seq<unsigned>(GatheredScalars.size()))
14391 if (E->Scalars[I] == GatheredScalars[ReorderMask[I]])
14392 SubVectorsMask[ReorderMask[I]] = PoisonMaskElem;
14393 } else {
14394 SubVectorsMask.clear();
14396 SmallVector<Value *> StoredGS(GatheredScalars);
14397 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF,
14398 unsigned I, unsigned SliceSize,
14399 bool IsNotPoisonous) {
14400 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) {
14401 return isa<UndefValue>(V) && !isa<PoisonValue>(V);
14403 return false;
14404 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE;
14405 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx;
14406 if (UserTE->getNumOperands() != 2)
14407 return false;
14408 if (!IsNotPoisonous) {
14409 auto *It =
14410 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) {
14411 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) {
14412 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx;
14413 }) != TE->UserTreeIndices.end();
14415 if (It == VectorizableTree.end())
14416 return false;
14417 SmallVector<Value *> GS((*It)->Scalars.begin(), (*It)->Scalars.end());
14418 if (!(*It)->ReorderIndices.empty()) {
14419 inversePermutation((*It)->ReorderIndices, ReorderMask);
14420 reorderScalars(GS, ReorderMask);
14422 if (!all_of(zip(GatheredScalars, GS), [&](const auto &P) {
14423 Value *V0 = std::get<0>(P);
14424 Value *V1 = std::get<1>(P);
14425 return !isa<UndefValue>(V0) || isa<PoisonValue>(V0) ||
14426 (isa<UndefValue>(V0) && !isa<PoisonValue>(V0) &&
14427 is_contained(E->Scalars, V1));
14429 return false;
14431 int Idx;
14432 if ((Mask.size() < InputVF &&
14433 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) &&
14434 Idx == 0) ||
14435 (Mask.size() == InputVF &&
14436 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) {
14437 std::iota(
14438 std::next(Mask.begin(), I * SliceSize),
14439 std::next(Mask.begin(),
14440 I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
14442 } else {
14443 unsigned IVal =
14444 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; });
14445 std::fill(
14446 std::next(Mask.begin(), I * SliceSize),
14447 std::next(Mask.begin(),
14448 I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
14449 IVal);
14451 return true;
14453 BVTy ShuffleBuilder(ScalarTy, Params...);
14454 ResTy Res = ResTy();
14455 SmallVector<int> Mask;
14456 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem);
14457 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles;
14458 Value *ExtractVecBase = nullptr;
14459 bool UseVecBaseAsInput = false;
14460 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles;
14461 SmallVector<SmallVector<const TreeEntry *>> Entries;
14462 Type *OrigScalarTy = GatheredScalars.front()->getType();
14463 auto *VecTy = getWidenedType(ScalarTy, GatheredScalars.size());
14464 unsigned NumParts = TTI->getNumberOfParts(VecTy);
14465 if (NumParts == 0 || NumParts >= GatheredScalars.size() ||
14466 VecTy->getNumElements() % NumParts != 0 ||
14467 !hasFullVectorsOrPowerOf2(*TTI, VecTy->getElementType(),
14468 VecTy->getNumElements() / NumParts))
14469 NumParts = 1;
14470 if (!all_of(GatheredScalars, IsaPred<UndefValue>)) {
14471 // Check for gathered extracts.
14472 bool Resized = false;
14473 ExtractShuffles =
14474 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts);
14475 if (!ExtractShuffles.empty()) {
14476 SmallVector<const TreeEntry *> ExtractEntries;
14477 for (auto [Idx, I] : enumerate(ExtractMask)) {
14478 if (I == PoisonMaskElem)
14479 continue;
14480 if (const auto *TE = getTreeEntry(
14481 cast<ExtractElementInst>(StoredGS[Idx])->getVectorOperand()))
14482 ExtractEntries.push_back(TE);
14484 if (std::optional<ResTy> Delayed =
14485 ShuffleBuilder.needToDelay(E, ExtractEntries)) {
14486 // Delay emission of gathers which are not ready yet.
14487 PostponedGathers.insert(E);
14488 // Postpone gather emission, will be emitted after the end of the
14489 // process to keep correct order.
14490 return *Delayed;
14491 }
14492 if (Value *VecBase = ShuffleBuilder.adjustExtracts(
14493 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) {
14494 ExtractVecBase = VecBase;
14495 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType()))
14496 if (VF == VecBaseTy->getNumElements() &&
14497 GatheredScalars.size() != VF) {
14498 Resized = true;
14499 GatheredScalars.append(VF - GatheredScalars.size(),
14500 PoisonValue::get(OrigScalarTy));
14501 }
14502 }
14503 }
14504 // Gather extracts after we check for full matched gathers only.
14505 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load ||
14506 ((E->getOpcode() == Instruction::Load ||
14507 any_of(E->Scalars, IsaPred<LoadInst>)) &&
14508 any_of(E->Scalars,
14509 [this](Value *V) {
14510 return isa<LoadInst>(V) && getTreeEntry(V);
14511 })) ||
14512 E->isAltShuffle() ||
14513 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) ||
14514 isSplat(E->Scalars) ||
14515 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) {
14516 GatherShuffles =
14517 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts);
14518 }
14519 if (!GatherShuffles.empty()) {
14520 if (std::optional<ResTy> Delayed =
14521 ShuffleBuilder.needToDelay(E, Entries)) {
14522 // Delay emission of gathers which are not ready yet.
14523 PostponedGathers.insert(E);
14524 // Postpone gather emission, will be emitted after the end of the
14525 // process to keep correct order.
14526 return *Delayed;
14527 }
14528 if (GatherShuffles.size() == 1 &&
14529 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
14530 Entries.front().front()->isSame(E->Scalars)) {
14531 // Perfect match in the graph, will reuse the previously vectorized
14532 // node. Cost is 0.
14533 LLVM_DEBUG(dbgs() << "SLP: perfect diamond match for gather bundle "
14534 << shortBundleName(E->Scalars, E->Idx) << ".\n");
14535 // Restore the mask for previous partially matched values.
14536 Mask.resize(E->Scalars.size());
14537 const TreeEntry *FrontTE = Entries.front().front();
14538 if (FrontTE->ReorderIndices.empty() &&
14539 ((FrontTE->ReuseShuffleIndices.empty() &&
14540 E->Scalars.size() == FrontTE->Scalars.size()) ||
14541 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) {
14542 std::iota(Mask.begin(), Mask.end(), 0);
14543 } else {
14544 for (auto [I, V] : enumerate(E->Scalars)) {
14545 if (isa<PoisonValue>(V)) {
14546 Mask[I] = PoisonMaskElem;
14547 continue;
14548 }
14549 Mask[I] = FrontTE->findLaneForValue(V);
14550 }
14551 }
14552 ShuffleBuilder.add(*FrontTE, Mask);
14553 Res = ShuffleBuilder.finalize(E->getCommonMask(), SubVectors,
14554 SubVectorsMask);
14555 return Res;
14556 }
14557 if (!Resized) {
14558 if (GatheredScalars.size() != VF &&
14559 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) {
14560 return any_of(TEs, [&](const TreeEntry *TE) {
14561 return TE->getVectorFactor() == VF;
14562 });
14563 }))
14564 GatheredScalars.append(VF - GatheredScalars.size(),
14565 PoisonValue::get(OrigScalarTy));
14566 }
14567 // Remove shuffled elements from list of gathers.
14568 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
14569 if (Mask[I] != PoisonMaskElem)
14570 GatheredScalars[I] = PoisonValue::get(OrigScalarTy);
14571 }
14572 }
14573 }
14574 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars,
14575 SmallVectorImpl<int> &ReuseMask,
14576 bool IsRootPoison) {
14577 // For splats we can emit broadcasts instead of gathers, so try to find
14578 // such sequences.
14579 bool IsSplat = IsRootPoison && isSplat(Scalars) &&
14580 (Scalars.size() > 2 || Scalars.front() == Scalars.back());
14581 Scalars.append(VF - Scalars.size(), PoisonValue::get(OrigScalarTy));
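// [Editorial addition] The loop below compacts the scalars: constants keep
// their lane, repeated non-constant values are materialized once and
// ReuseMask redirects the remaining lanes to that single copy (lane 0 for a
// splat), so one gather plus one shuffle rebuilds the whole vector.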
14582 SmallVector<int> UndefPos;
14583 DenseMap<Value *, unsigned> UniquePositions;
14584 // Gather unique non-const values and all constant values.
14585 // For repeated values, just shuffle them.
14586 int NumNonConsts = 0;
14587 int SinglePos = 0;
14588 for (auto [I, V] : enumerate(Scalars)) {
14589 if (isa<UndefValue>(V)) {
14590 if (!isa<PoisonValue>(V)) {
14591 ReuseMask[I] = I;
14592 UndefPos.push_back(I);
14593 }
14594 continue;
14595 }
14596 if (isConstant(V)) {
14597 ReuseMask[I] = I;
14598 continue;
14599 }
14600 ++NumNonConsts;
14601 SinglePos = I;
14602 Value *OrigV = V;
14603 Scalars[I] = PoisonValue::get(OrigScalarTy);
14604 if (IsSplat) {
14605 Scalars.front() = OrigV;
14606 ReuseMask[I] = 0;
14607 } else {
14608 const auto Res = UniquePositions.try_emplace(OrigV, I);
14609 Scalars[Res.first->second] = OrigV;
14610 ReuseMask[I] = Res.first->second;
14611 }
14612 }
14613 if (NumNonConsts == 1) {
14614 // Restore single insert element.
14615 if (IsSplat) {
14616 ReuseMask.assign(VF, PoisonMaskElem);
14617 std::swap(Scalars.front(), Scalars[SinglePos]);
14618 if (!UndefPos.empty() && UndefPos.front() == 0)
14619 Scalars.front() = UndefValue::get(OrigScalarTy);
14620 }
14621 ReuseMask[SinglePos] = SinglePos;
14622 } else if (!UndefPos.empty() && IsSplat) {
14623 // For undef values, try to replace them with the simple broadcast.
14624 // We can do it if the broadcasted value is guaranteed to be
14625 // non-poisonous, or by freezing the incoming scalar value first.
14626 auto *It = find_if(Scalars, [this, E](Value *V) {
14627 return !isa<UndefValue>(V) &&
14628 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) ||
14629 (E->UserTreeIndices.size() == 1 &&
14630 any_of(V->uses(), [E](const Use &U) {
14631 // Check if the value is already used in the same operation in
14632 // one of the nodes.
14633 return E->UserTreeIndices.front().EdgeIdx !=
14634 U.getOperandNo() &&
14635 is_contained(
14636 E->UserTreeIndices.front().UserTE->Scalars,
14637 U.getUser());
14638 })));
14639 });
14640 if (It != Scalars.end()) {
14641 // Replace undefs by the non-poisoned scalars and emit broadcast.
14642 int Pos = std::distance(Scalars.begin(), It);
14643 for (int I : UndefPos) {
14644 // Set the undef position to the non-poisoned scalar.
14645 ReuseMask[I] = Pos;
14646 // Replace the undef by the poison, in the mask it is replaced by
14647 // non-poisoned scalar already.
14648 if (I != Pos)
14649 Scalars[I] = PoisonValue::get(OrigScalarTy);
14650 }
14651 } else {
14652 // Replace undefs by the poisons, emit broadcast and then emit
14653 // freeze.
14654 for (int I : UndefPos) {
14655 ReuseMask[I] = PoisonMaskElem;
14656 if (isa<UndefValue>(Scalars[I]))
14657 Scalars[I] = PoisonValue::get(OrigScalarTy);
14658 }
14659 NeedFreeze = true;
14660 }
14661 }
14662 };
14663 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) {
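// [Editorial addition] From here the gather is assembled from existing
// vectors: up to two vectors feeding the extractelements and up to two
// vectorized tree entries per part. IsNonPoisoned and IsUsedInExpr track
// whether undef lanes may be left as-is later and whether a final freeze
// is required.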
14664 bool IsNonPoisoned = true;
14665 bool IsUsedInExpr = true;
14666 Value *Vec1 = nullptr;
14667 if (!ExtractShuffles.empty()) {
14668 // Gather of extractelements can be represented as just a shuffle of
14669 // a single/two vectors the scalars are extracted from.
14670 // Find input vectors.
14671 Value *Vec2 = nullptr;
14672 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) {
14673 if (!Mask.empty() && Mask[I] != PoisonMaskElem)
14674 ExtractMask[I] = PoisonMaskElem;
14675 }
14676 if (UseVecBaseAsInput) {
14677 Vec1 = ExtractVecBase;
14678 } else {
14679 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) {
14680 if (ExtractMask[I] == PoisonMaskElem)
14681 continue;
14682 if (isa<UndefValue>(E->Scalars[I]))
14683 continue;
14684 auto *EI = cast<ExtractElementInst>(StoredGS[I]);
14685 Value *VecOp = EI->getVectorOperand();
14686 if (const auto *TE = getTreeEntry(VecOp))
14687 if (TE->VectorizedValue)
14688 VecOp = TE->VectorizedValue;
14689 if (!Vec1) {
14690 Vec1 = VecOp;
14691 } else if (Vec1 != VecOp) {
14692 assert((!Vec2 || Vec2 == VecOp) &&
14693 "Expected only 1 or 2 vectors shuffle.");
14694 Vec2 = VecOp;
14695 }
14696 }
14697 }
14698 if (Vec2) {
14699 IsUsedInExpr = false;
14700 IsNonPoisoned &=
14701 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2);
14702 ShuffleBuilder.add(Vec1, Vec2, ExtractMask);
14703 } else if (Vec1) {
14704 bool IsNotPoisonedVec = isGuaranteedNotToBePoison(Vec1);
14705 IsUsedInExpr &= FindReusedSplat(
14706 ExtractMask,
14707 cast<FixedVectorType>(Vec1->getType())->getNumElements(), 0,
14708 ExtractMask.size(), IsNotPoisonedVec);
14709 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true);
14710 IsNonPoisoned &= IsNotPoisonedVec;
14711 } else {
14712 IsUsedInExpr = false;
14713 ShuffleBuilder.add(PoisonValue::get(VecTy), ExtractMask,
14714 /*ForExtracts=*/true);
14715 }
14716 }
14717 if (!GatherShuffles.empty()) {
14718 unsigned SliceSize = getPartNumElems(E->Scalars.size(), NumParts);
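// [Editorial addition] Entries/GatherShuffles are per part: part I covers
// SliceSize lanes of Mask and is built as a shuffle of one or two
// previously vectorized tree entries.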
14719 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
14720 for (const auto [I, TEs] : enumerate(Entries)) {
14721 if (TEs.empty()) {
14722 assert(!GatherShuffles[I] &&
14723 "No shuffles with empty entries list expected.");
14724 continue;
14725 }
14726 assert((TEs.size() == 1 || TEs.size() == 2) &&
14727 "Expected shuffle of 1 or 2 entries.");
14728 unsigned Limit = getNumElems(Mask.size(), SliceSize, I);
14729 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, Limit);
14730 VecMask.assign(VecMask.size(), PoisonMaskElem);
14731 copy(SubMask, std::next(VecMask.begin(), I * SliceSize));
14732 if (TEs.size() == 1) {
14733 bool IsNotPoisonedVec =
14734 TEs.front()->VectorizedValue
14735 ? isGuaranteedNotToBePoison(TEs.front()->VectorizedValue)
14736 : true;
14737 IsUsedInExpr &=
14738 FindReusedSplat(VecMask, TEs.front()->getVectorFactor(), I,
14739 SliceSize, IsNotPoisonedVec);
14740 ShuffleBuilder.add(*TEs.front(), VecMask);
14741 IsNonPoisoned &= IsNotPoisonedVec;
14742 } else {
14743 IsUsedInExpr = false;
14744 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask);
14745 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue)
14746 IsNonPoisoned &=
14747 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) &&
14748 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue);
14749 }
14750 }
14751 }
14752 // Try to figure out best way to combine values: build a shuffle and insert
14753 // elements or just build several shuffles.
14754 // Insert non-constant scalars.
14755 SmallVector<Value *> NonConstants(GatheredScalars);
14756 int EMSz = ExtractMask.size();
14757 int MSz = Mask.size();
14758 // Try to build constant vector and shuffle with it only if currently we
14759 // have a single permutation and more than 1 scalar constants.
14760 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty();
14761 bool IsIdentityShuffle =
14762 ((UseVecBaseAsInput ||
14763 all_of(ExtractShuffles,
14764 [](const std::optional<TTI::ShuffleKind> &SK) {
14765 return SK.value_or(TTI::SK_PermuteTwoSrc) ==
14766 TTI::SK_PermuteSingleSrc;
14767 })) &&
14768 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) &&
14769 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) ||
14770 (!GatherShuffles.empty() &&
14771 all_of(GatherShuffles,
14772 [](const std::optional<TTI::ShuffleKind> &SK) {
14773 return SK.value_or(TTI::SK_PermuteTwoSrc) ==
14774 TTI::SK_PermuteSingleSrc;
14775 }) &&
14776 none_of(Mask, [&](int I) { return I >= MSz; }) &&
14777 ShuffleVectorInst::isIdentityMask(Mask, MSz));
14778 bool EnoughConstsForShuffle =
14779 IsSingleShuffle &&
14780 (none_of(GatheredScalars,
14781 [](Value *V) {
14782 return isa<UndefValue>(V) && !isa<PoisonValue>(V);
14783 }) ||
14784 any_of(GatheredScalars,
14785 [](Value *V) {
14786 return isa<Constant>(V) && !isa<UndefValue>(V);
14787 })) &&
14788 (!IsIdentityShuffle ||
14789 (GatheredScalars.size() == 2 &&
14790 any_of(GatheredScalars,
14791 [](Value *V) { return !isa<UndefValue>(V); })) ||
14792 count_if(GatheredScalars, [](Value *V) {
14793 return isa<Constant>(V) && !isa<PoisonValue>(V);
14794 }) > 1);
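// [Editorial addition] EnoughConstsForShuffle means a single permutation
// source is involved and there are enough non-poison constants that
// building a separate constant vector and shuffling it in is expected to be
// cheaper than inserting the constants individually.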
14795 // NonConstants array contains just non-constant values, GatheredScalars
14796 // contains only constants to build the final vector and then shuffle.
14797 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) {
14798 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I]))
14799 NonConstants[I] = PoisonValue::get(OrigScalarTy);
14800 else
14801 GatheredScalars[I] = PoisonValue::get(OrigScalarTy);
14802 }
14803 // Generate constants for final shuffle and build a mask for them.
14804 if (!all_of(GatheredScalars, IsaPred<PoisonValue>)) {
14805 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem);
14806 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true);
14807 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size());
14808 ShuffleBuilder.add(BV, BVMask);
14809 }
14810 if (all_of(NonConstants, [=](Value *V) {
14811 return isa<PoisonValue>(V) ||
14812 (IsSingleShuffle && ((IsIdentityShuffle &&
14813 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V));
14814 }))
14815 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
14816 SubVectorsMask);
14817 else
14818 Res = ShuffleBuilder.finalize(
14819 E->ReuseShuffleIndices, SubVectors, SubVectorsMask, E->Scalars.size(),
14820 [&](Value *&Vec, SmallVectorImpl<int> &Mask) {
14821 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false);
14822 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec);
14823 });
14824 } else if (!allConstant(GatheredScalars)) {
14825 // Gather unique scalars and all constants.
14826 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem);
14827 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true);
14828 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size());
14829 ShuffleBuilder.add(BV, ReuseMask);
14830 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
14831 SubVectorsMask);
14832 } else {
14833 // Gather all constants.
14834 SmallVector<int> Mask(GatheredScalars.size(), PoisonMaskElem);
14835 for (auto [I, V] : enumerate(GatheredScalars)) {
14836 if (!isa<PoisonValue>(V))
14837 Mask[I] = I;
14838 }
14839 Value *BV = ShuffleBuilder.gather(GatheredScalars);
14840 ShuffleBuilder.add(BV, Mask);
14841 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
14842 SubVectorsMask);
14843 }
14845 if (NeedFreeze)
14846 Res = ShuffleBuilder.createFreeze(Res);
14847 return Res;
14848 }
14850 Value *BoUpSLP::createBuildVector(const TreeEntry *E, Type *ScalarTy,
14851 bool PostponedPHIs) {
14852 for (auto [EIdx, _] : E->CombinedEntriesWithIndices)
14853 (void)vectorizeTree(VectorizableTree[EIdx].get(), PostponedPHIs);
14854 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, ScalarTy,
14855 Builder, *this);
14856 }
14858 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
14859 IRBuilderBase::InsertPointGuard Guard(Builder);
14861 if (E->VectorizedValue &&
14862 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI ||
14863 E->isAltShuffle())) {
14864 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
14865 return E->VectorizedValue;
14866 }
14868 Value *V = E->Scalars.front();
14869 Type *ScalarTy = V->getType();
14870 if (!isa<CmpInst>(V))
14871 ScalarTy = getValueType(V);
14872 auto It = MinBWs.find(E);
14873 if (It != MinBWs.end()) {
14874 auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy);
14875 ScalarTy = IntegerType::get(F->getContext(), It->second.first);
14876 if (VecTy)
14877 ScalarTy = getWidenedType(ScalarTy, VecTy->getNumElements());
14878 }
14879 auto *VecTy = getWidenedType(ScalarTy, E->Scalars.size());
14880 if (E->isGather()) {
14881 // Set insert point for non-reduction initial nodes.
14882 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList)
14883 setInsertPointAfterBundle(E);
14884 Value *Vec = createBuildVector(E, ScalarTy, PostponedPHIs);
14885 E->VectorizedValue = Vec;
14886 return Vec;
14887 }
14889 bool IsReverseOrder = isReverseOrder(E->ReorderIndices);
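// [Editorial addition] FinalShuffle applies the node's reordering and
// ReuseShuffleIndices (plus combined sub-vectors) to a freshly built vector.
// Reverse-ordered strided nodes skip the reorder shuffle because they are
// emitted with a negative stride instead.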
14890 auto FinalShuffle = [&](Value *V, const TreeEntry *E) {
14891 ShuffleInstructionBuilder ShuffleBuilder(ScalarTy, Builder, *this);
14892 if (E->getOpcode() == Instruction::Store &&
14893 E->State == TreeEntry::Vectorize) {
14894 ArrayRef<int> Mask =
14895 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()),
14896 E->ReorderIndices.size());
14897 ShuffleBuilder.add(V, Mask);
14898 } else if (E->State == TreeEntry::StridedVectorize && IsReverseOrder) {
14899 ShuffleBuilder.addOrdered(V, {});
14900 } else {
14901 ShuffleBuilder.addOrdered(V, E->ReorderIndices);
14902 }
14903 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
14904 E->CombinedEntriesWithIndices.size());
14905 transform(
14906 E->CombinedEntriesWithIndices, SubVectors.begin(), [&](const auto &P) {
14907 return std::make_pair(VectorizableTree[P.first].get(), P.second);
14908 });
14909 assert(
14910 (E->CombinedEntriesWithIndices.empty() || E->ReorderIndices.empty()) &&
14911 "Expected either combined subnodes or reordering");
14912 return ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors, {});
14913 };
14915 assert(!E->isGather() && "Unhandled state");
14916 unsigned ShuffleOrOp =
14917 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
14918 Instruction *VL0 = E->getMainOp();
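// [Editorial addition] An operand is treated as signed if MinBWs recorded a
// signed minimal bitwidth for its entry or, failing that, if any of its
// scalars is not known to be non-negative.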
14919 auto GetOperandSignedness = [&](unsigned Idx) {
14920 const TreeEntry *OpE = getOperandEntry(E, Idx);
14921 bool IsSigned = false;
14922 auto It = MinBWs.find(OpE);
14923 if (It != MinBWs.end())
14924 IsSigned = It->second.second;
14925 else
14926 IsSigned = any_of(OpE->Scalars, [&](Value *R) {
14927 return !isKnownNonNegative(R, SimplifyQuery(*DL));
14929 return IsSigned;
14930 };
14931 switch (ShuffleOrOp) {
14932 case Instruction::PHI: {
14933 assert((E->ReorderIndices.empty() || !E->ReuseShuffleIndices.empty() ||
14934 E != VectorizableTree.front().get() ||
14935 !E->UserTreeIndices.empty()) &&
14936 "PHI reordering is free.");
14937 if (PostponedPHIs && E->VectorizedValue)
14938 return E->VectorizedValue;
14939 auto *PH = cast<PHINode>(VL0);
14940 Builder.SetInsertPoint(PH->getParent(),
14941 PH->getParent()->getFirstNonPHIIt());
14942 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
14943 if (PostponedPHIs || !E->VectorizedValue) {
14944 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
14945 E->PHI = NewPhi;
14946 Value *V = NewPhi;
14948 // Adjust insertion point once all PHI's have been generated.
14949 Builder.SetInsertPoint(PH->getParent(),
14950 PH->getParent()->getFirstInsertionPt());
14951 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
14953 V = FinalShuffle(V, E);
14955 E->VectorizedValue = V;
14956 if (PostponedPHIs)
14957 return V;
14958 }
14959 PHINode *NewPhi = cast<PHINode>(E->PHI);
14960 // If phi node is fully emitted - exit.
14961 if (NewPhi->getNumIncomingValues() != 0)
14962 return NewPhi;
14964 // PHINodes may have multiple entries from the same block. We want to
14965 // visit every block once.
14966 SmallPtrSet<BasicBlock *, 4> VisitedBBs;
14968 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) {
14969 ValueList Operands;
14970 BasicBlock *IBB = PH->getIncomingBlock(I);
14972 // Stop emission if all incoming values are generated.
14973 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) {
14974 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
14975 return NewPhi;
14976 }
14978 if (!VisitedBBs.insert(IBB).second) {
14979 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
14980 continue;
14981 }
14983 Builder.SetInsertPoint(IBB->getTerminator());
14984 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
14985 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true);
14986 if (VecTy != Vec->getType()) {
14987 assert((It != MinBWs.end() || getOperandEntry(E, I)->isGather() ||
14988 MinBWs.contains(getOperandEntry(E, I))) &&
14989 "Expected item in MinBWs.");
14990 Vec = Builder.CreateIntCast(Vec, VecTy, GetOperandSignedness(I));
14991 }
14992 NewPhi->addIncoming(Vec, IBB);
14993 }
14995 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
14996 "Invalid number of incoming values");
14997 assert(E->VectorizedValue && "Expected vectorized value.");
14998 return E->VectorizedValue;
14999 }
15001 case Instruction::ExtractElement: {
15002 Value *V = E->getSingleOperand(0);
15003 if (const TreeEntry *TE = getTreeEntry(V))
15004 V = TE->VectorizedValue;
15005 setInsertPointAfterBundle(E);
15006 V = FinalShuffle(V, E);
15007 E->VectorizedValue = V;
15008 return V;
15009 }
15010 case Instruction::ExtractValue: {
15011 auto *LI = cast<LoadInst>(E->getSingleOperand(0));
15012 Builder.SetInsertPoint(LI);
15013 Value *Ptr = LI->getPointerOperand();
15014 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
15015 Value *NewV = propagateMetadata(V, E->Scalars);
15016 NewV = FinalShuffle(NewV, E);
15017 E->VectorizedValue = NewV;
15018 return NewV;
15019 }
15020 case Instruction::InsertElement: {
15021 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
15022 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
15023 Value *V = vectorizeOperand(E, 1, PostponedPHIs);
15024 ArrayRef<Value *> Op = E->getOperand(1);
15025 Type *ScalarTy = Op.front()->getType();
15026 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) {
15027 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs.");
15028 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1));
15029 assert(Res.first > 0 && "Expected item in MinBWs.");
15030 V = Builder.CreateIntCast(
15031 V,
15032 getWidenedType(
15033 ScalarTy,
15034 cast<FixedVectorType>(V->getType())->getNumElements()),
15035 Res.second);
15036 }
15038 // Create InsertVector shuffle if necessary
15039 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
15040 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
15041 }));
15042 const unsigned NumElts =
15043 cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
15044 const unsigned NumScalars = E->Scalars.size();
15046 unsigned Offset = *getElementIndex(VL0);
15047 assert(Offset < NumElts && "Failed to find vector index offset");
15049 // Create shuffle to resize vector
15050 SmallVector<int> Mask;
15051 if (!E->ReorderIndices.empty()) {
15052 inversePermutation(E->ReorderIndices, Mask);
15053 Mask.append(NumElts - NumScalars, PoisonMaskElem);
15054 } else {
15055 Mask.assign(NumElts, PoisonMaskElem);
15056 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
15057 }
15058 // Create InsertVector shuffle if necessary
15059 bool IsIdentity = true;
15060 SmallVector<int> PrevMask(NumElts, PoisonMaskElem);
15061 Mask.swap(PrevMask);
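// [Editorial addition] After the swap, PrevMask holds the resize/reorder
// mapping built above and Mask is refilled so that Mask[InsertIdx - Offset]
// names the source lane of each inserted scalar; IsIdentity tracks whether
// that mapping is a plain identity and the resize shuffle can be skipped.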
15062 for (unsigned I = 0; I < NumScalars; ++I) {
15063 Value *Scalar = E->Scalars[PrevMask[I]];
15064 unsigned InsertIdx = *getElementIndex(Scalar);
15065 IsIdentity &= InsertIdx - Offset == I;
15066 Mask[InsertIdx - Offset] = I;
15067 }
15068 if (!IsIdentity || NumElts != NumScalars) {
15069 Value *V2 = nullptr;
15070 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V);
15071 SmallVector<int> InsertMask(Mask);
15072 if (NumElts != NumScalars && Offset == 0) {
15073 // Follow all insert element instructions from the current buildvector
15074 // sequence.
15075 InsertElementInst *Ins = cast<InsertElementInst>(VL0);
15076 do {
15077 std::optional<unsigned> InsertIdx = getElementIndex(Ins);
15078 if (!InsertIdx)
15079 break;
15080 if (InsertMask[*InsertIdx] == PoisonMaskElem)
15081 InsertMask[*InsertIdx] = *InsertIdx;
15082 if (!Ins->hasOneUse())
15083 break;
15084 Ins = dyn_cast_or_null<InsertElementInst>(
15085 Ins->getUniqueUndroppableUser());
15086 } while (Ins);
15087 SmallBitVector UseMask =
15088 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask);
15089 SmallBitVector IsFirstPoison =
15090 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15091 SmallBitVector IsFirstUndef =
15092 isUndefVector(FirstInsert->getOperand(0), UseMask);
15093 if (!IsFirstPoison.all()) {
15094 unsigned Idx = 0;
15095 for (unsigned I = 0; I < NumElts; I++) {
15096 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) &&
15097 IsFirstUndef.test(I)) {
15098 if (IsVNonPoisonous) {
15099 InsertMask[I] = I < NumScalars ? I : 0;
15100 continue;
15101 }
15102 if (!V2)
15103 V2 = UndefValue::get(V->getType());
15104 if (Idx >= NumScalars)
15105 Idx = NumScalars - 1;
15106 InsertMask[I] = NumScalars + Idx;
15107 ++Idx;
15108 } else if (InsertMask[I] != PoisonMaskElem &&
15109 Mask[I] == PoisonMaskElem) {
15110 InsertMask[I] = PoisonMaskElem;
15111 }
15112 }
15113 } else {
15114 InsertMask = Mask;
15115 }
15116 }
15117 if (!V2)
15118 V2 = PoisonValue::get(V->getType());
15119 V = Builder.CreateShuffleVector(V, V2, InsertMask);
15120 if (auto *I = dyn_cast<Instruction>(V)) {
15121 GatherShuffleExtractSeq.insert(I);
15122 CSEBlocks.insert(I->getParent());
15123 }
15124 }
15126 SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
15127 for (unsigned I = 0; I < NumElts; I++) {
15128 if (Mask[I] != PoisonMaskElem)
15129 InsertMask[Offset + I] = I;
15130 }
15131 SmallBitVector UseMask =
15132 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask);
15133 SmallBitVector IsFirstUndef =
15134 isUndefVector(FirstInsert->getOperand(0), UseMask);
15135 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) &&
15136 NumElts != NumScalars) {
15137 if (IsFirstUndef.all()) {
15138 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) {
15139 SmallBitVector IsFirstPoison =
15140 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15141 if (!IsFirstPoison.all()) {
15142 for (unsigned I = 0; I < NumElts; I++) {
15143 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I))
15144 InsertMask[I] = I + NumElts;
15145 }
15146 }
15147 V = Builder.CreateShuffleVector(
15148 V,
15149 IsFirstPoison.all() ? PoisonValue::get(V->getType())
15150 : FirstInsert->getOperand(0),
15151 InsertMask, cast<Instruction>(E->Scalars.back())->getName());
15152 if (auto *I = dyn_cast<Instruction>(V)) {
15153 GatherShuffleExtractSeq.insert(I);
15154 CSEBlocks.insert(I->getParent());
15155 }
15156 }
15157 } else {
15158 SmallBitVector IsFirstPoison =
15159 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15160 for (unsigned I = 0; I < NumElts; I++) {
15161 if (InsertMask[I] == PoisonMaskElem)
15162 InsertMask[I] = IsFirstPoison.test(I) ? PoisonMaskElem : I;
15163 else
15164 InsertMask[I] += NumElts;
15165 }
15166 V = Builder.CreateShuffleVector(
15167 FirstInsert->getOperand(0), V, InsertMask,
15168 cast<Instruction>(E->Scalars.back())->getName());
15169 if (auto *I = dyn_cast<Instruction>(V)) {
15170 GatherShuffleExtractSeq.insert(I);
15171 CSEBlocks.insert(I->getParent());
15172 }
15173 }
15174 }
15176 ++NumVectorInstructions;
15177 E->VectorizedValue = V;
15178 return V;
15179 }
15180 case Instruction::ZExt:
15181 case Instruction::SExt:
15182 case Instruction::FPToUI:
15183 case Instruction::FPToSI:
15184 case Instruction::FPExt:
15185 case Instruction::PtrToInt:
15186 case Instruction::IntToPtr:
15187 case Instruction::SIToFP:
15188 case Instruction::UIToFP:
15189 case Instruction::Trunc:
15190 case Instruction::FPTrunc:
15191 case Instruction::BitCast: {
15192 setInsertPointAfterBundle(E);
15194 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs);
15195 if (E->VectorizedValue) {
15196 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15197 return E->VectorizedValue;
15198 }
15200 auto *CI = cast<CastInst>(VL0);
15201 Instruction::CastOps VecOpcode = CI->getOpcode();
15202 Type *SrcScalarTy = cast<VectorType>(InVec->getType())->getElementType();
15203 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
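// [Editorial addition] When bitwidths were minimized the scalar cast opcode
// may no longer match the vector types: equal widths become a bitcast, a
// narrower destination a trunc, otherwise sext/zext is picked from the
// recorded signedness (and sitofp becomes uitofp for an unsigned source).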
15204 if (!ScalarTy->isFPOrFPVectorTy() && !SrcScalarTy->isFPOrFPVectorTy() &&
15205 (SrcIt != MinBWs.end() || It != MinBWs.end() ||
15206 SrcScalarTy != CI->getOperand(0)->getType()->getScalarType())) {
15207 // Check if the values are candidates to demote.
15208 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy);
15209 if (SrcIt != MinBWs.end())
15210 SrcBWSz = SrcIt->second.first;
15211 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy->getScalarType());
15212 if (BWSz == SrcBWSz) {
15213 VecOpcode = Instruction::BitCast;
15214 } else if (BWSz < SrcBWSz) {
15215 VecOpcode = Instruction::Trunc;
15216 } else if (It != MinBWs.end()) {
15217 assert(BWSz > SrcBWSz && "Invalid cast!");
15218 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
15219 } else if (SrcIt != MinBWs.end()) {
15220 assert(BWSz > SrcBWSz && "Invalid cast!");
15221 VecOpcode =
15222 SrcIt->second.second ? Instruction::SExt : Instruction::ZExt;
15223 }
15224 } else if (VecOpcode == Instruction::SIToFP && SrcIt != MinBWs.end() &&
15225 !SrcIt->second.second) {
15226 VecOpcode = Instruction::UIToFP;
15227 }
15228 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast)
15229 ? InVec
15230 : Builder.CreateCast(VecOpcode, InVec, VecTy);
15231 V = FinalShuffle(V, E);
15233 E->VectorizedValue = V;
15234 ++NumVectorInstructions;
15235 return V;
15236 }
15237 case Instruction::FCmp:
15238 case Instruction::ICmp: {
15239 setInsertPointAfterBundle(E);
15241 Value *L = vectorizeOperand(E, 0, PostponedPHIs);
15242 if (E->VectorizedValue) {
15243 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15244 return E->VectorizedValue;
15245 }
15246 Value *R = vectorizeOperand(E, 1, PostponedPHIs);
15247 if (E->VectorizedValue) {
15248 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15249 return E->VectorizedValue;
15250 }
15251 if (L->getType() != R->getType()) {
15252 assert((getOperandEntry(E, 0)->isGather() ||
15253 getOperandEntry(E, 1)->isGather() ||
15254 MinBWs.contains(getOperandEntry(E, 0)) ||
15255 MinBWs.contains(getOperandEntry(E, 1))) &&
15256 "Expected item in MinBWs.");
15257 if (cast<VectorType>(L->getType())
15258 ->getElementType()
15259 ->getIntegerBitWidth() < cast<VectorType>(R->getType())
15260 ->getElementType()
15261 ->getIntegerBitWidth()) {
15262 Type *CastTy = R->getType();
15263 L = Builder.CreateIntCast(L, CastTy, GetOperandSignedness(0));
15264 } else {
15265 Type *CastTy = L->getType();
15266 R = Builder.CreateIntCast(R, CastTy, GetOperandSignedness(1));
15267 }
15268 }
15270 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
15271 Value *V = Builder.CreateCmp(P0, L, R);
15272 propagateIRFlags(V, E->Scalars, VL0);
15273 // Do not cast for cmps.
15274 VecTy = cast<FixedVectorType>(V->getType());
15275 V = FinalShuffle(V, E);
15277 E->VectorizedValue = V;
15278 ++NumVectorInstructions;
15279 return V;
15280 }
15281 case Instruction::Select: {
15282 setInsertPointAfterBundle(E);
15284 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs);
15285 if (E->VectorizedValue) {
15286 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15287 return E->VectorizedValue;
15288 }
15289 Value *True = vectorizeOperand(E, 1, PostponedPHIs);
15290 if (E->VectorizedValue) {
15291 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15292 return E->VectorizedValue;
15293 }
15294 Value *False = vectorizeOperand(E, 2, PostponedPHIs);
15295 if (E->VectorizedValue) {
15296 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15297 return E->VectorizedValue;
15298 }
15299 if (True->getType() != VecTy || False->getType() != VecTy) {
15300 assert((It != MinBWs.end() || getOperandEntry(E, 1)->isGather() ||
15301 getOperandEntry(E, 2)->isGather() ||
15302 MinBWs.contains(getOperandEntry(E, 1)) ||
15303 MinBWs.contains(getOperandEntry(E, 2))) &&
15304 "Expected item in MinBWs.");
15305 if (True->getType() != VecTy)
15306 True = Builder.CreateIntCast(True, VecTy, GetOperandSignedness(1));
15307 if (False->getType() != VecTy)
15308 False = Builder.CreateIntCast(False, VecTy, GetOperandSignedness(2));
15309 }
15311 unsigned CondNumElements = getNumElements(Cond->getType());
15312 unsigned TrueNumElements = getNumElements(True->getType());
15313 assert(TrueNumElements >= CondNumElements &&
15314 TrueNumElements % CondNumElements == 0 &&
15315 "Cannot vectorize Instruction::Select");
15316 assert(TrueNumElements == getNumElements(False->getType()) &&
15317 "Cannot vectorize Instruction::Select");
15318 if (CondNumElements != TrueNumElements) {
15319 // When the return type is i1 but the source is fixed vector type, we
15320 // need to duplicate the condition value.
15321 Cond = Builder.CreateShuffleVector(
15322 Cond, createReplicatedMask(TrueNumElements / CondNumElements,
15323 CondNumElements));
15324 }
15325 assert(getNumElements(Cond->getType()) == TrueNumElements &&
15326 "Cannot vectorize Instruction::Select");
15327 Value *V = Builder.CreateSelect(Cond, True, False);
15328 V = FinalShuffle(V, E);
15330 E->VectorizedValue = V;
15331 ++NumVectorInstructions;
15332 return V;
15333 }
15334 case Instruction::FNeg: {
15335 setInsertPointAfterBundle(E);
15337 Value *Op = vectorizeOperand(E, 0, PostponedPHIs);
15339 if (E->VectorizedValue) {
15340 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15341 return E->VectorizedValue;
15342 }
15344 Value *V = Builder.CreateUnOp(
15345 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
15346 propagateIRFlags(V, E->Scalars, VL0);
15347 if (auto *I = dyn_cast<Instruction>(V))
15348 V = propagateMetadata(I, E->Scalars);
15350 V = FinalShuffle(V, E);
15352 E->VectorizedValue = V;
15353 ++NumVectorInstructions;
15355 return V;
15356 }
15357 case Instruction::Freeze: {
15358 setInsertPointAfterBundle(E);
15360 Value *Op = vectorizeOperand(E, 0, PostponedPHIs);
15362 if (E->VectorizedValue) {
15363 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15364 return E->VectorizedValue;
15365 }
15367 if (Op->getType() != VecTy) {
15368 assert((It != MinBWs.end() || getOperandEntry(E, 0)->isGather() ||
15369 MinBWs.contains(getOperandEntry(E, 0))) &&
15370 "Expected item in MinBWs.");
15371 Op = Builder.CreateIntCast(Op, VecTy, GetOperandSignedness(0));
15372 }
15373 Value *V = Builder.CreateFreeze(Op);
15374 V = FinalShuffle(V, E);
15376 E->VectorizedValue = V;
15377 ++NumVectorInstructions;
15379 return V;
15380 }
15381 case Instruction::Add:
15382 case Instruction::FAdd:
15383 case Instruction::Sub:
15384 case Instruction::FSub:
15385 case Instruction::Mul:
15386 case Instruction::FMul:
15387 case Instruction::UDiv:
15388 case Instruction::SDiv:
15389 case Instruction::FDiv:
15390 case Instruction::URem:
15391 case Instruction::SRem:
15392 case Instruction::FRem:
15393 case Instruction::Shl:
15394 case Instruction::LShr:
15395 case Instruction::AShr:
15396 case Instruction::And:
15397 case Instruction::Or:
15398 case Instruction::Xor: {
15399 setInsertPointAfterBundle(E);
15401 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs);
15402 if (E->VectorizedValue) {
15403 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15404 return E->VectorizedValue;
15405 }
15406 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs);
15407 if (E->VectorizedValue) {
15408 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15409 return E->VectorizedValue;
15410 }
15411 if (ShuffleOrOp == Instruction::And && It != MinBWs.end()) {
15412 for (unsigned I : seq<unsigned>(0, E->getNumOperands())) {
15413 ArrayRef<Value *> Ops = E->getOperand(I);
15414 if (all_of(Ops, [&](Value *Op) {
15415 auto *CI = dyn_cast<ConstantInt>(Op);
15416 return CI && CI->getValue().countr_one() >= It->second.first;
15417 })) {
15418 V = FinalShuffle(I == 0 ? RHS : LHS, E);
15419 E->VectorizedValue = V;
15420 ++NumVectorInstructions;
15421 return V;
15422 }
15423 }
15424 }
15425 if (LHS->getType() != VecTy || RHS->getType() != VecTy) {
15426 assert((It != MinBWs.end() || getOperandEntry(E, 0)->isGather() ||
15427 getOperandEntry(E, 1)->isGather() ||
15428 MinBWs.contains(getOperandEntry(E, 0)) ||
15429 MinBWs.contains(getOperandEntry(E, 1))) &&
15430 "Expected item in MinBWs.");
15431 if (LHS->getType() != VecTy)
15432 LHS = Builder.CreateIntCast(LHS, VecTy, GetOperandSignedness(0));
15433 if (RHS->getType() != VecTy)
15434 RHS = Builder.CreateIntCast(RHS, VecTy, GetOperandSignedness(1));
15435 }
15437 Value *V = Builder.CreateBinOp(
15438 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
15439 RHS);
15440 propagateIRFlags(V, E->Scalars, VL0, It == MinBWs.end());
15441 if (auto *I = dyn_cast<Instruction>(V)) {
15442 V = propagateMetadata(I, E->Scalars);
15443 // Drop nuw flags for abs(sub(commutative), true).
15444 if (!MinBWs.contains(E) && ShuffleOrOp == Instruction::Sub &&
15445 any_of(E->Scalars, [](Value *V) {
15446 return isCommutative(cast<Instruction>(V));
15447 }))
15448 I->setHasNoUnsignedWrap(/*b=*/false);
15449 }
15451 V = FinalShuffle(V, E);
15453 E->VectorizedValue = V;
15454 ++NumVectorInstructions;
15456 return V;
15457 }
15458 case Instruction::Load: {
15459 // Loads are inserted at the head of the tree because we don't want to
15460 // sink them all the way down past store instructions.
15461 setInsertPointAfterBundle(E);
15463 LoadInst *LI = cast<LoadInst>(VL0);
15464 Instruction *NewLI;
15465 Value *PO = LI->getPointerOperand();
15466 if (E->State == TreeEntry::Vectorize) {
15467 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
15468 } else if (E->State == TreeEntry::StridedVectorize) {
15469 Value *Ptr0 = cast<LoadInst>(E->Scalars.front())->getPointerOperand();
15470 Value *PtrN = cast<LoadInst>(E->Scalars.back())->getPointerOperand();
15471 PO = IsReverseOrder ? PtrN : Ptr0;
15472 std::optional<int> Diff = getPointersDiff(
15473 VL0->getType(), Ptr0, VL0->getType(), PtrN, *DL, *SE);
15474 Type *StrideTy = DL->getIndexType(PO->getType());
15475 Value *StrideVal;
15476 if (Diff) {
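// [Editorial addition] Diff is a distance in elements of the scalar type.
// Illustrative example: four i32 loads at p, p+2, p+4 and p+6 give Diff = 6,
// Stride = 6 / 3 = 2 elements and StrideVal = 2 * 4 = 8 bytes (negated when
// the scalars appear in reverse order).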
15477 int Stride = *Diff / (static_cast<int>(E->Scalars.size()) - 1);
15478 StrideVal =
15479 ConstantInt::get(StrideTy, (IsReverseOrder ? -1 : 1) * Stride *
15480 DL->getTypeAllocSize(ScalarTy));
15481 } else {
15482 SmallVector<Value *> PointerOps(E->Scalars.size(), nullptr);
15483 transform(E->Scalars, PointerOps.begin(), [](Value *V) {
15484 return cast<LoadInst>(V)->getPointerOperand();
15485 });
15486 OrdersType Order;
15487 std::optional<Value *> Stride =
15488 calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order,
15489 &*Builder.GetInsertPoint());
15490 Value *NewStride =
15491 Builder.CreateIntCast(*Stride, StrideTy, /*isSigned=*/true);
15492 StrideVal = Builder.CreateMul(
15493 NewStride,
15494 ConstantInt::get(
15495 StrideTy,
15496 (IsReverseOrder ? -1 : 1) *
15497 static_cast<int>(DL->getTypeAllocSize(ScalarTy))));
15498 }
15499 Align CommonAlignment = computeCommonAlignment<LoadInst>(E->Scalars);
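// [Editorial addition] Sketch of the emitted call (illustrative only), for
// four i32 loads with an 8-byte stride:
//   %v = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(
//            ptr %PO, i64 8, <4 x i1> splat (i1 true), i32 4)
// The common alignment is attached below as a parameter attribute.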
15500 auto *Inst = Builder.CreateIntrinsic(
15501 Intrinsic::experimental_vp_strided_load,
15502 {VecTy, PO->getType(), StrideTy},
15503 {PO, StrideVal, Builder.getAllOnesMask(VecTy->getElementCount()),
15504 Builder.getInt32(E->Scalars.size())});
15505 Inst->addParamAttr(
15506 /*ArgNo=*/0,
15507 Attribute::getWithAlignment(Inst->getContext(), CommonAlignment));
15508 NewLI = Inst;
15509 } else {
15510 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
15511 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs);
15512 if (E->VectorizedValue) {
15513 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15514 return E->VectorizedValue;
15515 }
15516 if (isa<FixedVectorType>(ScalarTy)) {
15517 assert(SLPReVec && "FixedVectorType is not expected.");
15518 // CreateMaskedGather expects VecTy and VecPtr to have the same size. We need
15519 // to expand VecPtr if ScalarTy is a vector type.
15520 unsigned ScalarTyNumElements =
15521 cast<FixedVectorType>(ScalarTy)->getNumElements();
15522 unsigned VecTyNumElements =
15523 cast<FixedVectorType>(VecTy)->getNumElements();
15524 assert(VecTyNumElements % ScalarTyNumElements == 0 &&
15525 "Cannot expand getelementptr.");
15526 unsigned VF = VecTyNumElements / ScalarTyNumElements;
15527 SmallVector<Constant *> Indices(VecTyNumElements);
15528 transform(seq(VecTyNumElements), Indices.begin(), [=](unsigned I) {
15529 return Builder.getInt64(I % ScalarTyNumElements);
15530 });
15531 VecPtr = Builder.CreateGEP(
15532 VecTy->getElementType(),
15533 Builder.CreateShuffleVector(
15534 VecPtr, createReplicatedMask(ScalarTyNumElements, VF)),
15535 ConstantVector::get(Indices));
15536 }
15537 // Use the minimum alignment of the gathered loads.
15538 Align CommonAlignment = computeCommonAlignment<LoadInst>(E->Scalars);
15539 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
15540 }
15541 Value *V = propagateMetadata(NewLI, E->Scalars);
15543 V = FinalShuffle(V, E);
15544 E->VectorizedValue = V;
15545 ++NumVectorInstructions;
15546 return V;
15547 }
15548 case Instruction::Store: {
15549 auto *SI = cast<StoreInst>(VL0);
15551 setInsertPointAfterBundle(E);
15553 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs);
15554 if (VecValue->getType() != VecTy)
15555 VecValue =
15556 Builder.CreateIntCast(VecValue, VecTy, GetOperandSignedness(0));
15557 VecValue = FinalShuffle(VecValue, E);
15559 Value *Ptr = SI->getPointerOperand();
15560 Instruction *ST;
15561 if (E->State == TreeEntry::Vectorize) {
15562 ST = Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign());
15563 } else {
15564 assert(E->State == TreeEntry::StridedVectorize &&
15565 "Expected either strided or consecutive stores.");
15566 if (!E->ReorderIndices.empty()) {
15567 SI = cast<StoreInst>(E->Scalars[E->ReorderIndices.front()]);
15568 Ptr = SI->getPointerOperand();
15569 }
15570 Align CommonAlignment = computeCommonAlignment<StoreInst>(E->Scalars);
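// [Editorial addition] The stride passed below is the constant
// -getTypeAllocSize(ScalarTy), i.e. the lanes are stored at consecutive,
// descending byte addresses; the EVL operand is the number of scalars and
// the common alignment is attached as a parameter attribute.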
15571 Type *StrideTy = DL->getIndexType(SI->getPointerOperandType());
15572 auto *Inst = Builder.CreateIntrinsic(
15573 Intrinsic::experimental_vp_strided_store,
15574 {VecTy, Ptr->getType(), StrideTy},
15575 {VecValue, Ptr,
15576 ConstantInt::get(
15577 StrideTy, -static_cast<int>(DL->getTypeAllocSize(ScalarTy))),
15578 Builder.getAllOnesMask(VecTy->getElementCount()),
15579 Builder.getInt32(E->Scalars.size())});
15580 Inst->addParamAttr(
15581 /*ArgNo=*/1,
15582 Attribute::getWithAlignment(Inst->getContext(), CommonAlignment));
15583 ST = Inst;
15584 }
15586 Value *V = propagateMetadata(ST, E->Scalars);
15588 E->VectorizedValue = V;
15589 ++NumVectorInstructions;
15590 return V;
15591 }
15592 case Instruction::GetElementPtr: {
15593 auto *GEP0 = cast<GetElementPtrInst>(VL0);
15594 setInsertPointAfterBundle(E);
15596 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs);
15597 if (E->VectorizedValue) {
15598 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15599 return E->VectorizedValue;
15600 }
15602 SmallVector<Value *> OpVecs;
15603 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
15604 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs);
15605 if (E->VectorizedValue) {
15606 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15607 return E->VectorizedValue;
15608 }
15609 OpVecs.push_back(OpVec);
15610 }
15612 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
15613 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) {
15614 SmallVector<Value *> GEPs;
15615 for (Value *V : E->Scalars) {
15616 if (isa<GetElementPtrInst>(V))
15617 GEPs.push_back(V);
15618 }
15619 V = propagateMetadata(I, GEPs);
15620 }
15622 V = FinalShuffle(V, E);
15624 E->VectorizedValue = V;
15625 ++NumVectorInstructions;
15627 return V;
15628 }
15629 case Instruction::Call: {
15630 CallInst *CI = cast<CallInst>(VL0);
15631 setInsertPointAfterBundle(E);
15633 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
15635 SmallVector<Type *> ArgTys =
15636 buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
15637 It != MinBWs.end() ? It->second.first : 0);
15638 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
15639 bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
15640 VecCallCosts.first <= VecCallCosts.second;
15642 Value *ScalarArg = nullptr;
15643 SmallVector<Value *> OpVecs;
15644 SmallVector<Type *, 2> TysForDecl;
15645 // Add return type if intrinsic is overloaded on it.
15646 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1))
15647 TysForDecl.push_back(VecTy);
15648 auto *CEI = cast<CallInst>(VL0);
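// [Editorial addition] Per argument: scalar operands of the intrinsic are
// passed through unvectorized (with abs's poison flag forced to false after
// bitwidth minimization), all other operands are vectorized and int-cast to
// the chosen width; overloaded types are collected in TysForDecl.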
15649 for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
15650 ValueList OpVL;
15651 // Some intrinsics have scalar arguments. This argument should not be
15652 // vectorized.
15653 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) {
15654 ScalarArg = CEI->getArgOperand(I);
15655 // If we decided to reduce the bitwidth of the abs intrinsic, its second
15656 // argument must be set to false (do not return poison if the value is signed min).
15657 if (ID == Intrinsic::abs && It != MinBWs.end() &&
15658 It->second.first < DL->getTypeSizeInBits(CEI->getType()))
15659 ScalarArg = Builder.getFalse();
15660 OpVecs.push_back(ScalarArg);
15661 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
15662 TysForDecl.push_back(ScalarArg->getType());
15663 continue;
15664 }
15666 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs);
15667 if (E->VectorizedValue) {
15668 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15669 return E->VectorizedValue;
15670 }
15671 ScalarArg = CEI->getArgOperand(I);
15672 if (cast<VectorType>(OpVec->getType())->getElementType() !=
15673 ScalarArg->getType()->getScalarType() &&
15674 It == MinBWs.end()) {
15675 auto *CastTy =
15676 getWidenedType(ScalarArg->getType(), VecTy->getNumElements());
15677 OpVec = Builder.CreateIntCast(OpVec, CastTy, GetOperandSignedness(I));
15678 } else if (It != MinBWs.end()) {
15679 OpVec = Builder.CreateIntCast(OpVec, VecTy, GetOperandSignedness(I));
15680 }
15681 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n");
15682 OpVecs.push_back(OpVec);
15683 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I))
15684 TysForDecl.push_back(OpVec->getType());
15685 }
15687 Function *CF;
15688 if (!UseIntrinsic) {
15689 VFShape Shape =
15690 VFShape::get(CI->getFunctionType(),
15691 ElementCount::getFixed(
15692 static_cast<unsigned>(VecTy->getNumElements())),
15693 false /*HasGlobalPred*/);
15694 CF = VFDatabase(*CI).getVectorizedFunction(Shape);
15695 } else {
15696 CF = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, TysForDecl);
15697 }
15699 SmallVector<OperandBundleDef, 1> OpBundles;
15700 CI->getOperandBundlesAsDefs(OpBundles);
15701 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
15703 propagateIRFlags(V, E->Scalars, VL0);
15704 V = FinalShuffle(V, E);
15706 E->VectorizedValue = V;
15707 ++NumVectorInstructions;
15708 return V;
15709 }
15710 case Instruction::ShuffleVector: {
15711 Value *V;
15712 if (SLPReVec && !E->isAltShuffle()) {
15713 setInsertPointAfterBundle(E);
15714 Value *Src = vectorizeOperand(E, 0, PostponedPHIs);
15715 if (E->VectorizedValue) {
15716 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15717 return E->VectorizedValue;
15718 }
15719 SmallVector<int> ThisMask(calculateShufflevectorMask(E->Scalars));
15720 if (auto *SVSrc = dyn_cast<ShuffleVectorInst>(Src)) {
15721 assert(isa<PoisonValue>(SVSrc->getOperand(1)) &&
15722 "Not supported shufflevector usage.");
15723 SmallVector<int> NewMask(ThisMask.size());
15724 transform(ThisMask, NewMask.begin(), [&SVSrc](int Mask) {
15725 return SVSrc->getShuffleMask()[Mask];
15726 });
15727 V = Builder.CreateShuffleVector(SVSrc->getOperand(0), NewMask);
15728 } else {
15729 V = Builder.CreateShuffleVector(Src, ThisMask);
15730 }
15731 propagateIRFlags(V, E->Scalars, VL0);
15732 if (auto *I = dyn_cast<Instruction>(V))
15733 V = propagateMetadata(I, E->Scalars);
15734 V = FinalShuffle(V, E);
15735 } else {
15736 assert(E->isAltShuffle() &&
15737 ((Instruction::isBinaryOp(E->getOpcode()) &&
15738 Instruction::isBinaryOp(E->getAltOpcode())) ||
15739 (Instruction::isCast(E->getOpcode()) &&
15740 Instruction::isCast(E->getAltOpcode())) ||
15741 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
15742 "Invalid Shuffle Vector Operand");
15744 Value *LHS = nullptr, *RHS = nullptr;
15745 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) {
15746 setInsertPointAfterBundle(E);
15747 LHS = vectorizeOperand(E, 0, PostponedPHIs);
15748 if (E->VectorizedValue) {
15749 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15750 return E->VectorizedValue;
15751 }
15752 RHS = vectorizeOperand(E, 1, PostponedPHIs);
15753 } else {
15754 setInsertPointAfterBundle(E);
15755 LHS = vectorizeOperand(E, 0, PostponedPHIs);
15756 }
15757 if (E->VectorizedValue) {
15758 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15759 return E->VectorizedValue;
15760 }
15761 if (LHS && RHS &&
15762 ((Instruction::isBinaryOp(E->getOpcode()) &&
15763 (LHS->getType() != VecTy || RHS->getType() != VecTy)) ||
15764 (isa<CmpInst>(VL0) && LHS->getType() != RHS->getType()))) {
15765 assert((It != MinBWs.end() ||
15766 getOperandEntry(E, 0)->State == TreeEntry::NeedToGather ||
15767 getOperandEntry(E, 1)->State == TreeEntry::NeedToGather ||
15768 MinBWs.contains(getOperandEntry(E, 0)) ||
15769 MinBWs.contains(getOperandEntry(E, 1))) &&
15770 "Expected item in MinBWs.");
15771 Type *CastTy = VecTy;
15772 if (isa<CmpInst>(VL0) && LHS->getType() != RHS->getType()) {
15773 if (cast<VectorType>(LHS->getType())
15774 ->getElementType()
15775 ->getIntegerBitWidth() < cast<VectorType>(RHS->getType())
15776 ->getElementType()
15777 ->getIntegerBitWidth())
15778 CastTy = RHS->getType();
15779 else
15780 CastTy = LHS->getType();
15781 }
15782 if (LHS->getType() != CastTy)
15783 LHS = Builder.CreateIntCast(LHS, CastTy, GetOperandSignedness(0));
15784 if (RHS->getType() != CastTy)
15785 RHS = Builder.CreateIntCast(RHS, CastTy, GetOperandSignedness(1));
15786 }
15788 Value *V0, *V1;
15789 if (Instruction::isBinaryOp(E->getOpcode())) {
15790 V0 = Builder.CreateBinOp(
15791 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
15792 V1 = Builder.CreateBinOp(
15793 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
15794 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
15795 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
15796 auto *AltCI = cast<CmpInst>(E->getAltOp());
15797 CmpInst::Predicate AltPred = AltCI->getPredicate();
15798 V1 = Builder.CreateCmp(AltPred, LHS, RHS);
15799 } else {
15800 if (LHS->getType()->isIntOrIntVectorTy() && ScalarTy->isIntegerTy()) {
15801 unsigned SrcBWSz = DL->getTypeSizeInBits(
15802 cast<VectorType>(LHS->getType())->getElementType());
15803 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
15804 if (BWSz <= SrcBWSz) {
15805 if (BWSz < SrcBWSz)
15806 LHS = Builder.CreateIntCast(LHS, VecTy, It->second.first);
15807 assert(LHS->getType() == VecTy &&
15808 "Expected same type as operand.");
15809 if (auto *I = dyn_cast<Instruction>(LHS))
15810 LHS = propagateMetadata(I, E->Scalars);
15811 LHS = FinalShuffle(LHS, E);
15812 E->VectorizedValue = LHS;
15813 ++NumVectorInstructions;
15814 return LHS;
15815 }
15816 }
15817 V0 = Builder.CreateCast(
15818 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
15819 V1 = Builder.CreateCast(
15820 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
15821 }
15822 // Add V0 and V1 to later analysis to try to find and remove matching
15823 // instruction, if any.
15824 for (Value *V : {V0, V1}) {
15825 if (auto *I = dyn_cast<Instruction>(V)) {
15826 GatherShuffleExtractSeq.insert(I);
15827 CSEBlocks.insert(I->getParent());
15828 }
15829 }
15831 // Create shuffle to take alternate operations from the vector.
15832 // Also, gather up main and alt scalar ops to propagate IR flags to
15833 // each vector operation.
15834 ValueList OpScalars, AltScalars;
15835 SmallVector<int> Mask;
15836 E->buildAltOpShuffleMask(
15837 [E, this](Instruction *I) {
15838 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
15839 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(),
15840 *TLI);
15841 },
15842 Mask, &OpScalars, &AltScalars);
15844 propagateIRFlags(V0, OpScalars, E->getMainOp(), It == MinBWs.end());
15845 propagateIRFlags(V1, AltScalars, E->getAltOp(), It == MinBWs.end());
15846 auto DropNuwFlag = [&](Value *Vec, unsigned Opcode) {
15847 // Drop nuw flags for abs(sub(commutative), true).
15848 if (auto *I = dyn_cast<Instruction>(Vec);
15849 I && Opcode == Instruction::Sub && !MinBWs.contains(E) &&
15850 any_of(E->Scalars, [](Value *V) {
15851 auto *IV = cast<Instruction>(V);
15852 return IV->getOpcode() == Instruction::Sub &&
15853 isCommutative(cast<Instruction>(IV));
15854 }))
15855 I->setHasNoUnsignedWrap(/*b=*/false);
15856 };
15857 DropNuwFlag(V0, E->getOpcode());
15858 DropNuwFlag(V1, E->getAltOpcode());
15860 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
15861 assert(SLPReVec && "FixedVectorType is not expected.");
15862 transformScalarShuffleIndiciesToVector(VecTy->getNumElements(), Mask);
15863 }
15864 V = Builder.CreateShuffleVector(V0, V1, Mask);
15865 if (auto *I = dyn_cast<Instruction>(V)) {
15866 V = propagateMetadata(I, E->Scalars);
15867 GatherShuffleExtractSeq.insert(I);
15868 CSEBlocks.insert(I->getParent());
15869 }
15870 }
15872 E->VectorizedValue = V;
15873 ++NumVectorInstructions;
15875 return V;
15876 }
15877 default:
15878 llvm_unreachable("unknown inst");
15879 }
15880 return nullptr;
15881 }
15883 Value *BoUpSLP::vectorizeTree() {
15884 ExtraValueToDebugLocsMap ExternallyUsedValues;
15885 return vectorizeTree(ExternallyUsedValues);
15886 }
15888 Value *
15889 BoUpSLP::vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
15890 Instruction *ReductionRoot) {
15891 // All blocks must be scheduled before any instructions are inserted.
15892 for (auto &BSIter : BlocksSchedules) {
15893 scheduleBlock(BSIter.second.get());
15894 }
15895 // Clean Entry-to-LastInstruction table. It can be affected after scheduling,
15896 // need to rebuild it.
15897 EntryToLastInstruction.clear();
15899 if (ReductionRoot)
15900 Builder.SetInsertPoint(ReductionRoot->getParent(),
15901 ReductionRoot->getIterator());
15902 else
15903 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
15905 // Emit gathered loads first to emit better code for the users of those
15906 // gathered loads.
15907 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
15908 if (GatheredLoadsEntriesFirst.has_value() &&
15909 TE->Idx >= *GatheredLoadsEntriesFirst &&
15910 (!TE->isGather() || !TE->UserTreeIndices.empty())) {
15911 assert((!TE->UserTreeIndices.empty() ||
15912 (TE->getOpcode() == Instruction::Load && !TE->isGather())) &&
15913 "Expected gathered load node.");
15914 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
15915 }
15916 }
15917 // Postpone emission of PHIs operands to avoid cyclic dependencies issues.
15918 (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
15919 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
15920 if (TE->State == TreeEntry::Vectorize &&
15921 TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
15922 TE->VectorizedValue)
15923 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
15924 // Run through the list of postponed gathers and emit them, replacing the temp
15925 // emitted allocas with actual vector instructions.
15926 ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
15927 DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
15928 for (const TreeEntry *E : PostponedNodes) {
15929 auto *TE = const_cast<TreeEntry *>(E);
15930 if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
15931 if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
15932 TE->UserTreeIndices.front().EdgeIdx)) &&
15933 VecTE->isSame(TE->Scalars))
15934 // Found gather node which is absolutely the same as one of the
15935 // vectorized nodes. It may happen after reordering.
15936 continue;
15937 auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
15938 TE->VectorizedValue = nullptr;
15939 auto *UserI =
15940 cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
15941 // If the user is a PHI node, its vector code has to be inserted right before
15942 // the block terminator. Since the node was delayed, there were some unresolved
15943 // dependencies at the moment when the stub instruction was emitted. In case
15944 // any of these dependencies turn out to be an operand of another PHI coming
15945 // from this same block, the position of the stub instruction becomes invalid.
15946 // This is because the source vector that is supposed to feed this gather node
15947 // was inserted at the end of the block [after the stub instruction]. So we
15948 // need to adjust the insertion point again to the end of the block.
15949 if (isa<PHINode>(UserI)) {
15950 // Insert before all users.
15951 Instruction *InsertPt = PrevVec->getParent()->getTerminator();
15952 for (User *U : PrevVec->users()) {
15953 if (U == UserI)
15954 continue;
15955 auto *UI = dyn_cast<Instruction>(U);
15956 if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
15957 continue;
15958 if (UI->comesBefore(InsertPt))
15959 InsertPt = UI;
15960 }
15961 Builder.SetInsertPoint(InsertPt);
15962 } else {
15963 Builder.SetInsertPoint(PrevVec);
15964 }
15965 Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
15966 Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
15967 if (Vec->getType() != PrevVec->getType()) {
15968 assert(Vec->getType()->isIntOrIntVectorTy() &&
15969 PrevVec->getType()->isIntOrIntVectorTy() &&
15970 "Expected integer vector types only.");
15971 std::optional<bool> IsSigned;
15972 for (Value *V : TE->Scalars) {
15973 if (const TreeEntry *BaseTE = getTreeEntry(V)) {
15974 auto It = MinBWs.find(BaseTE);
15975 if (It != MinBWs.end()) {
15976 IsSigned = IsSigned.value_or(false) || It->second.second;
15977 if (*IsSigned)
15978 break;
15979 }
15980 for (const TreeEntry *MNTE : MultiNodeScalars.lookup(V)) {
15981 auto It = MinBWs.find(MNTE);
15982 if (It != MinBWs.end()) {
15983 IsSigned = IsSigned.value_or(false) || It->second.second;
15984 if (*IsSigned)
15985 break;
15986 }
15987 }
15988 if (IsSigned.value_or(false))
15989 break;
15990 // Scan through gather nodes.
15991 for (const TreeEntry *BVE : ValueToGatherNodes.lookup(V)) {
15992 auto It = MinBWs.find(BVE);
15993 if (It != MinBWs.end()) {
15994 IsSigned = IsSigned.value_or(false) || It->second.second;
15995 if (*IsSigned)
15996 break;
15999 if (IsSigned.value_or(false))
16000 break;
16001 if (auto *EE = dyn_cast<ExtractElementInst>(V)) {
16002 IsSigned =
16003 IsSigned.value_or(false) ||
16004 !isKnownNonNegative(EE->getVectorOperand(), SimplifyQuery(*DL));
16005 continue;
16007 if (IsSigned.value_or(false))
16008 break;
16011 if (IsSigned.value_or(false)) {
16012 // Final attempt - check user node.
16013 auto It = MinBWs.find(TE->UserTreeIndices.front().UserTE);
16014 if (It != MinBWs.end())
16015 IsSigned = It->second.second;
16017 assert(IsSigned &&
16018 "Expected user node or perfect diamond match in MinBWs.");
16019 Vec = Builder.CreateIntCast(Vec, PrevVec->getType(), *IsSigned);
16021 PrevVec->replaceAllUsesWith(Vec);
16022 PostponedValues.try_emplace(Vec).first->second.push_back(TE);
16023 // Replace the stub vector node, if it was already used for one of the
16024 // buildvector nodes.
16025 auto It = PostponedValues.find(PrevVec);
16026 if (It != PostponedValues.end()) {
16027 for (TreeEntry *VTE : It->getSecond())
16028 VTE->VectorizedValue = Vec;
16030 eraseInstruction(PrevVec);
16033 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
16034 << " values .\n");
16036 SmallVector<ShuffledInsertData<Value *>> ShuffledInserts;
16037 // Maps vector instruction to original insertelement instruction
16038 DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
16039 // Maps extract Scalar to the corresponding extractelement instruction in the
16040 // basic block. Only one extractelement per block should be emitted.
16041 DenseMap<Value *, DenseMap<BasicBlock *, std::pair<Value *, Value *>>>
16042 ScalarToEEs;
16043 SmallDenseSet<Value *, 4> UsedInserts;
16044 DenseMap<std::pair<Value *, Type *>, Value *> VectorCasts;
16045 SmallDenseSet<Value *, 4> ScalarsWithNullptrUser;
16046 SmallDenseSet<ExtractElementInst *, 4> IgnoredExtracts;
16047 // Extract all of the elements with the external uses.
16048 for (const auto &ExternalUse : ExternalUses) {
16049 Value *Scalar = ExternalUse.Scalar;
16050 llvm::User *User = ExternalUse.User;
16052 // Skip users that we already RAUW. This happens when one instruction
16053 // has multiple uses of the same value.
16054 if (User && !is_contained(Scalar->users(), User))
16055 continue;
16056 TreeEntry *E = getTreeEntry(Scalar);
16057 assert(E && "Invalid scalar");
16058 assert(!E->isGather() && "Extracting from a gather list");
16059 // Non-instruction pointers are not deleted, just skip them.
16060 if (E->getOpcode() == Instruction::GetElementPtr &&
16061 !isa<GetElementPtrInst>(Scalar))
16062 continue;
16064 Value *Vec = E->VectorizedValue;
16065 assert(Vec && "Can't find vectorizable value");
16067 Value *Lane = Builder.getInt32(ExternalUse.Lane);
16068 auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
16069 if (Scalar->getType() != Vec->getType()) {
16070 Value *Ex = nullptr;
16071 Value *ExV = nullptr;
16072 auto *Inst = dyn_cast<Instruction>(Scalar);
16073 bool ReplaceInst = Inst && ExternalUsesAsOriginalScalar.contains(Inst);
16074 auto It = ScalarToEEs.find(Scalar);
16075 if (It != ScalarToEEs.end()) {
16076 // No need to emit many extracts, just move the only one in the
16077 // current block.
16078 auto EEIt = It->second.find(ReplaceInst ? Inst->getParent()
16079 : Builder.GetInsertBlock());
16080 if (EEIt != It->second.end()) {
16081 Value *PrevV = EEIt->second.first;
16082 if (auto *I = dyn_cast<Instruction>(PrevV);
16083 I && !ReplaceInst &&
16084 Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() &&
16085 Builder.GetInsertPoint()->comesBefore(I)) {
16086 I->moveBefore(*Builder.GetInsertPoint()->getParent(),
16087 Builder.GetInsertPoint());
16088 if (auto *CI = dyn_cast<Instruction>(EEIt->second.second))
16089 CI->moveAfter(I);
16091 Ex = PrevV;
16092 ExV = EEIt->second.second ? EEIt->second.second : Ex;
16095 if (!Ex) {
16096 // "Reuse" the existing extract to improve final codegen.
16097 if (ReplaceInst) {
16098 // Leave the instruction as is, if extracting it is cheaper and all its
16099 // operands are scalar.
16100 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) {
16101 IgnoredExtracts.insert(EE);
16102 Ex = EE;
16103 } else {
16104 auto *CloneInst = Inst->clone();
16105 CloneInst->insertBefore(Inst);
16106 if (Inst->hasName())
16107 CloneInst->takeName(Inst);
16108 Ex = CloneInst;
16110 } else if (auto *ES = dyn_cast<ExtractElementInst>(Scalar);
16111 ES && isa<Instruction>(Vec)) {
16112 Value *V = ES->getVectorOperand();
16113 auto *IVec = cast<Instruction>(Vec);
16114 if (const TreeEntry *ETE = getTreeEntry(V))
16115 V = ETE->VectorizedValue;
16116 if (auto *IV = dyn_cast<Instruction>(V);
16117 !IV || IV == Vec || IV->getParent() != IVec->getParent() ||
16118 IV->comesBefore(IVec))
16119 Ex = Builder.CreateExtractElement(V, ES->getIndexOperand());
16120 else
16121 Ex = Builder.CreateExtractElement(Vec, Lane);
16122 } else if (auto *VecTy =
16123 dyn_cast<FixedVectorType>(Scalar->getType())) {
16124 assert(SLPReVec && "FixedVectorType is not expected.");
16125 unsigned VecTyNumElements = VecTy->getNumElements();
16126 // When REVEC is enabled, we need to extract a vector.
16127 // Note: The element size of Scalar may be different from the
16128 // element size of Vec.
16129 Ex = Builder.CreateExtractVector(
16130 FixedVectorType::get(Vec->getType()->getScalarType(),
16131 VecTyNumElements),
16132 Vec, Builder.getInt64(ExternalUse.Lane * VecTyNumElements));
16133 } else {
16134 Ex = Builder.CreateExtractElement(Vec, Lane);
16136 // If necessary, sign-extend or zero-extend ScalarRoot
16137 // to the larger type.
16138 ExV = Ex;
16139 if (Scalar->getType() != Ex->getType())
16140 ExV = Builder.CreateIntCast(
16141 Ex, Scalar->getType(),
16142 !isKnownNonNegative(Scalar, SimplifyQuery(*DL)));
16143 auto *I = dyn_cast<Instruction>(Ex);
16144 ScalarToEEs[Scalar].try_emplace(I ? I->getParent()
16145 : &F->getEntryBlock(),
16146 std::make_pair(Ex, ExV));
16148 // The then-branch of the previous if may produce constants, since operand
16149 // 0 might be a constant.
16150 if (auto *ExI = dyn_cast<Instruction>(Ex);
16151 ExI && !isa<PHINode>(ExI) && !mayHaveNonDefUseDependency(*ExI)) {
16152 GatherShuffleExtractSeq.insert(ExI);
16153 CSEBlocks.insert(ExI->getParent());
16155 return ExV;
16157 assert(isa<FixedVectorType>(Scalar->getType()) &&
16158 isa<InsertElementInst>(Scalar) &&
16159 "In-tree scalar of vector type is not insertelement?");
16160 auto *IE = cast<InsertElementInst>(Scalar);
16161 VectorToInsertElement.try_emplace(Vec, IE);
16162 return Vec;
16164 // If User == nullptr, the Scalar remains as scalar in vectorized
16165 // instructions or is used as extra arg. Generate ExtractElement instruction
16166 // and update the record for this scalar in ExternallyUsedValues.
16167 if (!User) {
16168 if (!ScalarsWithNullptrUser.insert(Scalar).second)
16169 continue;
16170 assert((ExternallyUsedValues.count(Scalar) ||
16171 Scalar->hasNUsesOrMore(UsesLimit) ||
16172 ExternalUsesAsOriginalScalar.contains(Scalar) ||
16173 any_of(Scalar->users(),
16174 [&](llvm::User *U) {
16175 if (ExternalUsesAsOriginalScalar.contains(U))
16176 return true;
16177 TreeEntry *UseEntry = getTreeEntry(U);
16178 return UseEntry &&
16179 (UseEntry->State == TreeEntry::Vectorize ||
16180 UseEntry->State ==
16181 TreeEntry::StridedVectorize) &&
16182 (E->State == TreeEntry::Vectorize ||
16183 E->State == TreeEntry::StridedVectorize) &&
16184 doesInTreeUserNeedToExtract(
16185 Scalar, getRootEntryInstruction(*UseEntry),
16186 TLI);
16187 })) &&
16188 "Scalar with nullptr User must be registered in "
16189 "ExternallyUsedValues map or remain as scalar in vectorized "
16190 "instructions");
16191 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
16192 if (auto *PHI = dyn_cast<PHINode>(VecI)) {
16193 if (PHI->getParent()->isLandingPad())
16194 Builder.SetInsertPoint(
16195 PHI->getParent(),
16196 std::next(
16197 PHI->getParent()->getLandingPadInst()->getIterator()));
16198 else
16199 Builder.SetInsertPoint(PHI->getParent(),
16200 PHI->getParent()->getFirstNonPHIIt());
16201 } else {
16202 Builder.SetInsertPoint(VecI->getParent(),
16203 std::next(VecI->getIterator()));
16205 } else {
16206 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
16208 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16209 // Required to update internally referenced instructions.
16210 if (Scalar != NewInst) {
16211 assert((!isa<ExtractElementInst>(Scalar) ||
16212 !IgnoredExtracts.contains(cast<ExtractElementInst>(Scalar))) &&
16213 "Extractelements should not be replaced.");
16214 Scalar->replaceAllUsesWith(NewInst);
16216 continue;
16219 if (auto *VU = dyn_cast<InsertElementInst>(User);
16220 VU && VU->getOperand(1) == Scalar) {
16221 // Skip if the scalar is another vector op or Vec is not an instruction.
16222 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
16223 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
16224 if (!UsedInserts.insert(VU).second)
16225 continue;
16226 // Need to use original vector, if the root is truncated.
16227 auto BWIt = MinBWs.find(E);
16228 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) {
16229 auto *ScalarTy = FTy->getElementType();
16230 auto Key = std::make_pair(Vec, ScalarTy);
16231 auto VecIt = VectorCasts.find(Key);
16232 if (VecIt == VectorCasts.end()) {
16233 IRBuilderBase::InsertPointGuard Guard(Builder);
16234 if (auto *IVec = dyn_cast<PHINode>(Vec)) {
16235 if (IVec->getParent()->isLandingPad())
16236 Builder.SetInsertPoint(IVec->getParent(),
16237 std::next(IVec->getParent()
16238 ->getLandingPadInst()
16239 ->getIterator()));
16240 else
16241 Builder.SetInsertPoint(
16242 IVec->getParent()->getFirstNonPHIOrDbgOrLifetime());
16243 } else if (auto *IVec = dyn_cast<Instruction>(Vec)) {
16244 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction());
16246 Vec = Builder.CreateIntCast(
16247 Vec,
16248 getWidenedType(
16249 ScalarTy,
16250 cast<FixedVectorType>(Vec->getType())->getNumElements()),
16251 BWIt->second.second);
16252 VectorCasts.try_emplace(Key, Vec);
16253 } else {
16254 Vec = VecIt->second;
16258 std::optional<unsigned> InsertIdx = getElementIndex(VU);
16259 if (InsertIdx) {
16260 auto *It = find_if(
16261 ShuffledInserts, [VU](const ShuffledInsertData<Value *> &Data) {
16262 // Checks if 2 insertelements are from the same buildvector.
16263 InsertElementInst *VecInsert = Data.InsertElements.front();
16264 return areTwoInsertFromSameBuildVector(
16265 VU, VecInsert,
16266 [](InsertElementInst *II) { return II->getOperand(0); });
16268 unsigned Idx = *InsertIdx;
16269 if (It == ShuffledInserts.end()) {
16270 (void)ShuffledInserts.emplace_back();
16271 It = std::next(ShuffledInserts.begin(),
16272 ShuffledInserts.size() - 1);
16274 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
16275 if (Mask.empty())
16276 Mask.assign(FTy->getNumElements(), PoisonMaskElem);
16277 Mask[Idx] = ExternalUse.Lane;
16278 It->InsertElements.push_back(cast<InsertElementInst>(User));
16279 continue;
16285 // Generate extracts for out-of-tree users.
16286 // Find the insertion point for the extractelement lane.
16287 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
16288 if (PHINode *PH = dyn_cast<PHINode>(User)) {
16289 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) {
16290 if (PH->getIncomingValue(I) == Scalar) {
16291 Instruction *IncomingTerminator =
16292 PH->getIncomingBlock(I)->getTerminator();
16293 if (isa<CatchSwitchInst>(IncomingTerminator)) {
16294 Builder.SetInsertPoint(VecI->getParent(),
16295 std::next(VecI->getIterator()));
16296 } else {
16297 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator());
16299 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16300 PH->setOperand(I, NewInst);
16303 } else {
16304 Builder.SetInsertPoint(cast<Instruction>(User));
16305 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16306 User->replaceUsesOfWith(Scalar, NewInst);
16308 } else {
16309 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
16310 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16311 User->replaceUsesOfWith(Scalar, NewInst);
16314 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
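// Illustrative sketch (hypothetical mask): the helper below splits a mask
// that indexes into the concatenation of V1 and V2 into one mask per operand.
// With VF(V1) == 4 and Mask == <0, 5, 2, 7>, it builds
// CombinedMask1 == <0, poison, 2, poison> for V1 and
// CombinedMask2 == <poison, 1, poison, 3> for V2, which the
// ShuffleInstructionBuilder then folds into a single shuffle sequence.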
16317 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) {
16318 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem);
16319 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem);
16320 int VF = cast<FixedVectorType>(V1->getType())->getNumElements();
16321 for (int I = 0, E = Mask.size(); I < E; ++I) {
16322 if (Mask[I] < VF)
16323 CombinedMask1[I] = Mask[I];
16324 else
16325 CombinedMask2[I] = Mask[I] - VF;
16327 ShuffleInstructionBuilder ShuffleBuilder(
16328 cast<VectorType>(V1->getType())->getElementType(), Builder, *this);
16329 ShuffleBuilder.add(V1, CombinedMask1);
16330 if (V2)
16331 ShuffleBuilder.add(V2, CombinedMask2);
16332 return ShuffleBuilder.finalize({}, {}, {});
16335 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask,
16336 bool ForSingleMask) {
16337 unsigned VF = Mask.size();
16338 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
16339 if (VF != VecVF) {
16340 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) {
16341 Vec = CreateShuffle(Vec, nullptr, Mask);
16342 return std::make_pair(Vec, true);
16344 if (!ForSingleMask) {
16345 SmallVector<int> ResizeMask(VF, PoisonMaskElem);
16346 for (unsigned I = 0; I < VF; ++I) {
16347 if (Mask[I] != PoisonMaskElem)
16348 ResizeMask[Mask[I]] = Mask[I];
16350 Vec = CreateShuffle(Vec, nullptr, ResizeMask);
16354 return std::make_pair(Vec, false);
16356 // Perform shuffling of the vectorized tree entries for better handling of
16357 // external extracts.
16358 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
16359 // Find the first and the last instruction in the list of insertelements.
16360 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement);
16361 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front();
16362 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back();
16363 Builder.SetInsertPoint(LastInsert);
16364 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
16365 Value *NewInst = performExtractsShuffleAction<Value>(
16366 MutableArrayRef(Vector.data(), Vector.size()),
16367 FirstInsert->getOperand(0),
16368 [](Value *Vec) {
16369 return cast<VectorType>(Vec->getType())
16370 ->getElementCount()
16371 .getKnownMinValue();
16373 ResizeToVF,
16374 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask,
16375 ArrayRef<Value *> Vals) {
16376 assert((Vals.size() == 1 || Vals.size() == 2) &&
16377 "Expected exactly 1 or 2 input values.");
16378 if (Vals.size() == 1) {
16379 // Do not create shuffle if the mask is a simple identity
16380 // non-resizing mask.
16381 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType())
16382 ->getNumElements() ||
16383 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
16384 return CreateShuffle(Vals.front(), nullptr, Mask);
16385 return Vals.front();
16387 return CreateShuffle(Vals.front() ? Vals.front()
16388 : FirstInsert->getOperand(0),
16389 Vals.back(), Mask);
16391 auto It = ShuffledInserts[I].InsertElements.rbegin();
16392 // Rebuild buildvector chain.
16393 InsertElementInst *II = nullptr;
16394 if (It != ShuffledInserts[I].InsertElements.rend())
16395 II = *It;
16396 SmallVector<Instruction *> Inserts;
16397 while (It != ShuffledInserts[I].InsertElements.rend()) {
16398 assert(II && "Must be an insertelement instruction.");
16399 if (*It == II)
16400 ++It;
16401 else
16402 Inserts.push_back(cast<Instruction>(II));
16403 II = dyn_cast<InsertElementInst>(II->getOperand(0));
16405 for (Instruction *II : reverse(Inserts)) {
16406 II->replaceUsesOfWith(II->getOperand(0), NewInst);
16407 if (auto *NewI = dyn_cast<Instruction>(NewInst))
16408 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI))
16409 II->moveAfter(NewI);
16410 NewInst = II;
16412 LastInsert->replaceAllUsesWith(NewInst);
16413 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) {
16414 IE->replaceUsesOfWith(IE->getOperand(0),
16415 PoisonValue::get(IE->getOperand(0)->getType()));
16416 IE->replaceUsesOfWith(IE->getOperand(1),
16417 PoisonValue::get(IE->getOperand(1)->getType()));
16418 eraseInstruction(IE);
16420 CSEBlocks.insert(LastInsert->getParent());
16423 SmallVector<Instruction *> RemovedInsts;
16424 // For each vectorized value:
16425 for (auto &TEPtr : VectorizableTree) {
16426 TreeEntry *Entry = TEPtr.get();
16428 // No need to handle users of gathered values.
16429 if (Entry->isGather())
16430 continue;
16432 assert(Entry->VectorizedValue && "Can't find vectorizable value");
16434 // For each lane:
16435 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
16436 Value *Scalar = Entry->Scalars[Lane];
16438 if (Entry->getOpcode() == Instruction::GetElementPtr &&
16439 !isa<GetElementPtrInst>(Scalar))
16440 continue;
16441 if (auto *EE = dyn_cast<ExtractElementInst>(Scalar);
16442 EE && IgnoredExtracts.contains(EE))
16443 continue;
16444 #ifndef NDEBUG
16445 Type *Ty = Scalar->getType();
16446 if (!Ty->isVoidTy()) {
16447 for (User *U : Scalar->users()) {
16448 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
16450 // It is legal to delete users in the ignorelist.
16451 assert((getTreeEntry(U) ||
16452 (UserIgnoreList && UserIgnoreList->contains(U)) ||
16453 (isa_and_nonnull<Instruction>(U) &&
16454 isDeleted(cast<Instruction>(U)))) &&
16455 "Deleting out-of-tree value");
16458 #endif
16459 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
16460 auto *I = cast<Instruction>(Scalar);
16461 RemovedInsts.push_back(I);
16465 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the
16466 // new vector instruction.
16467 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue))
16468 V->mergeDIAssignID(RemovedInsts);
16470 // Clear up reduction references, if any.
16471 if (UserIgnoreList) {
16472 for (Instruction *I : RemovedInsts) {
16473 const TreeEntry *IE = getTreeEntry(I);
16474 if (IE->Idx != 0 &&
16475 !(VectorizableTree.front()->isGather() &&
16476 !IE->UserTreeIndices.empty() &&
16477 (ValueToGatherNodes.lookup(I).contains(
16478 VectorizableTree.front().get()) ||
16479 any_of(IE->UserTreeIndices,
16480 [&](const EdgeInfo &EI) {
16481 return EI.UserTE == VectorizableTree.front().get() &&
16482 EI.EdgeIdx == UINT_MAX;
16483 }))) &&
16484 !(GatheredLoadsEntriesFirst.has_value() &&
16485 IE->Idx >= *GatheredLoadsEntriesFirst &&
16486 VectorizableTree.front()->isGather() &&
16487 is_contained(VectorizableTree.front()->Scalars, I)))
16488 continue;
16489 SmallVector<SelectInst *> LogicalOpSelects;
16490 I->replaceUsesWithIf(PoisonValue::get(I->getType()), [&](Use &U) {
16491 // Do not replace the condition of a logical op written as a select <cond>.
16492 bool IsPoisoningLogicalOp = isa<SelectInst>(U.getUser()) &&
16493 (match(U.getUser(), m_LogicalAnd()) ||
16494 match(U.getUser(), m_LogicalOr())) &&
16495 U.getOperandNo() == 0;
16496 if (IsPoisoningLogicalOp) {
16497 LogicalOpSelects.push_back(cast<SelectInst>(U.getUser()));
16498 return false;
16500 return UserIgnoreList->contains(U.getUser());
16502 // Replace conditions of the poisoning logical ops with the non-poison
16503 // constant value.
16504 for (SelectInst *SI : LogicalOpSelects)
16505 SI->setCondition(Constant::getNullValue(SI->getCondition()->getType()));
16508 // Retain to-be-deleted instructions for some debug-info bookkeeping and alias
16509 // cache correctness.
16510 // NOTE: removeInstructionAndOperands only marks the instruction for deletion
16511 // - instructions are not deleted until later.
16512 removeInstructionsAndOperands(ArrayRef(RemovedInsts));
16514 Builder.ClearInsertionPoint();
16515 InstrElementSize.clear();
16517 const TreeEntry &RootTE = *VectorizableTree.front();
16518 Value *Vec = RootTE.VectorizedValue;
16519 if (auto It = MinBWs.find(&RootTE); ReductionBitWidth != 0 &&
16520 It != MinBWs.end() &&
16521 ReductionBitWidth != It->second.first) {
16522 IRBuilder<>::InsertPointGuard Guard(Builder);
16523 Builder.SetInsertPoint(ReductionRoot->getParent(),
16524 ReductionRoot->getIterator());
16525 Vec = Builder.CreateIntCast(
16526 Vec,
16527 VectorType::get(Builder.getIntNTy(ReductionBitWidth),
16528 cast<VectorType>(Vec->getType())->getElementCount()),
16529 It->second.second);
16531 return Vec;
16534 void BoUpSLP::optimizeGatherSequence() {
16535 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size()
16536 << " gather sequences instructions.\n");
16537 // LICM InsertElementInst sequences.
16538 for (Instruction *I : GatherShuffleExtractSeq) {
16539 if (isDeleted(I))
16540 continue;
16542 // Check if this block is inside a loop.
16543 Loop *L = LI->getLoopFor(I->getParent());
16544 if (!L)
16545 continue;
16547 // Check if it has a preheader.
16548 BasicBlock *PreHeader = L->getLoopPreheader();
16549 if (!PreHeader)
16550 continue;
16552 // If the vector or the element that we insert into it are
16553 // instructions that are defined in this basic block then we can't
16554 // hoist this instruction.
16555 if (any_of(I->operands(), [L](Value *V) {
16556 auto *OpI = dyn_cast<Instruction>(V);
16557 return OpI && L->contains(OpI);
16559 continue;
16561 // We can hoist this instruction. Move it to the pre-header.
16562 I->moveBefore(PreHeader->getTerminator());
16563 CSEBlocks.insert(PreHeader);
16566 // Make a list of all reachable blocks in our CSE queue.
16567 SmallVector<const DomTreeNode *, 8> CSEWorkList;
16568 CSEWorkList.reserve(CSEBlocks.size());
16569 for (BasicBlock *BB : CSEBlocks)
16570 if (DomTreeNode *N = DT->getNode(BB)) {
16571 assert(DT->isReachableFromEntry(N));
16572 CSEWorkList.push_back(N);
16575 // Sort blocks by domination. This ensures we visit a block after all blocks
16576 // dominating it are visited.
16577 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
16578 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
16579 "Different nodes should have different DFS numbers");
16580 return A->getDFSNumIn() < B->getDFSNumIn();
16583 // Less defined shuffles can be replaced by the more defined copies.
16584 // Of two shuffles with the same vector operands, one is less defined if each
16585 // of its mask indices either matches the corresponding index of the other
16586 // shuffle or is undef. E.g. shuffle %0, poison, <0, 0, 0, undef> is less
16587 // defined than shuffle %0, poison, <0, 0, 0, 0>.
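// For illustration (hypothetical masks): for two shuffles with identical
// operands and masks <0, 1, 2, poison> and <0, poison, 2, poison>, the lambda
// below combines the masks into <0, 1, 2, poison>; lanes that are defined in
// both masks must agree, otherwise the shuffles cannot be merged. A final
// check makes sure the merged mask does not increase the number of vector
// registers used.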
16588 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
16589 SmallVectorImpl<int> &NewMask) {
16590 if (I1->getType() != I2->getType())
16591 return false;
16592 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
16593 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
16594 if (!SI1 || !SI2)
16595 return I1->isIdenticalTo(I2);
16596 if (SI1->isIdenticalTo(SI2))
16597 return true;
16598 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
16599 if (SI1->getOperand(I) != SI2->getOperand(I))
16600 return false;
16601 // Check if the second instruction is more defined than the first one.
16602 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
16603 ArrayRef<int> SM1 = SI1->getShuffleMask();
16604 // Count trailing undefs in the mask to check the final number of used
16605 // registers.
16606 unsigned LastUndefsCnt = 0;
16607 for (int I = 0, E = NewMask.size(); I < E; ++I) {
16608 if (SM1[I] == PoisonMaskElem)
16609 ++LastUndefsCnt;
16610 else
16611 LastUndefsCnt = 0;
16612 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem &&
16613 NewMask[I] != SM1[I])
16614 return false;
16615 if (NewMask[I] == PoisonMaskElem)
16616 NewMask[I] = SM1[I];
16618 // Check if the last undefs actually change the final number of used vector
16619 // registers.
16620 return SM1.size() - LastUndefsCnt > 1 &&
16621 TTI->getNumberOfParts(SI1->getType()) ==
16622 TTI->getNumberOfParts(
16623 getWidenedType(SI1->getType()->getElementType(),
16624 SM1.size() - LastUndefsCnt));
16626 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
16627 // instructions. TODO: We can further optimize this scan if we split the
16628 // instructions into different buckets based on the insert lane.
16629 SmallVector<Instruction *, 16> Visited;
16630 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
16631 assert(*I &&
16632 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
16633 "Worklist not sorted properly!");
16634 BasicBlock *BB = (*I)->getBlock();
16635 // For all instructions in blocks containing gather sequences:
16636 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
16637 if (isDeleted(&In))
16638 continue;
16639 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) &&
16640 !GatherShuffleExtractSeq.contains(&In))
16641 continue;
16643 // Check if we can replace this instruction with any of the
16644 // visited instructions.
16645 bool Replaced = false;
16646 for (Instruction *&V : Visited) {
16647 SmallVector<int> NewMask;
16648 if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
16649 DT->dominates(V->getParent(), In.getParent())) {
16650 In.replaceAllUsesWith(V);
16651 eraseInstruction(&In);
16652 if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
16653 if (!NewMask.empty())
16654 SI->setShuffleMask(NewMask);
16655 Replaced = true;
16656 break;
16658 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
16659 GatherShuffleExtractSeq.contains(V) &&
16660 IsIdenticalOrLessDefined(V, &In, NewMask) &&
16661 DT->dominates(In.getParent(), V->getParent())) {
16662 In.moveAfter(V);
16663 V->replaceAllUsesWith(&In);
16664 eraseInstruction(V);
16665 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
16666 if (!NewMask.empty())
16667 SI->setShuffleMask(NewMask);
16668 V = &In;
16669 Replaced = true;
16670 break;
16673 if (!Replaced) {
16674 assert(!is_contained(Visited, &In));
16675 Visited.push_back(&In);
16679 CSEBlocks.clear();
16680 GatherShuffleExtractSeq.clear();
16683 BoUpSLP::ScheduleData *
16684 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
16685 ScheduleData *Bundle = nullptr;
16686 ScheduleData *PrevInBundle = nullptr;
16687 for (Value *V : VL) {
16688 if (doesNotNeedToBeScheduled(V))
16689 continue;
16690 ScheduleData *BundleMember = getScheduleData(V);
16691 assert(BundleMember &&
16692 "no ScheduleData for bundle member "
16693 "(maybe not in same basic block)");
16694 assert(BundleMember->isSchedulingEntity() &&
16695 "bundle member already part of other bundle");
16696 if (PrevInBundle) {
16697 PrevInBundle->NextInBundle = BundleMember;
16698 } else {
16699 Bundle = BundleMember;
16702 // Group the instructions to a bundle.
16703 BundleMember->FirstInBundle = Bundle;
16704 PrevInBundle = BundleMember;
16706 assert(Bundle && "Failed to find schedule bundle");
16707 return Bundle;
16710 // Groups the instructions into a bundle (which is then a single scheduling
16711 // entity) and schedules instructions until the bundle gets ready.
16712 std::optional<BoUpSLP::ScheduleData *>
16713 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
16714 const InstructionsState &S) {
16715 // No need to schedule PHIs, insertelement, extractelement and extractvalue
16716 // instructions.
16717 if (isa<PHINode>(S.OpValue) || isVectorLikeInstWithConstOps(S.OpValue) ||
16718 doesNotNeedToSchedule(VL))
16719 return nullptr;
16721 // Initialize the instruction bundle.
16722 Instruction *OldScheduleEnd = ScheduleEnd;
16723 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.OpValue << "\n");
16725 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
16726 ScheduleData *Bundle) {
16727 // The scheduling region got new instructions at the lower end (or it is a
16728 // new region for the first bundle). This makes it necessary to
16729 // recalculate all dependencies.
16730 // It is seldom that this needs to be done a second time after adding the
16731 // initial bundle to the region.
16732 if (ScheduleEnd != OldScheduleEnd) {
16733 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
16734 if (ScheduleData *SD = getScheduleData(I))
16735 SD->clearDependencies();
16736 ReSchedule = true;
16738 if (Bundle) {
16739 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
16740 << " in block " << BB->getName() << "\n");
16741 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
16744 if (ReSchedule) {
16745 resetSchedule();
16746 initialFillReadyList(ReadyInsts);
16749 // Now try to schedule the new bundle or (if no bundle) just calculate
16750 // dependencies. As soon as the bundle is "ready" it means that there are no
16751 // cyclic dependencies and we can schedule it. Note that it's important that
16752 // we don't "schedule" the bundle yet (see cancelScheduling).
16753 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
16754 !ReadyInsts.empty()) {
16755 ScheduleData *Picked = ReadyInsts.pop_back_val();
16756 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
16757 "must be ready to schedule");
16758 schedule(Picked, ReadyInsts);
16762 // Make sure that the scheduling region contains all
16763 // instructions of the bundle.
16764 for (Value *V : VL) {
16765 if (doesNotNeedToBeScheduled(V))
16766 continue;
16767 if (!extendSchedulingRegion(V, S)) {
16768 // If the scheduling region got new instructions at the lower end (or it
16769 // is a new region for the first bundle), all dependencies have to be
16770 // recalculated.
16771 // Otherwise the compiler may crash trying to calculate dependencies
16772 // incorrectly and emit instructions in the wrong order at the actual
16773 // scheduling.
16774 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
16775 return std::nullopt;
16779 bool ReSchedule = false;
16780 for (Value *V : VL) {
16781 if (doesNotNeedToBeScheduled(V))
16782 continue;
16783 ScheduleData *BundleMember = getScheduleData(V);
16784 assert(BundleMember &&
16785 "no ScheduleData for bundle member (maybe not in same basic block)");
16787 // Make sure we don't leave the pieces of the bundle in the ready list when
16788 // the whole bundle might not be ready.
16789 ReadyInsts.remove(BundleMember);
16791 if (!BundleMember->IsScheduled)
16792 continue;
16793 // A bundle member was scheduled as single instruction before and now
16794 // needs to be scheduled as part of the bundle. We just get rid of the
16795 // existing schedule.
16796 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
16797 << " was already scheduled\n");
16798 ReSchedule = true;
16801 auto *Bundle = buildBundle(VL);
16802 TryScheduleBundleImpl(ReSchedule, Bundle);
16803 if (!Bundle->isReady()) {
16804 cancelScheduling(VL, S.OpValue);
16805 return std::nullopt;
16807 return Bundle;
16810 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
16811 Value *OpValue) {
16812 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) ||
16813 doesNotNeedToSchedule(VL))
16814 return;
16816 if (doesNotNeedToBeScheduled(OpValue))
16817 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled);
16818 ScheduleData *Bundle = getScheduleData(OpValue);
16819 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
16820 assert(!Bundle->IsScheduled &&
16821 "Can't cancel bundle which is already scheduled");
16822 assert(Bundle->isSchedulingEntity() &&
16823 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) &&
16824 "tried to unbundle something which is not a bundle");
16826 // Remove the bundle from the ready list.
16827 if (Bundle->isReady())
16828 ReadyInsts.remove(Bundle);
16830 // Un-bundle: make single instructions out of the bundle.
16831 ScheduleData *BundleMember = Bundle;
16832 while (BundleMember) {
16833 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
16834 BundleMember->FirstInBundle = BundleMember;
16835 ScheduleData *Next = BundleMember->NextInBundle;
16836 BundleMember->NextInBundle = nullptr;
16837 BundleMember->TE = nullptr;
16838 if (BundleMember->unscheduledDepsInBundle() == 0) {
16839 ReadyInsts.insert(BundleMember);
16841 BundleMember = Next;
16845 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
16846 // Allocate a new ScheduleData for the instruction.
16847 if (ChunkPos >= ChunkSize) {
16848 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
16849 ChunkPos = 0;
16851 return &(ScheduleDataChunks.back()[ChunkPos++]);
16854 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(
16855 Value *V, const InstructionsState &S) {
16856 Instruction *I = dyn_cast<Instruction>(V);
16857 assert(I && "bundle member must be an instruction");
16858 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
16859 !doesNotNeedToBeScheduled(I) &&
16860 "phi nodes/insertelements/extractelements/extractvalues don't need to "
16861 "be scheduled");
16862 if (getScheduleData(I))
16863 return true;
16864 if (!ScheduleStart) {
16865 // It's the first instruction in the new region.
16866 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
16867 ScheduleStart = I;
16868 ScheduleEnd = I->getNextNode();
16869 assert(ScheduleEnd && "tried to vectorize a terminator?");
16870 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
16871 return true;
16873 // Search up and down at the same time, because we don't know if the new
16874 // instruction is above or below the existing scheduling region.
16875 // Ignore debug info (and other "AssumeLike" intrinsics) so that it's not
16876 // counted against the budget. Otherwise debug info could affect codegen.
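// Illustrative sketch (hypothetical block): if the current region spans
// [%c, %f) in a block %a, %b, %c, %d, %e, %f, %g and the new bundle member is
// %a, the search below walks %b upwards and %f downwards in lockstep; it
// reaches %a on the upward side, so the region start is moved to %a. Each
// iteration of the walk counts against ScheduleRegionSizeLimit.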
16877 BasicBlock::reverse_iterator UpIter =
16878 ++ScheduleStart->getIterator().getReverse();
16879 BasicBlock::reverse_iterator UpperEnd = BB->rend();
16880 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
16881 BasicBlock::iterator LowerEnd = BB->end();
16882 auto IsAssumeLikeIntr = [](const Instruction &I) {
16883 if (auto *II = dyn_cast<IntrinsicInst>(&I))
16884 return II->isAssumeLikeIntrinsic();
16885 return false;
16887 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr);
16888 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr);
16889 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
16890 &*DownIter != I) {
16891 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
16892 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
16893 return false;
16896 ++UpIter;
16897 ++DownIter;
16899 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr);
16900 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr);
16902 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
16903 assert(I->getParent() == ScheduleStart->getParent() &&
16904 "Instruction is in wrong basic block.");
16905 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
16906 ScheduleStart = I;
16907 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
16908 << "\n");
16909 return true;
16911 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
16912 "Expected to reach top of the basic block or instruction down the "
16913 "lower end.");
16914 assert(I->getParent() == ScheduleEnd->getParent() &&
16915 "Instruction is in wrong basic block.");
16916 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
16917 nullptr);
16918 ScheduleEnd = I->getNextNode();
16919 assert(ScheduleEnd && "tried to vectorize a terminator?");
16920 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
16921 return true;
16924 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
16925 Instruction *ToI,
16926 ScheduleData *PrevLoadStore,
16927 ScheduleData *NextLoadStore) {
16928 ScheduleData *CurrentLoadStore = PrevLoadStore;
16929 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
16930 // No need to allocate data for non-schedulable instructions.
16931 if (doesNotNeedToBeScheduled(I))
16932 continue;
16933 ScheduleData *SD = ScheduleDataMap.lookup(I);
16934 if (!SD) {
16935 SD = allocateScheduleDataChunks();
16936 ScheduleDataMap[I] = SD;
16938 assert(!isInSchedulingRegion(SD) &&
16939 "new ScheduleData already in scheduling region");
16940 SD->init(SchedulingRegionID, I);
16942 if (I->mayReadOrWriteMemory() &&
16943 (!isa<IntrinsicInst>(I) ||
16944 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
16945 cast<IntrinsicInst>(I)->getIntrinsicID() !=
16946 Intrinsic::pseudoprobe))) {
16947 // Update the linked list of memory accessing instructions.
16948 if (CurrentLoadStore) {
16949 CurrentLoadStore->NextLoadStore = SD;
16950 } else {
16951 FirstLoadStoreInRegion = SD;
16953 CurrentLoadStore = SD;
16956 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
16957 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
16958 RegionHasStackSave = true;
16960 if (NextLoadStore) {
16961 if (CurrentLoadStore)
16962 CurrentLoadStore->NextLoadStore = NextLoadStore;
16963 } else {
16964 LastLoadStoreInRegion = CurrentLoadStore;
16968 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
16969 bool InsertInReadyList,
16970 BoUpSLP *SLP) {
16971 assert(SD->isSchedulingEntity());
16973 SmallVector<ScheduleData *, 10> WorkList;
16974 WorkList.push_back(SD);
16976 while (!WorkList.empty()) {
16977 ScheduleData *SD = WorkList.pop_back_val();
16978 for (ScheduleData *BundleMember = SD; BundleMember;
16979 BundleMember = BundleMember->NextInBundle) {
16980 assert(isInSchedulingRegion(BundleMember));
16981 if (BundleMember->hasValidDependencies())
16982 continue;
16984 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
16985 << "\n");
16986 BundleMember->Dependencies = 0;
16987 BundleMember->resetUnscheduledDeps();
16989 // Handle def-use chain dependencies.
16990 for (User *U : BundleMember->Inst->users()) {
16991 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
16992 BundleMember->Dependencies++;
16993 ScheduleData *DestBundle = UseSD->FirstInBundle;
16994 if (!DestBundle->IsScheduled)
16995 BundleMember->incrementUnscheduledDeps(1);
16996 if (!DestBundle->hasValidDependencies())
16997 WorkList.push_back(DestBundle);
17001 auto MakeControlDependent = [&](Instruction *I) {
17002 auto *DepDest = getScheduleData(I);
17003 assert(DepDest && "must be in schedule window");
17004 DepDest->ControlDependencies.push_back(BundleMember);
17005 BundleMember->Dependencies++;
17006 ScheduleData *DestBundle = DepDest->FirstInBundle;
17007 if (!DestBundle->IsScheduled)
17008 BundleMember->incrementUnscheduledDeps(1);
17009 if (!DestBundle->hasValidDependencies())
17010 WorkList.push_back(DestBundle);
17013 // Any instruction which isn't safe to speculate at the beginning of the
17014 // block is control dependent on any early exit or non-willreturn call
17015 // which precedes it.
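// For illustration (hypothetical IR): given
//   call void @may_not_return()
//   %d = udiv i32 %x, %y   ; may trap, not speculatable
// the udiv must not be reordered above the call, because the call might never
// return and the udiv could then trap on a path where it would not have
// executed originally. The dependencies added below enforce that ordering.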
17016 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
17017 for (Instruction *I = BundleMember->Inst->getNextNode();
17018 I != ScheduleEnd; I = I->getNextNode()) {
17019 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
17020 continue;
17022 // Add the dependency
17023 MakeControlDependent(I);
17025 if (!isGuaranteedToTransferExecutionToSuccessor(I))
17026 // Everything past here must be control dependent on I.
17027 break;
17031 if (RegionHasStackSave) {
17032 // If we have an inalloca alloca instruction, it needs to be scheduled
17033 // after any preceding stacksave. We also need to prevent any alloca
17034 // from reordering above a preceding stackrestore.
17035 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
17036 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
17037 for (Instruction *I = BundleMember->Inst->getNextNode();
17038 I != ScheduleEnd; I = I->getNextNode()) {
17039 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
17040 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
17041 // Any allocas past here must be control dependent on I, and I
17042 // must be memory dependent on BundleMember->Inst.
17043 break;
17045 if (!isa<AllocaInst>(I))
17046 continue;
17048 // Add the dependency
17049 MakeControlDependent(I);
17053 // In addition to the cases handled just above, we need to prevent
17054 // allocas and loads/stores from moving below a stacksave or a
17055 // stackrestore. Avoiding moving allocas below a stackrestore is currently
17056 // believed to be conservative. Moving loads/stores below a stackrestore
17057 // can lead to incorrect code.
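// For illustration (hypothetical IR): in
//   %p = alloca i8
//   store i8 0, ptr %p
//   call void @llvm.stackrestore(ptr %ss)
// the store must not sink below the stackrestore, which may deallocate the
// stack memory %p points to (if %p was allocated after the matching
// stacksave).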
17058 if (isa<AllocaInst>(BundleMember->Inst) ||
17059 BundleMember->Inst->mayReadOrWriteMemory()) {
17060 for (Instruction *I = BundleMember->Inst->getNextNode();
17061 I != ScheduleEnd; I = I->getNextNode()) {
17062 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
17063 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
17064 continue;
17066 // Add the dependency
17067 MakeControlDependent(I);
17068 break;
17073 // Handle the memory dependencies (if any).
17074 ScheduleData *DepDest = BundleMember->NextLoadStore;
17075 if (!DepDest)
17076 continue;
17077 Instruction *SrcInst = BundleMember->Inst;
17078 assert(SrcInst->mayReadOrWriteMemory() &&
17079 "NextLoadStore list for non memory effecting bundle?");
17080 MemoryLocation SrcLoc = getLocation(SrcInst);
17081 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
17082 unsigned NumAliased = 0;
17083 unsigned DistToSrc = 1;
17085 for (; DepDest; DepDest = DepDest->NextLoadStore) {
17086 assert(isInSchedulingRegion(DepDest));
17088 // We have two limits to reduce the complexity:
17089 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
17090 // SLP->isAliased (which is the expensive part in this loop).
17091 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
17092 // the whole loop (even if the loop is fast, it's quadratic).
17093 // It's important for the loop break condition (see below) to
17094 // check this limit even between two read-only instructions.
17095 if (DistToSrc >= MaxMemDepDistance ||
17096 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
17097 (NumAliased >= AliasedCheckLimit ||
17098 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
17100 // We increment the counter only if the locations are aliased
17101 // (instead of counting all alias checks). This gives a better
17102 // balance between reduced runtime and accurate dependencies.
17103 NumAliased++;
17105 DepDest->MemoryDependencies.push_back(BundleMember);
17106 BundleMember->Dependencies++;
17107 ScheduleData *DestBundle = DepDest->FirstInBundle;
17108 if (!DestBundle->IsScheduled) {
17109 BundleMember->incrementUnscheduledDeps(1);
17111 if (!DestBundle->hasValidDependencies()) {
17112 WorkList.push_back(DestBundle);
17116 // Example, explaining the loop break condition: Let's assume our
17117 // starting instruction is i0 and MaxMemDepDistance = 3.
17119 // +--------v--v--v
17120 // i0,i1,i2,i3,i4,i5,i6,i7,i8
17121 // +--------^--^--^
17123 // MaxMemDepDistance let us stop alias-checking at i3 and we add
17124 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
17125 // Previously we already added dependencies from i3 to i6,i7,i8
17126 // (because of MaxMemDepDistance). As we added a dependency from
17127 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
17128 // and we can abort this loop at i6.
17129 if (DistToSrc >= 2 * MaxMemDepDistance)
17130 break;
17131 DistToSrc++;
17134 if (InsertInReadyList && SD->isReady()) {
17135 ReadyInsts.insert(SD);
17136 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
17137 << "\n");
17142 void BoUpSLP::BlockScheduling::resetSchedule() {
17143 assert(ScheduleStart &&
17144 "tried to reset schedule on block which has not been scheduled");
17145 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
17146 if (ScheduleData *SD = getScheduleData(I)) {
17147 assert(isInSchedulingRegion(SD) &&
17148 "ScheduleData not in scheduling region");
17149 SD->IsScheduled = false;
17150 SD->resetUnscheduledDeps();
17153 ReadyInsts.clear();
17156 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
17157 if (!BS->ScheduleStart)
17158 return;
17160 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
17162 // A key point - if we got here, pre-scheduling was able to find a valid
17163 // scheduling of the sub-graph of the scheduling window which consists
17164 // of all vector bundles and their transitive users. As such, we do not
17165 // need to reschedule anything *outside of* that subgraph.
17167 BS->resetSchedule();
17169 // For the real scheduling we use a more sophisticated ready-list: it is
17170 // sorted by the original instruction location. This lets the final schedule
17171 // be as close as possible to the original instruction order.
17172 // WARNING: If changing this order causes a correctness issue, that means
17173 // there is some missing dependence edge in the schedule data graph.
17174 struct ScheduleDataCompare {
17175 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
17176 return SD2->SchedulingPriority < SD1->SchedulingPriority;
17179 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
17181 // Ensure that all dependency data is updated (for nodes in the sub-graph)
17182 // and fill the ready-list with initial instructions.
17183 int Idx = 0;
17184 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
17185 I = I->getNextNode()) {
17186 if (ScheduleData *SD = BS->getScheduleData(I)) {
17187 TreeEntry *SDTE = getTreeEntry(SD->Inst);
17188 (void)SDTE;
17189 assert((isVectorLikeInstWithConstOps(SD->Inst) ||
17190 SD->isPartOfBundle() ==
17191 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) &&
17192 "scheduler and vectorizer bundle mismatch");
17193 SD->FirstInBundle->SchedulingPriority = Idx++;
17195 if (SD->isSchedulingEntity() && SD->isPartOfBundle())
17196 BS->calculateDependencies(SD, false, this);
17199 BS->initialFillReadyList(ReadyInsts);
17201 Instruction *LastScheduledInst = BS->ScheduleEnd;
17203 // Do the "real" scheduling.
17204 while (!ReadyInsts.empty()) {
17205 ScheduleData *Picked = *ReadyInsts.begin();
17206 ReadyInsts.erase(ReadyInsts.begin());
17208 // Move the scheduled instruction(s) to their dedicated places, if not
17209 // there yet.
17210 for (ScheduleData *BundleMember = Picked; BundleMember;
17211 BundleMember = BundleMember->NextInBundle) {
17212 Instruction *PickedInst = BundleMember->Inst;
17213 if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
17214 PickedInst->moveAfter(LastScheduledInst->getPrevNode());
17215 LastScheduledInst = PickedInst;
17218 BS->schedule(Picked, ReadyInsts);
17221 // Check that we didn't break any of our invariants.
17222 #ifdef EXPENSIVE_CHECKS
17223 BS->verify();
17224 #endif
17226 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
17227 // Check that all schedulable entities got scheduled
17228 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) {
17229 ScheduleData *SD = BS->getScheduleData(I);
17230 if (SD && SD->isSchedulingEntity() && SD->hasValidDependencies())
17231 assert(SD->IsScheduled && "must be scheduled at this point");
17233 #endif
17235 // Avoid duplicate scheduling of the block.
17236 BS->ScheduleStart = nullptr;
17239 unsigned BoUpSLP::getVectorElementSize(Value *V) {
17240 // If V is a store, just return the width of the stored value (or value
17241 // truncated just before storing) without traversing the expression tree.
17242 // This is the common case.
17243 if (auto *Store = dyn_cast<StoreInst>(V))
17244 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
17246 if (auto *IEI = dyn_cast<InsertElementInst>(V))
17247 return getVectorElementSize(IEI->getOperand(1));
17249 auto E = InstrElementSize.find(V);
17250 if (E != InstrElementSize.end())
17251 return E->second;
17253 // If V is not a store, we can traverse the expression tree to find loads
17254 // that feed it. The type of the loaded value may indicate a more suitable
17255 // width than V's type. We want to base the vector element size on the width
17256 // of memory operations where possible.
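// Illustrative sketch (hypothetical IR): for
//   %l = load i16, ptr %p
//   %e = sext i16 %l to i64
// (both in the same basic block), the element size computed for %e is 16
// bits, taken from the i16 load that feeds it, rather than 64 bits from the
// type of %e itself.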
17257 SmallVector<std::tuple<Instruction *, BasicBlock *, unsigned>> Worklist;
17258 SmallPtrSet<Instruction *, 16> Visited;
17259 if (auto *I = dyn_cast<Instruction>(V)) {
17260 Worklist.emplace_back(I, I->getParent(), 0);
17261 Visited.insert(I);
17264 // Traverse the expression tree in bottom-up order looking for loads. If we
17265 // encounter an instruction we don't yet handle, we give up.
17266 auto Width = 0u;
17267 Value *FirstNonBool = nullptr;
17268 while (!Worklist.empty()) {
17269 auto [I, Parent, Level] = Worklist.pop_back_val();
17271 // We should only be looking at scalar instructions here. If the current
17272 // instruction has a vector type, skip.
17273 auto *Ty = I->getType();
17274 if (isa<VectorType>(Ty))
17275 continue;
17276 if (Ty != Builder.getInt1Ty() && !FirstNonBool)
17277 FirstNonBool = I;
17278 if (Level > RecursionMaxDepth)
17279 continue;
17281 // If the current instruction is a load, extractelement, or extractvalue,
17282 // update Width to reflect the width of the produced value.
17283 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I))
17284 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
17286 // Otherwise, we need to visit the operands of the instruction. We only
17287 // handle the interesting cases from buildTree here. If an operand is an
17288 // instruction we haven't yet visited and from the same basic block as the
17289 // user or the use is a PHI node, we add it to the worklist.
17290 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst,
17291 BinaryOperator, UnaryOperator>(I)) {
17292 for (Use &U : I->operands()) {
17293 if (auto *J = dyn_cast<Instruction>(U.get()))
17294 if (Visited.insert(J).second &&
17295 (isa<PHINode>(I) || J->getParent() == Parent)) {
17296 Worklist.emplace_back(J, J->getParent(), Level + 1);
17297 continue;
17299 if (!FirstNonBool && U.get()->getType() != Builder.getInt1Ty())
17300 FirstNonBool = U.get();
17302 } else {
17303 break;
17307 // If we didn't encounter a memory access in the expression tree, or if we
17308 // gave up for some reason, just return the width of V. Otherwise, return the
17309 // maximum width we found.
17310 if (!Width) {
17311 if (V->getType() == Builder.getInt1Ty() && FirstNonBool)
17312 V = FirstNonBool;
17313 Width = DL->getTypeSizeInBits(V->getType());
17316 for (Instruction *I : Visited)
17317 InstrElementSize[I] = Width;
17319 return Width;
17322 bool BoUpSLP::collectValuesToDemote(
17323 const TreeEntry &E, bool IsProfitableToDemoteRoot, unsigned &BitWidth,
17324 SmallVectorImpl<unsigned> &ToDemote, DenseSet<const TreeEntry *> &Visited,
17325 unsigned &MaxDepthLevel, bool &IsProfitableToDemote,
17326 bool IsTruncRoot) const {
17327 // We can always demote constants.
17328 if (all_of(E.Scalars, IsaPred<Constant>))
17329 return true;
17331 unsigned OrigBitWidth =
17332 DL->getTypeSizeInBits(E.Scalars.front()->getType()->getScalarType());
17333 if (OrigBitWidth == BitWidth) {
17334 MaxDepthLevel = 1;
17335 return true;
17338 // If the value is not a vectorized instruction in the expression and not used
17339 // by the insertelement instruction and not used in multiple vector nodes, it
17340 // cannot be demoted.
17341 bool IsSignedNode = any_of(E.Scalars, [&](Value *R) {
17342 return !isKnownNonNegative(R, SimplifyQuery(*DL));
17344 auto IsPotentiallyTruncated = [&](Value *V, unsigned &BitWidth) -> bool {
17345 if (MultiNodeScalars.contains(V))
17346 return false;
17347 // For the last shuffle of sext/zext with many uses, we need to check the
17348 // extra bit for unsigned values; otherwise we may get incorrect casting for
17349 // reused scalars.
17350 bool IsSignedVal = !isKnownNonNegative(V, SimplifyQuery(*DL));
17351 if ((!IsSignedNode || IsSignedVal) && OrigBitWidth > BitWidth) {
17352 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17353 if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
17354 return true;
17356 unsigned NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
17357 unsigned BitWidth1 = OrigBitWidth - NumSignBits;
17358 if (IsSignedNode)
17359 ++BitWidth1;
17360 if (auto *I = dyn_cast<Instruction>(V)) {
17361 APInt Mask = DB->getDemandedBits(I);
17362 unsigned BitWidth2 =
17363 std::max<unsigned>(1, Mask.getBitWidth() - Mask.countl_zero());
17364 while (!IsSignedNode && BitWidth2 < OrigBitWidth) {
17365 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth2 - 1);
17366 if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
17367 break;
17368 BitWidth2 *= 2;
17370 BitWidth1 = std::min(BitWidth1, BitWidth2);
17372 BitWidth = std::max(BitWidth, BitWidth1);
17373 return BitWidth > 0 && OrigBitWidth >= (BitWidth * 2);
17375 using namespace std::placeholders;
17376 auto FinalAnalysis = [&]() {
17377 if (!IsProfitableToDemote)
17378 return false;
17379 bool Res = all_of(
17380 E.Scalars, std::bind(IsPotentiallyTruncated, _1, std::ref(BitWidth)));
17381 // Demote gathers.
17382 if (Res && E.isGather()) {
17383 // Check possible extractelement instructions bases and final vector
17384 // length.
17385 SmallPtrSet<Value *, 4> UniqueBases;
17386 for (Value *V : E.Scalars) {
17387 auto *EE = dyn_cast<ExtractElementInst>(V);
17388 if (!EE)
17389 continue;
17390 UniqueBases.insert(EE->getVectorOperand());
17392 const unsigned VF = E.Scalars.size();
17393 Type *OrigScalarTy = E.Scalars.front()->getType();
17394 if (UniqueBases.size() <= 2 ||
17395 TTI->getNumberOfParts(getWidenedType(OrigScalarTy, VF)) ==
17396 TTI->getNumberOfParts(getWidenedType(
17397 IntegerType::get(OrigScalarTy->getContext(), BitWidth), VF)))
17398 ToDemote.push_back(E.Idx);
17400 return Res;
17402 if (E.isGather() || !Visited.insert(&E).second ||
17403 any_of(E.Scalars, [&](Value *V) {
17404 return all_of(V->users(), [&](User *U) {
17405 return isa<InsertElementInst>(U) && !getTreeEntry(U);
17408 return FinalAnalysis();
17410 if (any_of(E.Scalars, [&](Value *V) {
17411 return !all_of(V->users(), [=](User *U) {
17412 return getTreeEntry(U) ||
17413 (E.Idx == 0 && UserIgnoreList &&
17414 UserIgnoreList->contains(U)) ||
17415 (!isa<CmpInst>(U) && U->getType()->isSized() &&
17416 !U->getType()->isScalableTy() &&
17417 DL->getTypeSizeInBits(U->getType()) <= BitWidth);
17418 }) && !IsPotentiallyTruncated(V, BitWidth);
17420 return false;
17422 auto ProcessOperands = [&](ArrayRef<const TreeEntry *> Operands,
17423 bool &NeedToExit) {
17424 NeedToExit = false;
17425 unsigned InitLevel = MaxDepthLevel;
17426 for (const TreeEntry *Op : Operands) {
17427 unsigned Level = InitLevel;
17428 if (!collectValuesToDemote(*Op, IsProfitableToDemoteRoot, BitWidth,
17429 ToDemote, Visited, Level, IsProfitableToDemote,
17430 IsTruncRoot)) {
17431 if (!IsProfitableToDemote)
17432 return false;
17433 NeedToExit = true;
17434 if (!FinalAnalysis())
17435 return false;
17436 continue;
17438 MaxDepthLevel = std::max(MaxDepthLevel, Level);
17440 return true;
17442 auto AttemptCheckBitwidth =
17443 [&](function_ref<bool(unsigned, unsigned)> Checker, bool &NeedToExit) {
17444 // Try all bitwidth < OrigBitWidth.
17445 NeedToExit = false;
17446 unsigned BestFailBitwidth = 0;
17447 for (; BitWidth < OrigBitWidth; BitWidth *= 2) {
17448 if (Checker(BitWidth, OrigBitWidth))
17449 return true;
17450 if (BestFailBitwidth == 0 && FinalAnalysis())
17451 BestFailBitwidth = BitWidth;
17453 if (BitWidth >= OrigBitWidth) {
17454 if (BestFailBitwidth == 0) {
17455 BitWidth = OrigBitWidth;
17456 return false;
17458 MaxDepthLevel = 1;
17459 BitWidth = BestFailBitwidth;
17460 NeedToExit = true;
17461 return true;
17463 return false;
17465 auto TryProcessInstruction =
17466 [&](unsigned &BitWidth, ArrayRef<const TreeEntry *> Operands = {},
17467 function_ref<bool(unsigned, unsigned)> Checker = {}) {
17468 if (Operands.empty()) {
17469 if (!IsTruncRoot)
17470 MaxDepthLevel = 1;
17471 (void)for_each(E.Scalars, std::bind(IsPotentiallyTruncated, _1,
17472 std::ref(BitWidth)));
17473 } else {
17474 // Several vectorized uses? Check if we can truncate it; otherwise,
17475 // exit.
17476 if (E.UserTreeIndices.size() > 1 &&
17477 !all_of(E.Scalars, std::bind(IsPotentiallyTruncated, _1,
17478 std::ref(BitWidth))))
17479 return false;
17480 bool NeedToExit = false;
17481 if (Checker && !AttemptCheckBitwidth(Checker, NeedToExit))
17482 return false;
17483 if (NeedToExit)
17484 return true;
17485 if (!ProcessOperands(Operands, NeedToExit))
17486 return false;
17487 if (NeedToExit)
17488 return true;
17491 ++MaxDepthLevel;
17492 // Record the entry that we can demote.
17493 ToDemote.push_back(E.Idx);
17494 return IsProfitableToDemote;
17496 switch (E.getOpcode()) {
17498 // We can always demote truncations and extensions. Since truncations can
17499 // seed additional demotion, we save the truncated value.
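// Illustrative sketch (not taken from a particular test): a chain such as
//   %a = zext i8 %x to i32
//   %b = zext i8 %y to i32
//   %c = add i32 %a, %b
//   %d = trunc i32 %c to i8
// can be demoted as a whole, since the add can be performed directly in i8
// and the zexts/trunc then become no-ops.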
17500 case Instruction::Trunc:
17501 if (IsProfitableToDemoteRoot)
17502 IsProfitableToDemote = true;
17503 return TryProcessInstruction(BitWidth);
17504 case Instruction::ZExt:
17505 case Instruction::SExt:
17506 IsProfitableToDemote = true;
17507 return TryProcessInstruction(BitWidth);
17509 // We can demote certain binary operations if we can demote both of their
17510 // operands.
17511 case Instruction::Add:
17512 case Instruction::Sub:
17513 case Instruction::Mul:
17514 case Instruction::And:
17515 case Instruction::Or:
17516 case Instruction::Xor: {
17517 return TryProcessInstruction(
17518 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)});
17520 case Instruction::Freeze:
17521 return TryProcessInstruction(BitWidth, getOperandEntry(&E, 0));
17522 case Instruction::Shl: {
17523 // If we are truncating the result of this SHL, and if it's a shift of an
17524 // in-range amount, we can always perform a SHL in a smaller type.
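// For example (illustrative), with OrigBitWidth = 32 and BitWidth = 16:
//   %s = shl i32 %x, 3
//   %t = trunc i32 %s to i16
// can become
//   %x.16 = trunc i32 %x to i16
//   %t    = shl i16 %x.16, 3
// because the shift amount (3) is known to be less than 16.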
17525 auto ShlChecker = [&](unsigned BitWidth, unsigned) {
17526 return all_of(E.Scalars, [&](Value *V) {
17527 auto *I = cast<Instruction>(V);
17528 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17529 return AmtKnownBits.getMaxValue().ult(BitWidth);
17532 return TryProcessInstruction(
17533 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)}, ShlChecker);
17535 case Instruction::LShr: {
17536 // If this is a truncate of a logical shr, we can truncate it to a smaller
17537 // lshr iff we know that the bits we would otherwise be shifting in are
17538 // already zeros.
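// For example (illustrative), with OrigBitWidth = 32 and BitWidth = 16: if
// bits 16..31 of %x are known to be zero, then
//   %s = lshr i32 %x, 4
//   %t = trunc i32 %s to i16
// is equivalent to an lshr by 4 of the i16 truncation of %x, since only zero
// bits are shifted in either way.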
17539 auto LShrChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17540 return all_of(E.Scalars, [&](Value *V) {
17541 auto *I = cast<Instruction>(V);
17542 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17543 APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17544 return AmtKnownBits.getMaxValue().ult(BitWidth) &&
17545 MaskedValueIsZero(I->getOperand(0), ShiftedBits,
17546 SimplifyQuery(*DL));
17549 return TryProcessInstruction(
17550 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)},
17551 LShrChecker);
17553 case Instruction::AShr: {
17554 // If this is a truncate of an arithmetic shr, we can truncate it to a
17555 // smaller ashr iff we know that all the bits between the sign bit of the
17556 // original type and the sign bit of the truncated type are the same.
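// For example (illustrative), with OrigBitWidth = 32 and BitWidth = 16: if
// %x has at least 17 known sign bits (ShiftedBits = 16 < 17), then
//   %s = ashr i32 %x, 2
//   %t = trunc i32 %s to i16
// is equivalent to an ashr by 2 of the i16 truncation of %x, because the bits
// shifted in are copies of the same sign bit in both forms.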
17557 auto AShrChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17558 return all_of(E.Scalars, [&](Value *V) {
17559 auto *I = cast<Instruction>(V);
17560 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17561 unsigned ShiftedBits = OrigBitWidth - BitWidth;
17562 return AmtKnownBits.getMaxValue().ult(BitWidth) &&
17563 ShiftedBits < ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
17564 nullptr, DT);
17567 return TryProcessInstruction(
17568 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)},
17569 AShrChecker);
17571 case Instruction::UDiv:
17572 case Instruction::URem: {
17573 // UDiv and URem can be truncated if all the truncated bits are zero.
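// For example (illustrative): if the top 24 bits of both %a and %b are known
// to be zero, then
//   %q = udiv i32 %a, %b
//   %t = trunc i32 %q to i8
// is equivalent to a udiv of the i8 truncations of %a and %b (and likewise
// for urem).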
17574 auto Checker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17575 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17576 return all_of(E.Scalars, [&](Value *V) {
17577 auto *I = cast<Instruction>(V);
17578 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17579 return MaskedValueIsZero(I->getOperand(0), Mask, SimplifyQuery(*DL)) &&
17580 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL));
17583 return TryProcessInstruction(
17584 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)}, Checker);
17587 // We can demote selects if we can demote their true and false values.
17588 case Instruction::Select: {
17589 return TryProcessInstruction(
17590 BitWidth, {getOperandEntry(&E, 1), getOperandEntry(&E, 2)});
17593 // We can demote phis if we can demote all their incoming operands. Note that
17594 // we don't need to worry about cycles since we ensure single use above.
17595 case Instruction::PHI: {
17596 const unsigned NumOps = E.getNumOperands();
17597 SmallVector<const TreeEntry *> Ops(NumOps);
17598 transform(seq<unsigned>(0, NumOps), Ops.begin(),
17599 std::bind(&BoUpSLP::getOperandEntry, this, &E, _1));
17601 return TryProcessInstruction(BitWidth, Ops);
17604 case Instruction::Call: {
17605 auto *IC = dyn_cast<IntrinsicInst>(E.getMainOp());
17606 if (!IC)
17607 break;
17608 Intrinsic::ID ID = getVectorIntrinsicIDForCall(IC, TLI);
17609 if (ID != Intrinsic::abs && ID != Intrinsic::smin &&
17610 ID != Intrinsic::smax && ID != Intrinsic::umin && ID != Intrinsic::umax)
17611 break;
17612 SmallVector<const TreeEntry *, 2> Operands(1, getOperandEntry(&E, 0));
17613 function_ref<bool(unsigned, unsigned)> CallChecker;
17614 auto CompChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17615 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17616 return all_of(E.Scalars, [&](Value *V) {
17617 auto *I = cast<Instruction>(V);
17618 if (ID == Intrinsic::umin || ID == Intrinsic::umax) {
17619 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17620 return MaskedValueIsZero(I->getOperand(0), Mask,
17621 SimplifyQuery(*DL)) &&
17622 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL));
17624 assert((ID == Intrinsic::smin || ID == Intrinsic::smax) &&
17625 "Expected min/max intrinsics only.");
17626 unsigned SignBits = OrigBitWidth - BitWidth;
17627 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
17628 unsigned Op0SignBits = ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
17629 nullptr, DT);
17630 unsigned Op1SignBits = ComputeNumSignBits(I->getOperand(1), *DL, 0, AC,
17631 nullptr, DT);
17632 return SignBits <= Op0SignBits &&
17633 ((SignBits != Op0SignBits &&
17634 !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
17635 MaskedValueIsZero(I->getOperand(0), Mask,
17636 SimplifyQuery(*DL))) &&
17637 SignBits <= Op1SignBits &&
17638 ((SignBits != Op1SignBits &&
17639 !isKnownNonNegative(I->getOperand(1), SimplifyQuery(*DL))) ||
17640 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL)));
17643 auto AbsChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17644 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17645 return all_of(E.Scalars, [&](Value *V) {
17646 auto *I = cast<Instruction>(V);
17647 unsigned SignBits = OrigBitWidth - BitWidth;
17648 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
17649 unsigned Op0SignBits =
17650 ComputeNumSignBits(I->getOperand(0), *DL, 0, AC, nullptr, DT);
17651 return SignBits <= Op0SignBits &&
17652 ((SignBits != Op0SignBits &&
17653 !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
17654 MaskedValueIsZero(I->getOperand(0), Mask, SimplifyQuery(*DL)));
17657 if (ID != Intrinsic::abs) {
17658 Operands.push_back(getOperandEntry(&E, 1));
17659 CallChecker = CompChecker;
17660 } else {
17661 CallChecker = AbsChecker;
17663 InstructionCost BestCost =
17664 std::numeric_limits<InstructionCost::CostType>::max();
17665 unsigned BestBitWidth = BitWidth;
17666 unsigned VF = E.Scalars.size();
17667 // Choose the best bitwidth based on cost estimations.
17668 auto Checker = [&](unsigned BitWidth, unsigned) {
17669 unsigned MinBW = PowerOf2Ceil(BitWidth);
17670 SmallVector<Type *> ArgTys = buildIntrinsicArgTypes(IC, ID, VF, MinBW);
17671 auto VecCallCosts = getVectorCallCosts(
17672 IC, getWidenedType(IntegerType::get(IC->getContext(), MinBW), VF),
17673 TTI, TLI, ArgTys);
17674 InstructionCost Cost = std::min(VecCallCosts.first, VecCallCosts.second);
17675 if (Cost < BestCost) {
17676 BestCost = Cost;
17677 BestBitWidth = BitWidth;
17679 return false;
17681 [[maybe_unused]] bool NeedToExit;
17682 (void)AttemptCheckBitwidth(Checker, NeedToExit);
17683 BitWidth = BestBitWidth;
17684 return TryProcessInstruction(BitWidth, Operands, CallChecker);
17687 // Otherwise, conservatively give up.
17688 default:
17689 break;
17691 MaxDepthLevel = 1;
17692 return FinalAnalysis();
17695 static RecurKind getRdxKind(Value *V);
17697 void BoUpSLP::computeMinimumValueSizes() {
17698 // We only attempt to truncate integer expressions.
17699 bool IsStoreOrInsertElt =
17700 VectorizableTree.front()->getOpcode() == Instruction::Store ||
17701 VectorizableTree.front()->getOpcode() == Instruction::InsertElement;
17702 if ((IsStoreOrInsertElt || UserIgnoreList) &&
17703 ExtraBitWidthNodes.size() <= 1 &&
17704 (!CastMaxMinBWSizes || CastMaxMinBWSizes->second == 0 ||
17705 CastMaxMinBWSizes->first / CastMaxMinBWSizes->second <= 2))
17706 return;
17708 unsigned NodeIdx = 0;
17709 if (IsStoreOrInsertElt && !VectorizableTree.front()->isGather())
17710 NodeIdx = 1;
17712 // Ensure the roots of the vectorizable tree don't form a cycle.
17713 if (VectorizableTree[NodeIdx]->isGather() ||
17714 (NodeIdx == 0 && !VectorizableTree[NodeIdx]->UserTreeIndices.empty()) ||
17715 (NodeIdx != 0 && any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
17716 [NodeIdx](const EdgeInfo &EI) {
17717 return EI.UserTE->Idx > NodeIdx;
17718 })))
17719 return;
17721 // If the first value node for store/insertelement is sext/zext/trunc, skip
17722 // it and resize to the final type.
17723 bool IsTruncRoot = false;
17724 bool IsProfitableToDemoteRoot = !IsStoreOrInsertElt;
17725 SmallVector<unsigned> RootDemotes;
17726 if (NodeIdx != 0 &&
17727 VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
17728 VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
17729 assert(IsStoreOrInsertElt && "Expected store/insertelement seeded graph.");
17730 IsTruncRoot = true;
17731 RootDemotes.push_back(NodeIdx);
17732 IsProfitableToDemoteRoot = true;
17733 ++NodeIdx;
17736 // The reduction was already analyzed and found not profitable - exit.
17737 if (AnalyzedMinBWVals.contains(VectorizableTree[NodeIdx]->Scalars.front()))
17738 return;
17740 SmallVector<unsigned> ToDemote;
17741 auto ComputeMaxBitWidth = [&](const TreeEntry &E, bool IsTopRoot,
17742 bool IsProfitableToDemoteRoot, unsigned Opcode,
17743 unsigned Limit, bool IsTruncRoot,
17744 bool IsSignedCmp) -> unsigned {
17745 ToDemote.clear();
17746 // Check if the root is a trunc and the next node is a gather/buildvector;
17747 // if so, keep the trunc in scalars, which is free in most cases.
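// Illustrative sketch: for a store-seeded graph whose trunc root (e.g. i32 to
// i16) is fed by a gather/buildvector node, the gathered node is demoted and
// the scalar truncs stay in place, folded into the build vector, which
// typically costs nothing.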
17748 if (E.isGather() && IsTruncRoot && E.UserTreeIndices.size() == 1 &&
17749 E.Idx > (IsStoreOrInsertElt ? 2u : 1u) &&
17750 all_of(E.Scalars, [&](Value *V) {
17751 return V->hasOneUse() || isa<Constant>(V) ||
17752 (!V->hasNUsesOrMore(UsesLimit) &&
17753 none_of(V->users(), [&](User *U) {
17754 const TreeEntry *TE = getTreeEntry(U);
17755 const TreeEntry *UserTE = E.UserTreeIndices.back().UserTE;
17756 if (TE == UserTE || !TE)
17757 return false;
17758 if (!isa<CastInst, BinaryOperator, FreezeInst, PHINode,
17759 SelectInst>(U) ||
17760 !isa<CastInst, BinaryOperator, FreezeInst, PHINode,
17761 SelectInst>(UserTE->getMainOp()))
17762 return true;
17763 unsigned UserTESz = DL->getTypeSizeInBits(
17764 UserTE->Scalars.front()->getType());
17765 auto It = MinBWs.find(TE);
17766 if (It != MinBWs.end() && It->second.first > UserTESz)
17767 return true;
17768 return DL->getTypeSizeInBits(U->getType()) > UserTESz;
17769 }));
17770 })) {
17771 ToDemote.push_back(E.Idx);
17772 const TreeEntry *UserTE = E.UserTreeIndices.back().UserTE;
17773 auto It = MinBWs.find(UserTE);
17774 if (It != MinBWs.end())
17775 return It->second.first;
17776 unsigned MaxBitWidth =
17777 DL->getTypeSizeInBits(UserTE->Scalars.front()->getType());
17778 MaxBitWidth = bit_ceil(MaxBitWidth);
17779 if (MaxBitWidth < 8 && MaxBitWidth > 1)
17780 MaxBitWidth = 8;
17781 return MaxBitWidth;
17784 unsigned VF = E.getVectorFactor();
17785 Type *ScalarTy = E.Scalars.front()->getType();
17786 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
17787 auto *TreeRootIT = dyn_cast<IntegerType>(ScalarTy->getScalarType());
17788 if (!TreeRootIT || !Opcode)
17789 return 0u;
17791 if (any_of(E.Scalars,
17792 [&](Value *V) { return AnalyzedMinBWVals.contains(V); }))
17793 return 0u;
17795 unsigned NumParts = TTI->getNumberOfParts(
17796 getWidenedType(TreeRootIT, VF * ScalarTyNumElements));
17798 // The maximum bit width required to represent all the values that can be
17799 // demoted without loss of precision. It would be safe to truncate the roots
17800 // of the expression to this width.
17801 unsigned MaxBitWidth = 1u;
17803 // True if the roots can be zero-extended back to their original type,
17804 // rather than sign-extended. We know that if the leading bits are not
17805 // demanded, we can safely zero-extend. So we initialize IsKnownPositive to
17806 // True.
17807 // Determine if the sign bit of all the roots is known to be zero. If not,
17808 // IsKnownPositive is set to False.
17809 bool IsKnownPositive = !IsSignedCmp && all_of(E.Scalars, [&](Value *R) {
17810 KnownBits Known = computeKnownBits(R, *DL);
17811 return Known.isNonNegative();
17814 // We first check if all the bits of the roots are demanded. If they're not,
17815 // we can truncate the roots to this narrower type.
17816 for (Value *Root : E.Scalars) {
17817 unsigned NumSignBits = ComputeNumSignBits(Root, *DL, 0, AC, nullptr, DT);
17818 TypeSize NumTypeBits =
17819 DL->getTypeSizeInBits(Root->getType()->getScalarType());
17820 unsigned BitWidth1 = NumTypeBits - NumSignBits;
17821 // If we can't prove that the sign bit is zero, we must add one to the
17822 // maximum bit width to account for the unknown sign bit. This preserves
17823 // the existing sign bit so we can safely sign-extend the root back to the
17824 // original type. Otherwise, if we know the sign bit is zero, we will
17825 // zero-extend the root instead.
17827 // FIXME: This is somewhat suboptimal, as there will be cases where adding
17828 // one to the maximum bit width will yield a larger-than-necessary
17829 // type. In general, we need to add an extra bit only if we can't
17830 // prove that the upper bit of the original type is equal to the
17831 // upper bit of the proposed smaller type. If these two bits are
17832 // the same (either zero or one) we know that sign-extending from
17833 // the smaller type will result in the same value. Here, since we
17834 // can't yet prove this, we are just making the proposed smaller
17835 // type larger to ensure correctness.
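// Worked example (illustrative): for an i32 root with 24 known sign bits and
// a sign bit not known to be zero, BitWidth1 = 32 - 24 = 8 plus the extra bit
// gives 9, which bit_ceil later rounds up to 16. If we could also prove that
// bit 7 equals the sign bit (i.e. 25 sign bits), 8 bits would suffice.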
17836 if (!IsKnownPositive)
17837 ++BitWidth1;
17839 APInt Mask = DB->getDemandedBits(cast<Instruction>(Root));
17840 unsigned BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
17841 MaxBitWidth =
17842 std::max<unsigned>(std::min(BitWidth1, BitWidth2), MaxBitWidth);
17845 if (MaxBitWidth < 8 && MaxBitWidth > 1)
17846 MaxBitWidth = 8;
17848 // If the original type is large but the reduced type does not improve
17849 // register use - ignore it.
17850 if (NumParts > 1 &&
17851 NumParts ==
17852 TTI->getNumberOfParts(getWidenedType(
17853 IntegerType::get(F->getContext(), bit_ceil(MaxBitWidth)), VF)))
17854 return 0u;
17856 bool IsProfitableToDemote = Opcode == Instruction::Trunc ||
17857 Opcode == Instruction::SExt ||
17858 Opcode == Instruction::ZExt || NumParts > 1;
17859 // Conservatively determine if we can actually truncate the roots of the
17860 // expression. Collect the values that can be demoted in ToDemote and
17861 // additional roots that require investigating in Roots.
17862 DenseSet<const TreeEntry *> Visited;
17863 unsigned MaxDepthLevel = IsTruncRoot ? Limit : 1;
17864 bool NeedToDemote = IsProfitableToDemote;
17866 if (!collectValuesToDemote(E, IsProfitableToDemoteRoot, MaxBitWidth,
17867 ToDemote, Visited, MaxDepthLevel, NeedToDemote,
17868 IsTruncRoot) ||
17869 (MaxDepthLevel <= Limit &&
17870 !(((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
17871 (!IsTopRoot || !(IsStoreOrInsertElt || UserIgnoreList) ||
17872 DL->getTypeSizeInBits(TreeRootIT) /
17873 DL->getTypeSizeInBits(cast<Instruction>(E.Scalars.front())
17874 ->getOperand(0)
17875 ->getType()) >
17876 2)))))
17877 return 0u;
17878 // Round MaxBitWidth up to the next power-of-two.
17879 MaxBitWidth = bit_ceil(MaxBitWidth);
17881 return MaxBitWidth;
17884 // If we can truncate the root, we must collect additional values that might
17885 // be demoted as a result. That is, those seeded by truncations we will
17886 // modify.
17887 // Add reduction ops sizes, if any.
17888 if (UserIgnoreList &&
17889 isa<IntegerType>(VectorizableTree.front()->Scalars.front()->getType())) {
17890 for (Value *V : *UserIgnoreList) {
17891 auto NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
17892 auto NumTypeBits = DL->getTypeSizeInBits(V->getType());
17893 unsigned BitWidth1 = NumTypeBits - NumSignBits;
17894 if (!isKnownNonNegative(V, SimplifyQuery(*DL)))
17895 ++BitWidth1;
17896 unsigned BitWidth2 = BitWidth1;
17897 if (!RecurrenceDescriptor::isIntMinMaxRecurrenceKind(::getRdxKind(V))) {
17898 auto Mask = DB->getDemandedBits(cast<Instruction>(V));
17899 BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
17901 ReductionBitWidth =
17902 std::max(std::min(BitWidth1, BitWidth2), ReductionBitWidth);
17904 if (ReductionBitWidth < 8 && ReductionBitWidth > 1)
17905 ReductionBitWidth = 8;
17907 ReductionBitWidth = bit_ceil(ReductionBitWidth);
17909 bool IsTopRoot = NodeIdx == 0;
17910 while (NodeIdx < VectorizableTree.size() &&
17911 VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
17912 VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
17913 RootDemotes.push_back(NodeIdx);
17914 ++NodeIdx;
17915 IsTruncRoot = true;
17917 bool IsSignedCmp = false;
17918 while (NodeIdx < VectorizableTree.size()) {
17919 ArrayRef<Value *> TreeRoot = VectorizableTree[NodeIdx]->Scalars;
17920 unsigned Limit = 2;
17921 unsigned Opcode = VectorizableTree[NodeIdx]->getOpcode();
17922 if (IsTopRoot &&
17923 ReductionBitWidth ==
17924 DL->getTypeSizeInBits(
17925 VectorizableTree.front()->Scalars.front()->getType()))
17926 Limit = 3;
17927 unsigned MaxBitWidth = ComputeMaxBitWidth(
17928 *VectorizableTree[NodeIdx], IsTopRoot, IsProfitableToDemoteRoot, Opcode,
17929 Limit, IsTruncRoot, IsSignedCmp);
17930 if (ReductionBitWidth != 0 && (IsTopRoot || !RootDemotes.empty())) {
17931 if (MaxBitWidth != 0 && ReductionBitWidth < MaxBitWidth)
17932 ReductionBitWidth = bit_ceil(MaxBitWidth);
17933 else if (MaxBitWidth == 0)
17934 ReductionBitWidth = 0;
17937 for (unsigned Idx : RootDemotes) {
17938 if (all_of(VectorizableTree[Idx]->Scalars, [&](Value *V) {
17939 uint32_t OrigBitWidth =
17940 DL->getTypeSizeInBits(V->getType()->getScalarType());
17941 if (OrigBitWidth > MaxBitWidth) {
17942 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, MaxBitWidth);
17943 return MaskedValueIsZero(V, Mask, SimplifyQuery(*DL));
17945 return false;
17947 ToDemote.push_back(Idx);
17949 RootDemotes.clear();
17950 IsTopRoot = false;
17951 IsProfitableToDemoteRoot = true;
17953 if (ExtraBitWidthNodes.empty()) {
17954 NodeIdx = VectorizableTree.size();
17955 } else {
17956 unsigned NewIdx = 0;
17957 do {
17958 NewIdx = *ExtraBitWidthNodes.begin();
17959 ExtraBitWidthNodes.erase(ExtraBitWidthNodes.begin());
17960 } while (NewIdx <= NodeIdx && !ExtraBitWidthNodes.empty());
17961 NodeIdx = NewIdx;
17962 IsTruncRoot =
17963 NodeIdx < VectorizableTree.size() &&
17964 any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
17965 [](const EdgeInfo &EI) {
17966 return EI.EdgeIdx == 0 &&
17967 EI.UserTE->getOpcode() == Instruction::Trunc &&
17968 !EI.UserTE->isAltShuffle();
17970 IsSignedCmp =
17971 NodeIdx < VectorizableTree.size() &&
17972 any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
17973 [&](const EdgeInfo &EI) {
17974 return EI.UserTE->getOpcode() == Instruction::ICmp &&
17975 any_of(EI.UserTE->Scalars, [&](Value *V) {
17976 auto *IC = dyn_cast<ICmpInst>(V);
17977 return IC &&
17978 (IC->isSigned() ||
17979 !isKnownNonNegative(IC->getOperand(0),
17980 SimplifyQuery(*DL)) ||
17981 !isKnownNonNegative(IC->getOperand(1),
17982 SimplifyQuery(*DL)));
17987 // If the maximum bit width we compute is less than the width of the roots'
17988 // type, we can proceed with the narrowing. Otherwise, do nothing.
17989 if (MaxBitWidth == 0 ||
17990 MaxBitWidth >=
17991 cast<IntegerType>(TreeRoot.front()->getType()->getScalarType())
17992 ->getBitWidth()) {
17993 if (UserIgnoreList)
17994 AnalyzedMinBWVals.insert(TreeRoot.begin(), TreeRoot.end());
17995 continue;
17998 // Finally, map the values we can demote to the maximum bit width we
17999 // computed.
18000 for (unsigned Idx : ToDemote) {
18001 TreeEntry *TE = VectorizableTree[Idx].get();
18002 if (MinBWs.contains(TE))
18003 continue;
18004 bool IsSigned = any_of(TE->Scalars, [&](Value *R) {
18005 return !isKnownNonNegative(R, SimplifyQuery(*DL));
18007 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned);
18012 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
18013 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
18014 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
18015 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
18016 auto *AA = &AM.getResult<AAManager>(F);
18017 auto *LI = &AM.getResult<LoopAnalysis>(F);
18018 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
18019 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
18020 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
18021 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
18023 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
18024 if (!Changed)
18025 return PreservedAnalyses::all();
18027 PreservedAnalyses PA;
18028 PA.preserveSet<CFGAnalyses>();
18029 return PA;
18032 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
18033 TargetTransformInfo *TTI_,
18034 TargetLibraryInfo *TLI_, AAResults *AA_,
18035 LoopInfo *LI_, DominatorTree *DT_,
18036 AssumptionCache *AC_, DemandedBits *DB_,
18037 OptimizationRemarkEmitter *ORE_) {
18038 if (!RunSLPVectorization)
18039 return false;
18040 SE = SE_;
18041 TTI = TTI_;
18042 TLI = TLI_;
18043 AA = AA_;
18044 LI = LI_;
18045 DT = DT_;
18046 AC = AC_;
18047 DB = DB_;
18048 DL = &F.getDataLayout();
18050 Stores.clear();
18051 GEPs.clear();
18052 bool Changed = false;
18054 // If the target claims to have no vector registers don't attempt
18055 // vectorization.
18056 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
18057 LLVM_DEBUG(
18058 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
18059 return false;
18062 // Don't vectorize when the attribute NoImplicitFloat is used.
18063 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
18064 return false;
18066 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
18068 // Use the bottom-up SLP vectorizer to construct chains that start with
18069 // store instructions.
18070 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
18072 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
18073 // delete instructions.
18075 // Update DFS numbers now so that we can use them for ordering.
18076 DT->updateDFSNumbers();
18078 // Scan the blocks in the function in post order.
18079 for (auto *BB : post_order(&F.getEntryBlock())) {
18080 if (BB->isEHPad() || isa_and_nonnull<UnreachableInst>(BB->getTerminator()))
18081 continue;
18083 // Start new block - clear the list of reduction roots.
18084 R.clearReductionData();
18085 collectSeedInstructions(BB);
18087 // Vectorize trees that end at stores.
18088 if (!Stores.empty()) {
18089 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
18090 << " underlying objects.\n");
18091 Changed |= vectorizeStoreChains(R);
18094 // Vectorize trees that end at reductions.
18095 Changed |= vectorizeChainsInBlock(BB, R);
18097 // Vectorize the index computations of getelementptr instructions. This
18098 // is primarily intended to catch gather-like idioms ending at
18099 // non-consecutive loads.
18100 if (!GEPs.empty()) {
18101 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
18102 << " underlying objects.\n");
18103 Changed |= vectorizeGEPIndices(BB, R);
18107 if (Changed) {
18108 R.optimizeGatherSequence();
18109 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
18111 return Changed;
18114 std::optional<bool>
18115 SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
18116 unsigned Idx, unsigned MinVF,
18117 unsigned &Size) {
18118 Size = 0;
18119 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
18120 << "\n");
18121 const unsigned Sz = R.getVectorElementSize(Chain[0]);
18122 unsigned VF = Chain.size();
18124 if (!has_single_bit(Sz) ||
18125 !hasFullVectorsOrPowerOf2(
18126 *TTI, cast<StoreInst>(Chain.front())->getValueOperand()->getType(),
18127 VF) ||
18128 VF < 2 || VF < MinVF) {
18129 // Check if vectorizing with a non-power-of-2 VF should be considered. At
18130 // the moment, only consider cases where VF + 1 is a power-of-2, i.e. almost
18131 // all vector lanes are used.
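// Illustrative example: with MinVF = 4, a chain of VF = 3 stores is still
// considered here when non-power-of-2 vectorization is enabled, because
// VF + 1 == MinVF, i.e. all but one lane of a 4-wide vector would be used.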
18132 if (!VectorizeNonPowerOf2 || (VF < MinVF && VF + 1 != MinVF))
18133 return false;
18136 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
18137 << "\n");
18139 SetVector<Value *> ValOps;
18140 for (Value *V : Chain)
18141 ValOps.insert(cast<StoreInst>(V)->getValueOperand());
18142 // Exit if the operands do not share the same/alternate opcode or the number of unique values is not an allowed (power-of-2) count.
18143 InstructionsState S = getSameOpcode(ValOps.getArrayRef(), *TLI);
18144 if (all_of(ValOps, IsaPred<Instruction>) && ValOps.size() > 1) {
18145 DenseSet<Value *> Stores(Chain.begin(), Chain.end());
18146 bool IsAllowedSize =
18147 hasFullVectorsOrPowerOf2(*TTI, ValOps.front()->getType(),
18148 ValOps.size()) ||
18149 (VectorizeNonPowerOf2 && has_single_bit(ValOps.size() + 1));
18150 if ((!IsAllowedSize && S.getOpcode() &&
18151 S.getOpcode() != Instruction::Load &&
18152 (!S.MainOp->isSafeToRemove() ||
18153 any_of(ValOps.getArrayRef(),
18154 [&](Value *V) {
18155 return !isa<ExtractElementInst>(V) &&
18156 (V->getNumUses() > Chain.size() ||
18157 any_of(V->users(), [&](User *U) {
18158 return !Stores.contains(U);
18159 }));
18160 }))) ||
18161 (ValOps.size() > Chain.size() / 2 && !S.getOpcode())) {
18162 Size = (!IsAllowedSize && S.getOpcode()) ? 1 : 2;
18163 return false;
18166 if (R.isLoadCombineCandidate(Chain))
18167 return true;
18168 R.buildTree(Chain);
18169 // Check if the tree is tiny and the store itself or its value is not vectorized.
18170 if (R.isTreeTinyAndNotFullyVectorizable()) {
18171 if (R.isGathered(Chain.front()) ||
18172 R.isNotScheduled(cast<StoreInst>(Chain.front())->getValueOperand()))
18173 return std::nullopt;
18174 Size = R.getCanonicalGraphSize();
18175 return false;
18177 R.reorderTopToBottom();
18178 R.reorderBottomToTop();
18179 R.transformNodes();
18180 R.buildExternalUses();
18182 R.computeMinimumValueSizes();
18184 Size = R.getCanonicalGraphSize();
18185 if (S.getOpcode() == Instruction::Load)
18186 Size = 2; // cut off masked gather small trees
18187 InstructionCost Cost = R.getTreeCost();
18189 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n");
18190 if (Cost < -SLPCostThreshold) {
18191 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
18193 using namespace ore;
18195 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
18196 cast<StoreInst>(Chain[0]))
18197 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
18198 << " and with tree size "
18199 << NV("TreeSize", R.getTreeSize()));
18201 R.vectorizeTree();
18202 return true;
18205 return false;
18208 /// Checks that the quadratic mean (RMS) deviation of the tree sizes is small relative to the mean size, i.e. Dev * 81 < Mean * Mean (the RMS deviation is below roughly one ninth of the mean).
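/// Illustrative example (integer arithmetic as in the code below): sizes
/// {4, 4, 5, 1} skip the 1, giving Sum = 13, Num = 3, Mean = 4 and
/// Dev = (0 + 0 + 1) / 3 = 0, so 0 * 81 / 16 == 0 and the check passes;
/// sizes {2, 8} give Mean = 5, Dev = (9 + 9) / 2 = 9, and 9 * 81 / 25 != 0,
/// so the check fails.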
18209 static bool checkTreeSizes(ArrayRef<std::pair<unsigned, unsigned>> Sizes,
18210 bool First) {
18211 unsigned Num = 0;
18212 uint64_t Sum = std::accumulate(
18213 Sizes.begin(), Sizes.end(), static_cast<uint64_t>(0),
18214 [&](uint64_t V, const std::pair<unsigned, unsigned> &Val) {
18215 unsigned Size = First ? Val.first : Val.second;
18216 if (Size == 1)
18217 return V;
18218 ++Num;
18219 return V + Size;
18221 if (Num == 0)
18222 return true;
18223 uint64_t Mean = Sum / Num;
18224 if (Mean == 0)
18225 return true;
18226 uint64_t Dev = std::accumulate(
18227 Sizes.begin(), Sizes.end(), static_cast<uint64_t>(0),
18228 [&](uint64_t V, const std::pair<unsigned, unsigned> &Val) {
18229 unsigned P = First ? Val.first : Val.second;
18230 if (P == 1)
18231 return V;
18232 return V + (P - Mean) * (P - Mean);
18233 }) /
18234 Num;
18235 return Dev * 81 / (Mean * Mean) == 0;
18238 bool SLPVectorizerPass::vectorizeStores(
18239 ArrayRef<StoreInst *> Stores, BoUpSLP &R,
18240 DenseSet<std::tuple<Value *, Value *, Value *, Value *, unsigned>>
18241 &Visited) {
18242 // We may run into multiple chains that merge into a single chain. We mark the
18243 // stores that we vectorized so that we don't visit the same store twice.
18244 BoUpSLP::ValueSet VectorizedStores;
18245 bool Changed = false;
18247 struct StoreDistCompare {
18248 bool operator()(const std::pair<unsigned, int> &Op1,
18249 const std::pair<unsigned, int> &Op2) const {
18250 return Op1.second < Op2.second;
18253 // A set of pairs (index of store in Stores array ref, Distance of the store
18254 // address relative to base store address in units).
18255 using StoreIndexToDistSet =
18256 std::set<std::pair<unsigned, int>, StoreDistCompare>;
18257 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) {
18258 int PrevDist = -1;
18259 BoUpSLP::ValueList Operands;
18260 // Collect the chain into a list.
18261 for (auto [Idx, Data] : enumerate(Set)) {
18262 if (Operands.empty() || Data.second - PrevDist == 1) {
18263 Operands.push_back(Stores[Data.first]);
18264 PrevDist = Data.second;
18265 if (Idx != Set.size() - 1)
18266 continue;
18268 auto E = make_scope_exit([&, &DataVar = Data]() {
18269 Operands.clear();
18270 Operands.push_back(Stores[DataVar.first]);
18271 PrevDist = DataVar.second;
18274 if (Operands.size() <= 1 ||
18275 !Visited
18276 .insert({Operands.front(),
18277 cast<StoreInst>(Operands.front())->getValueOperand(),
18278 Operands.back(),
18279 cast<StoreInst>(Operands.back())->getValueOperand(),
18280 Operands.size()})
18281 .second)
18282 continue;
18284 unsigned MaxVecRegSize = R.getMaxVecRegSize();
18285 unsigned EltSize = R.getVectorElementSize(Operands[0]);
18286 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize);
18288 unsigned MaxVF =
18289 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts);
18290 auto *Store = cast<StoreInst>(Operands[0]);
18291 Type *StoreTy = Store->getValueOperand()->getType();
18292 Type *ValueTy = StoreTy;
18293 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
18294 ValueTy = Trunc->getSrcTy();
18295 unsigned MinVF = std::max<unsigned>(
18296 2, PowerOf2Ceil(TTI->getStoreMinimumVF(
18297 R.getMinVF(DL->getTypeStoreSizeInBits(StoreTy)), StoreTy,
18298 ValueTy)));
18300 if (MaxVF < MinVF) {
18301 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
18302 << ") < "
18303 << "MinVF (" << MinVF << ")\n");
18304 continue;
18307 unsigned NonPowerOf2VF = 0;
18308 if (VectorizeNonPowerOf2) {
18309 // First try vectorizing with a non-power-of-2 VF. At the moment, only
18310 // consider cases where VF + 1 is a power-of-2, i.e. almost all vector
18311 // lanes are used.
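// Illustrative example: with MinVF = 4, MaxVF = 16 and 7 collected stores,
// CandVF = clamp(7, 4, 16) = 7 and 7 + 1 = 8 is a power of two, so 7 is
// remembered as the non-power-of-2 candidate VF.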
18312 unsigned CandVF = std::clamp<unsigned>(Operands.size(), MinVF, MaxVF);
18313 if (has_single_bit(CandVF + 1)) {
18314 NonPowerOf2VF = CandVF;
18315 assert(NonPowerOf2VF != MaxVF &&
18316 "Non-power-of-2 VF should not be equal to MaxVF");
18320 unsigned MaxRegVF = MaxVF;
18321 MaxVF = std::min<unsigned>(MaxVF, bit_floor(Operands.size()));
18322 if (MaxVF < MinVF) {
18323 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
18324 << ") < "
18325 << "MinVF (" << MinVF << ")\n");
18326 continue;
18329 unsigned Sz = 1 + Log2_32(MaxVF) - Log2_32(MinVF);
18330 SmallVector<unsigned> CandidateVFs(Sz + (NonPowerOf2VF > 0 ? 1 : 0));
18331 unsigned Size = MinVF;
18332 for_each(reverse(CandidateVFs), [&](unsigned &VF) {
18333 VF = Size > MaxVF ? NonPowerOf2VF : Size;
18334 Size *= 2;
18336 unsigned End = Operands.size();
18337 unsigned Repeat = 0;
18338 constexpr unsigned MaxAttempts = 4;
18339 OwningArrayRef<std::pair<unsigned, unsigned>> RangeSizes(Operands.size());
18340 for_each(RangeSizes, [](std::pair<unsigned, unsigned> &P) {
18341 P.first = P.second = 1;
18343 DenseMap<Value *, std::pair<unsigned, unsigned>> NonSchedulable;
18344 auto IsNotVectorized = [](bool First,
18345 const std::pair<unsigned, unsigned> &P) {
18346 return First ? P.first > 0 : P.second > 0;
18348 auto IsVectorized = [](bool First,
18349 const std::pair<unsigned, unsigned> &P) {
18350 return First ? P.first == 0 : P.second == 0;
18352 auto VFIsProfitable = [](bool First, unsigned Size,
18353 const std::pair<unsigned, unsigned> &P) {
18354 return First ? Size >= P.first : Size >= P.second;
18356 auto FirstSizeSame = [](unsigned Size,
18357 const std::pair<unsigned, unsigned> &P) {
18358 return Size == P.first;
18360 while (true) {
18361 ++Repeat;
18362 bool RepeatChanged = false;
18363 bool AnyProfitableGraph = false;
18364 for (unsigned Size : CandidateVFs) {
18365 AnyProfitableGraph = false;
18366 unsigned StartIdx = std::distance(
18367 RangeSizes.begin(),
18368 find_if(RangeSizes, std::bind(IsNotVectorized, Size >= MaxRegVF,
18369 std::placeholders::_1)));
18370 while (StartIdx < End) {
18371 unsigned EndIdx =
18372 std::distance(RangeSizes.begin(),
18373 find_if(RangeSizes.drop_front(StartIdx),
18374 std::bind(IsVectorized, Size >= MaxRegVF,
18375 std::placeholders::_1)));
18376 unsigned Sz = EndIdx >= End ? End : EndIdx;
18377 for (unsigned Cnt = StartIdx; Cnt + Size <= Sz;) {
18378 if (!checkTreeSizes(RangeSizes.slice(Cnt, Size),
18379 Size >= MaxRegVF)) {
18380 ++Cnt;
18381 continue;
18383 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size);
18384 assert(all_of(Slice,
18385 [&](Value *V) {
18386 return cast<StoreInst>(V)
18387 ->getValueOperand()
18388 ->getType() ==
18389 cast<StoreInst>(Slice.front())
18390 ->getValueOperand()
18391 ->getType();
18392 }) &&
18393 "Expected all operands of same type.");
18394 if (!NonSchedulable.empty()) {
18395 auto [NonSchedSizeMax, NonSchedSizeMin] =
18396 NonSchedulable.lookup(Slice.front());
18397 if (NonSchedSizeMax > 0 && NonSchedSizeMin <= Size) {
18398 Cnt += NonSchedSizeMax;
18399 continue;
18402 unsigned TreeSize;
18403 std::optional<bool> Res =
18404 vectorizeStoreChain(Slice, R, Cnt, MinVF, TreeSize);
18405 if (!Res) {
18406 NonSchedulable
18407 .try_emplace(Slice.front(), std::make_pair(Size, Size))
18408 .first->getSecond()
18409 .second = Size;
18410 } else if (*Res) {
18411 // Mark the vectorized stores so that we don't vectorize them
18412 // again.
18413 VectorizedStores.insert(Slice.begin(), Slice.end());
18414 // Record that this attempt managed to vectorize something.
18416 AnyProfitableGraph = RepeatChanged = Changed = true;
18417 // If we vectorized the initial block, there is no need to try to
18418 // vectorize it again.
18419 for_each(RangeSizes.slice(Cnt, Size),
18420 [](std::pair<unsigned, unsigned> &P) {
18421 P.first = P.second = 0;
18423 if (Cnt < StartIdx + MinVF) {
18424 for_each(RangeSizes.slice(StartIdx, Cnt - StartIdx),
18425 [](std::pair<unsigned, unsigned> &P) {
18426 P.first = P.second = 0;
18428 StartIdx = Cnt + Size;
18430 if (Cnt > Sz - Size - MinVF) {
18431 for_each(RangeSizes.slice(Cnt + Size, Sz - (Cnt + Size)),
18432 [](std::pair<unsigned, unsigned> &P) {
18433 P.first = P.second = 0;
18435 if (Sz == End)
18436 End = Cnt;
18437 Sz = Cnt;
18439 Cnt += Size;
18440 continue;
18442 if (Size > 2 && Res &&
18443 !all_of(RangeSizes.slice(Cnt, Size),
18444 std::bind(VFIsProfitable, Size >= MaxRegVF, TreeSize,
18445 std::placeholders::_1))) {
18446 Cnt += Size;
18447 continue;
18449 // For very big VFs, check that we are not rebuilding the same
18450 // trees, just with a larger number of elements.
18451 if (Size > MaxRegVF && TreeSize > 1 &&
18452 all_of(RangeSizes.slice(Cnt, Size),
18453 std::bind(FirstSizeSame, TreeSize,
18454 std::placeholders::_1))) {
18455 Cnt += Size;
18456 while (Cnt != Sz && RangeSizes[Cnt].first == TreeSize)
18457 ++Cnt;
18458 continue;
18460 if (TreeSize > 1)
18461 for_each(RangeSizes.slice(Cnt, Size),
18462 [&](std::pair<unsigned, unsigned> &P) {
18463 if (Size >= MaxRegVF)
18464 P.second = std::max(P.second, TreeSize);
18465 else
18466 P.first = std::max(P.first, TreeSize);
18468 ++Cnt;
18469 AnyProfitableGraph = true;
18471 if (StartIdx >= End)
18472 break;
18473 if (Sz - StartIdx < Size && Sz - StartIdx >= MinVF)
18474 AnyProfitableGraph = true;
18475 StartIdx = std::distance(
18476 RangeSizes.begin(),
18477 find_if(RangeSizes.drop_front(Sz),
18478 std::bind(IsNotVectorized, Size >= MaxRegVF,
18479 std::placeholders::_1)));
18481 if (!AnyProfitableGraph && Size >= MaxRegVF && has_single_bit(Size))
18482 break;
18484 // All values vectorized - exit.
18485 if (all_of(RangeSizes, [](const std::pair<unsigned, unsigned> &P) {
18486 return P.first == 0 && P.second == 0;
18488 break;
18489 // Check if we have tried all attempts or there is no need for further attempts at all.
18490 if (Repeat >= MaxAttempts ||
18491 (Repeat > 1 && (RepeatChanged || !AnyProfitableGraph)))
18492 break;
18493 constexpr unsigned StoresLimit = 64;
18494 const unsigned MaxTotalNum = std::min<unsigned>(
18495 Operands.size(),
18496 static_cast<unsigned>(
18497 End -
18498 std::distance(
18499 RangeSizes.begin(),
18500 find_if(RangeSizes, std::bind(IsNotVectorized, true,
18501 std::placeholders::_1))) +
18502 1));
18503 unsigned VF = bit_ceil(CandidateVFs.front()) * 2;
18504 unsigned Limit =
18505 getFloorFullVectorNumberOfElements(*TTI, StoreTy, MaxTotalNum);
18506 CandidateVFs.clear();
18507 if (bit_floor(Limit) == VF)
18508 CandidateVFs.push_back(Limit);
18509 if (VF > MaxTotalNum || VF >= StoresLimit)
18510 break;
18511 for_each(RangeSizes, [&](std::pair<unsigned, unsigned> &P) {
18512 if (P.first != 0)
18513 P.first = std::max(P.second, P.first);
18515 // Last attempt to vectorize the maximum number of elements, if all previous
18516 // attempts were unsuccessful because of cost issues.
18517 CandidateVFs.push_back(VF);
18522 // Stores a pair (first: index of the store in the Stores array ref whose
18523 // address is taken as the base; second: sorted set of pairs {index, dist},
18524 // which are indices of stores in the set and their store location distances
18525 // relative to the base address).
18527 // Need to store the index of the very first store separately, since the set
18528 // may be reordered after the insertion and the first store may be moved. This
18529 // container allows us to reduce the number of calls to the getPointersDiff() function.
18530 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores;
18531 // Inserts the specified store SI with the given index Idx into the set of
18532 // stores. If a store with the same distance is already present - stop the
18533 // insertion and try to vectorize the stores found so far. If some stores from
18534 // this sequence were not vectorized - try to vectorize them together with the
18535 // new store later. This logic is applied only to the stores that come before
18536 // the previous store with the same distance.
18537 // Example:
18538 // 1. store x, %p
18539 // 2. store y, %p+1
18540 // 3. store z, %p+2
18541 // 4. store a, %p
18542 // 5. store b, %p+3
18543 // - Scan this from the last to first store. The very first bunch of stores is
18544 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores
18545 // vector).
18546 // - The next store in the list - #1 - has the same distance from store #5 as
18547 // the store #4.
18548 // - Try to vectorize sequence of stores 4,2,3,5.
18549 // - If all these stores are vectorized - just drop them.
18550 // - If some of them are not vectorized (say, #3 and #5), do extra analysis.
18551 // - Start new stores sequence.
18552 // The new bunch of stores is {1, {1, 0}}.
18553 // - Add the stores from previous sequence, that were not vectorized.
18554 // Here we consider the stores in reverse order relative to how they appear in
18555 // the IR (Stores is already reversed, see the vectorizeStoreChains() function).
18556 // Store #3 can be added -> comes after store #4 with the same distance as
18557 // store #1.
18558 // Store #5 cannot be added - comes before store #4.
18559 // This logic improves compile time: we assume that stores coming after a
18560 // previous store with the same distance most likely have memory dependencies,
18561 // so there is no need to waste compile time trying to vectorize them.
18562 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}.
18563 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) {
18564 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) {
18565 std::optional<int> Diff = getPointersDiff(
18566 Stores[Set.first]->getValueOperand()->getType(),
18567 Stores[Set.first]->getPointerOperand(),
18568 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE,
18569 /*StrictCheck=*/true);
18570 if (!Diff)
18571 continue;
18572 auto It = Set.second.find(std::make_pair(Idx, *Diff));
18573 if (It == Set.second.end()) {
18574 Set.second.emplace(Idx, *Diff);
18575 return;
18577 // Try to vectorize the first found set to avoid duplicate analysis.
18578 TryToVectorize(Set.second);
18579 unsigned ItIdx = It->first;
18580 int ItDist = It->second;
18581 StoreIndexToDistSet PrevSet;
18582 copy_if(Set.second, std::inserter(PrevSet, PrevSet.end()),
18583 [&](const std::pair<unsigned, int> &Pair) {
18584 return Pair.first > ItIdx;
18586 Set.second.clear();
18587 Set.first = Idx;
18588 Set.second.emplace(Idx, 0);
18589 // Insert stores that followed previous match to try to vectorize them
18590 // with this store.
18591 unsigned StartIdx = ItIdx + 1;
18592 SmallBitVector UsedStores(Idx - StartIdx);
18593 // Distances to previously found dup store (or this store, since they
18594 // store to the same addresses).
18595 SmallVector<int> Dists(Idx - StartIdx, 0);
18596 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) {
18597 // Do not try to vectorize sequences we have already tried.
18598 if (VectorizedStores.contains(Stores[Pair.first]))
18599 break;
18600 unsigned BI = Pair.first - StartIdx;
18601 UsedStores.set(BI);
18602 Dists[BI] = Pair.second - ItDist;
18604 for (unsigned I = StartIdx; I < Idx; ++I) {
18605 unsigned BI = I - StartIdx;
18606 if (UsedStores.test(BI))
18607 Set.second.emplace(I, Dists[BI]);
18609 return;
18611 auto &Res = SortedStores.emplace_back();
18612 Res.first = Idx;
18613 Res.second.emplace(Idx, 0);
18615 Type *PrevValTy = nullptr;
18616 for (auto [I, SI] : enumerate(Stores)) {
18617 if (R.isDeleted(SI))
18618 continue;
18619 if (!PrevValTy)
18620 PrevValTy = SI->getValueOperand()->getType();
18621 // Check that we do not try to vectorize stores of different types.
18622 if (PrevValTy != SI->getValueOperand()->getType()) {
18623 for (auto &Set : SortedStores)
18624 TryToVectorize(Set.second);
18625 SortedStores.clear();
18626 PrevValTy = SI->getValueOperand()->getType();
18628 FillStoresSet(I, SI);
18631 // Final vectorization attempt.
18632 for (auto &Set : SortedStores)
18633 TryToVectorize(Set.second);
18635 return Changed;
18638 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
18639 // Initialize the collections. We will make a single pass over the block.
18640 Stores.clear();
18641 GEPs.clear();
18643 // Visit the store and getelementptr instructions in BB and organize them in
18644 // Stores and GEPs according to the underlying objects of their pointer
18645 // operands.
18646 for (Instruction &I : *BB) {
18647 // Ignore store instructions that are volatile or have a pointer operand
18648 // that doesn't point to a scalar type.
18649 if (auto *SI = dyn_cast<StoreInst>(&I)) {
18650 if (!SI->isSimple())
18651 continue;
18652 if (!isValidElementType(SI->getValueOperand()->getType()))
18653 continue;
18654 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
18657 // Ignore getelementptr instructions that have more than one index, a
18658 // constant index, or a pointer operand that doesn't point to a scalar
18659 // type.
18660 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
18661 if (GEP->getNumIndices() != 1)
18662 continue;
18663 Value *Idx = GEP->idx_begin()->get();
18664 if (isa<Constant>(Idx))
18665 continue;
18666 if (!isValidElementType(Idx->getType()))
18667 continue;
18668 if (GEP->getType()->isVectorTy())
18669 continue;
18670 GEPs[GEP->getPointerOperand()].push_back(GEP);
18675 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
18676 bool MaxVFOnly) {
18677 if (VL.size() < 2)
18678 return false;
18680 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
18681 << VL.size() << ".\n");
18683 // Check that all of the parts are instructions of the same type,
18684 // we permit an alternate opcode via InstructionsState.
18685 InstructionsState S = getSameOpcode(VL, *TLI);
18686 if (!S.getOpcode())
18687 return false;
18689 Instruction *I0 = cast<Instruction>(S.OpValue);
18690 // Make sure invalid types (including vector type) are rejected before
18691 // determining vectorization factor for scalar instructions.
18692 for (Value *V : VL) {
18693 Type *Ty = V->getType();
18694 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
18695 // NOTE: the following will give the user an internal LLVM type name, which
18696 // may not be useful.
18697 R.getORE()->emit([&]() {
18698 std::string TypeStr;
18699 llvm::raw_string_ostream rso(TypeStr);
18700 Ty->print(rso);
18701 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
18702 << "Cannot SLP vectorize list: type "
18703 << TypeStr + " is unsupported by vectorizer";
18705 return false;
18709 unsigned Sz = R.getVectorElementSize(I0);
18710 unsigned MinVF = R.getMinVF(Sz);
18711 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF);
18712 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
18713 if (MaxVF < 2) {
18714 R.getORE()->emit([&]() {
18715 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
18716 << "Cannot SLP vectorize list: vectorization factor "
18717 << "less than 2 is not supported";
18719 return false;
18722 bool Changed = false;
18723 bool CandidateFound = false;
18724 InstructionCost MinCost = SLPCostThreshold.getValue();
18725 Type *ScalarTy = getValueType(VL[0]);
18727 unsigned NextInst = 0, MaxInst = VL.size();
18728 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
18729 // No actual vectorization should happen if the number of parts is the same
18730 // as the provided vectorization factor (i.e. the scalar type is used for the
18731 // vector code during codegen).
18732 auto *VecTy = getWidenedType(ScalarTy, VF);
18733 if (TTI->getNumberOfParts(VecTy) == VF)
18734 continue;
18735 for (unsigned I = NextInst; I < MaxInst; ++I) {
18736 unsigned ActualVF = std::min(MaxInst - I, VF);
18738 if (!hasFullVectorsOrPowerOf2(*TTI, ScalarTy, ActualVF))
18739 continue;
18741 if (MaxVFOnly && ActualVF < MaxVF)
18742 break;
18743 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2))
18744 break;
18746 SmallVector<Value *> Ops(ActualVF, nullptr);
18747 unsigned Idx = 0;
18748 for (Value *V : VL.drop_front(I)) {
18749 // Check that a previous iteration of this loop did not delete the
18750 // Value.
18751 if (auto *Inst = dyn_cast<Instruction>(V);
18752 !Inst || !R.isDeleted(Inst)) {
18753 Ops[Idx] = V;
18754 ++Idx;
18755 if (Idx == ActualVF)
18756 break;
18759 // Not enough vectorizable instructions - exit.
18760 if (Idx != ActualVF)
18761 break;
18763 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations "
18764 << "\n");
18766 R.buildTree(Ops);
18767 if (R.isTreeTinyAndNotFullyVectorizable())
18768 continue;
18769 R.reorderTopToBottom();
18770 R.reorderBottomToTop(
18771 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) &&
18772 !R.doesRootHaveInTreeUses());
18773 R.transformNodes();
18774 R.buildExternalUses();
18776 R.computeMinimumValueSizes();
18777 InstructionCost Cost = R.getTreeCost();
18778 CandidateFound = true;
18779 MinCost = std::min(MinCost, Cost);
18781 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
18782 << " for VF=" << ActualVF << "\n");
18783 if (Cost < -SLPCostThreshold) {
18784 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
18785 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
18786 cast<Instruction>(Ops[0]))
18787 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
18788 << " and with tree size "
18789 << ore::NV("TreeSize", R.getTreeSize()));
18791 R.vectorizeTree();
18792 // Move to the next bundle.
18793 I += VF - 1;
18794 NextInst = I + 1;
18795 Changed = true;
18800 if (!Changed && CandidateFound) {
18801 R.getORE()->emit([&]() {
18802 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
18803 << "List vectorization was possible but not beneficial with cost "
18804 << ore::NV("Cost", MinCost) << " >= "
18805 << ore::NV("Treshold", -SLPCostThreshold);
18807 } else if (!Changed) {
18808 R.getORE()->emit([&]() {
18809 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
18810 << "Cannot SLP vectorize list: vectorization was impossible"
18811 << " with available vectorization factors";
18814 return Changed;
18817 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
18818 if (!I)
18819 return false;
18821 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType()))
18822 return false;
18824 Value *P = I->getParent();
18826 // Vectorize in current basic block only.
18827 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
18828 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
18829 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P ||
18830 R.isDeleted(Op0) || R.isDeleted(Op1))
18831 return false;
18833 // First collect all possible candidates
18834 SmallVector<std::pair<Value *, Value *>, 4> Candidates;
18835 Candidates.emplace_back(Op0, Op1);
18837 auto *A = dyn_cast<BinaryOperator>(Op0);
18838 auto *B = dyn_cast<BinaryOperator>(Op1);
18839 // Try to skip B.
18840 if (A && B && B->hasOneUse()) {
18841 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
18842 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
18843 if (B0 && B0->getParent() == P && !R.isDeleted(B0))
18844 Candidates.emplace_back(A, B0);
18845 if (B1 && B1->getParent() == P && !R.isDeleted(B1))
18846 Candidates.emplace_back(A, B1);
18848 // Try to skip A.
18849 if (B && A && A->hasOneUse()) {
18850 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
18851 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
18852 if (A0 && A0->getParent() == P && !R.isDeleted(A0))
18853 Candidates.emplace_back(A0, B);
18854 if (A1 && A1->getParent() == P && !R.isDeleted(A1))
18855 Candidates.emplace_back(A1, B);
18858 if (Candidates.size() == 1)
18859 return tryToVectorizeList({Op0, Op1}, R);
18861 // We have multiple options. Try to pick the single best.
18862 std::optional<int> BestCandidate = R.findBestRootPair(Candidates);
18863 if (!BestCandidate)
18864 return false;
18865 return tryToVectorizeList(
18866 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R);
18869 namespace {
18871 /// Model horizontal reductions.
18873 /// A horizontal reduction is a tree of reduction instructions that has values
18874 /// that can be put into a vector as its leaves. For example:
18876 /// mul mul mul mul
18877 /// \ / \ /
18878 /// + +
18879 /// \ /
18880 /// +
18881 /// This tree has "mul" as its leaf values and "+" as its reduction
18882 /// instructions. A reduction can feed into a store or a binary operation
18883 /// feeding a phi.
18884 /// ...
18885 /// \ /
18886 /// +
18887 /// |
18888 /// phi +=
18890 /// Or:
18891 /// ...
18892 /// \ /
18893 /// +
18894 /// |
18895 /// *p =
18897 class HorizontalReduction {
18898 using ReductionOpsType = SmallVector<Value *, 16>;
18899 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
18900 ReductionOpsListType ReductionOps;
18901 /// List of possibly reduced values.
18902 SmallVector<SmallVector<Value *>> ReducedVals;
18903 /// Maps reduced value to the corresponding reduction operation.
18904 SmallDenseMap<Value *, SmallVector<Instruction *>, 16> ReducedValsToOps;
18905 WeakTrackingVH ReductionRoot;
18906 /// The type of reduction operation.
18907 RecurKind RdxKind;
18908 /// Checks if the optimization of original scalar identity operations on
18909 /// matched horizontal reductions is enabled and allowed.
18910 bool IsSupportedHorRdxIdentityOp = false;
18912 static bool isCmpSelMinMax(Instruction *I) {
18913 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
18914 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
18917 // And/or are potentially poison-safe logical patterns like:
18918 // select x, y, false
18919 // select x, true, y
18920 static bool isBoolLogicOp(Instruction *I) {
18921 return isa<SelectInst>(I) &&
18922 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr()));
18925 /// Checks if instruction is associative and can be vectorized.
18926 static bool isVectorizable(RecurKind Kind, Instruction *I) {
18927 if (Kind == RecurKind::None)
18928 return false;
18930 // Integer ops that map to select instructions or intrinsics are fine.
18931 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
18932 isBoolLogicOp(I))
18933 return true;
18935 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
18936 // FP min/max are associative except for NaN and -0.0. We do not
18937 // have to rule out -0.0 here because the intrinsic semantics do not
18938 // specify a fixed result for it.
18939 return I->getFastMathFlags().noNaNs();
18942 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum)
18943 return true;
18945 return I->isAssociative();
18948 static Value *getRdxOperand(Instruction *I, unsigned Index) {
18949 // Poison-safe 'or' takes the form: select X, true, Y
18950 // To make that work with the normal operand processing, we skip the
18951 // true value operand.
18952 // TODO: Change the code and data structures to handle this without a hack.
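// Illustrative example: for the poison-safe 'or'
//   %r = select i1 %x, i1 true, i1 %y
// asking for operand 1 returns %y (operand 2) rather than the constant true.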
18953 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
18954 return I->getOperand(2);
18955 return I->getOperand(Index);
18958 /// Creates reduction operation with the current opcode.
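/// Illustrative sketch: for RecurKind::Or with UseSelect and i1 operands this
/// emits the poison-safe form
///   %res = select i1 %lhs, i1 true, i1 %rhs
/// instead of a plain 'or', matching the patterns accepted by isBoolLogicOp.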
18959 static Value *createOp(IRBuilderBase &Builder, RecurKind Kind, Value *LHS,
18960 Value *RHS, const Twine &Name, bool UseSelect) {
18961 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
18962 switch (Kind) {
18963 case RecurKind::Or:
18964 if (UseSelect &&
18965 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
18966 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
18967 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
18968 Name);
18969 case RecurKind::And:
18970 if (UseSelect &&
18971 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
18972 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
18973 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
18974 Name);
18975 case RecurKind::Add:
18976 case RecurKind::Mul:
18977 case RecurKind::Xor:
18978 case RecurKind::FAdd:
18979 case RecurKind::FMul:
18980 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
18981 Name);
18982 case RecurKind::FMax:
18983 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
18984 case RecurKind::FMin:
18985 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
18986 case RecurKind::FMaximum:
18987 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS);
18988 case RecurKind::FMinimum:
18989 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS);
18990 case RecurKind::SMax:
18991 if (UseSelect) {
18992 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
18993 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
18995 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
18996 case RecurKind::SMin:
18997 if (UseSelect) {
18998 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
18999 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19001 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
19002 case RecurKind::UMax:
19003 if (UseSelect) {
19004 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
19005 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19007 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
19008 case RecurKind::UMin:
19009 if (UseSelect) {
19010 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
19011 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19013 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
19014 default:
19015 llvm_unreachable("Unknown reduction operation.");
19019 /// Creates a reduction operation with the current opcode, using the IR flags
19020 /// from \p ReductionOps and dropping nuw/nsw flags.
19021 static Value *createOp(IRBuilderBase &Builder, RecurKind RdxKind, Value *LHS,
19022 Value *RHS, const Twine &Name,
19023 const ReductionOpsListType &ReductionOps) {
19024 bool UseSelect = ReductionOps.size() == 2 ||
19025 // Logical or/and.
19026 (ReductionOps.size() == 1 &&
19027 any_of(ReductionOps.front(), IsaPred<SelectInst>));
19028 assert((!UseSelect || ReductionOps.size() != 2 ||
19029 isa<SelectInst>(ReductionOps[1][0])) &&
19030 "Expected cmp + select pairs for reduction");
19031 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
19032 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
19033 if (auto *Sel = dyn_cast<SelectInst>(Op)) {
19034 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr,
19035 /*IncludeWrapFlags=*/false);
19036 propagateIRFlags(Op, ReductionOps[1], nullptr,
19037 /*IncludeWrapFlags=*/false);
19038 return Op;
19041 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false);
19042 return Op;
19045 public:
19046 static RecurKind getRdxKind(Value *V) {
19047 auto *I = dyn_cast<Instruction>(V);
19048 if (!I)
19049 return RecurKind::None;
19050 if (match(I, m_Add(m_Value(), m_Value())))
19051 return RecurKind::Add;
19052 if (match(I, m_Mul(m_Value(), m_Value())))
19053 return RecurKind::Mul;
19054 if (match(I, m_And(m_Value(), m_Value())) ||
19055 match(I, m_LogicalAnd(m_Value(), m_Value())))
19056 return RecurKind::And;
19057 if (match(I, m_Or(m_Value(), m_Value())) ||
19058 match(I, m_LogicalOr(m_Value(), m_Value())))
19059 return RecurKind::Or;
19060 if (match(I, m_Xor(m_Value(), m_Value())))
19061 return RecurKind::Xor;
19062 if (match(I, m_FAdd(m_Value(), m_Value())))
19063 return RecurKind::FAdd;
19064 if (match(I, m_FMul(m_Value(), m_Value())))
19065 return RecurKind::FMul;
19067 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
19068 return RecurKind::FMax;
19069 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
19070 return RecurKind::FMin;
19072 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
19073 return RecurKind::FMaximum;
19074 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
19075 return RecurKind::FMinimum;
19076 // This matches either cmp+select or intrinsics. SLP is expected to handle
19077 // either form.
19078 // TODO: If we are canonicalizing to intrinsics, we can remove several
19079 // special-case paths that deal with selects.
19080 if (match(I, m_SMax(m_Value(), m_Value())))
19081 return RecurKind::SMax;
19082 if (match(I, m_SMin(m_Value(), m_Value())))
19083 return RecurKind::SMin;
19084 if (match(I, m_UMax(m_Value(), m_Value())))
19085 return RecurKind::UMax;
19086 if (match(I, m_UMin(m_Value(), m_Value())))
19087 return RecurKind::UMin;
19089 if (auto *Select = dyn_cast<SelectInst>(I)) {
19090 // Try harder: look for a min/max pattern based on instructions producing
19091 // the same values, such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
19092 // During the intermediate stages of SLP, it's very common to have a
19093 // pattern like this (since optimizeGatherSequence is run only once
19094 // at the end):
19095 // %1 = extractelement <2 x i32> %a, i32 0
19096 // %2 = extractelement <2 x i32> %a, i32 1
19097 // %cond = icmp sgt i32 %1, %2
19098 // %3 = extractelement <2 x i32> %a, i32 0
19099 // %4 = extractelement <2 x i32> %a, i32 1
19100 // %select = select i1 %cond, i32 %3, i32 %4
19101 CmpInst::Predicate Pred;
19102 Instruction *L1;
19103 Instruction *L2;
19105 Value *LHS = Select->getTrueValue();
19106 Value *RHS = Select->getFalseValue();
19107 Value *Cond = Select->getCondition();
19109 // TODO: Support inverse predicates.
19110 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
19111 if (!isa<ExtractElementInst>(RHS) ||
19112 !L2->isIdenticalTo(cast<Instruction>(RHS)))
19113 return RecurKind::None;
19114 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
19115 if (!isa<ExtractElementInst>(LHS) ||
19116 !L1->isIdenticalTo(cast<Instruction>(LHS)))
19117 return RecurKind::None;
19118 } else {
19119 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
19120 return RecurKind::None;
19121 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
19122 !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
19123 !L2->isIdenticalTo(cast<Instruction>(RHS)))
19124 return RecurKind::None;
19127 switch (Pred) {
19128 default:
19129 return RecurKind::None;
19130 case CmpInst::ICMP_SGT:
19131 case CmpInst::ICMP_SGE:
19132 return RecurKind::SMax;
19133 case CmpInst::ICMP_SLT:
19134 case CmpInst::ICMP_SLE:
19135 return RecurKind::SMin;
19136 case CmpInst::ICMP_UGT:
19137 case CmpInst::ICMP_UGE:
19138 return RecurKind::UMax;
19139 case CmpInst::ICMP_ULT:
19140 case CmpInst::ICMP_ULE:
19141 return RecurKind::UMin;
19144 return RecurKind::None;
19147 /// Get the index of the first operand.
19148 static unsigned getFirstOperandIndex(Instruction *I) {
19149 return isCmpSelMinMax(I) ? 1 : 0;
19152 private:
19153 /// Total number of operands in the reduction operation.
19154 static unsigned getNumberOfOperands(Instruction *I) {
19155 return isCmpSelMinMax(I) ? 3 : 2;
19158 /// Checks if the instruction is in basic block \p BB.
19159 /// For a cmp+sel min/max reduction check that both ops are in \p BB.
19160 static bool hasSameParent(Instruction *I, BasicBlock *BB) {
19161 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) {
19162 auto *Sel = cast<SelectInst>(I);
19163 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
19164 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
19166 return I->getParent() == BB;
19169 /// Expected number of uses for reduction operations/reduced values.
19170 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
19171 if (IsCmpSelMinMax) {
19172 // SelectInst must be used twice while the condition op must have a single
19173 // use only.
19174 if (auto *Sel = dyn_cast<SelectInst>(I))
19175 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
19176 return I->hasNUses(2);
19179 // Arithmetic reduction operation must be used once only.
19180 return I->hasOneUse();
19183 /// Initializes the list of reduction operations.
19184 void initReductionOps(Instruction *I) {
19185 if (isCmpSelMinMax(I))
19186 ReductionOps.assign(2, ReductionOpsType());
19187 else
19188 ReductionOps.assign(1, ReductionOpsType());
19191 /// Add all reduction operations for the reduction instruction \p I.
19192 void addReductionOps(Instruction *I) {
19193 if (isCmpSelMinMax(I)) {
19194 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
19195 ReductionOps[1].emplace_back(I);
19196 } else {
19197 ReductionOps[0].emplace_back(I);
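/// Checks if the list of potentially reduced values is worth reducing: more
/// than one value, or a single constant, or a single non-load instruction
/// whose opcode is valid for alternation.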
19201 static bool isGoodForReduction(ArrayRef<Value *> Data) {
19202 int Sz = Data.size();
19203 auto *I = dyn_cast<Instruction>(Data.front());
19204 return Sz > 1 || isConstant(Data.front()) ||
19205 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode()));
19208 public:
19209 HorizontalReduction() = default;
19211 /// Try to find a reduction tree.
19212 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root,
19213 ScalarEvolution &SE, const DataLayout &DL,
19214 const TargetLibraryInfo &TLI) {
19215 RdxKind = HorizontalReduction::getRdxKind(Root);
19216 if (!isVectorizable(RdxKind, Root))
19217 return false;
19219 // Analyze "regular" integer/FP types for reductions - no target-specific
19220 // types or pointers.
19221 Type *Ty = Root->getType();
19222 if (!isValidElementType(Ty) || Ty->isPointerTy())
19223 return false;
19225 // Though the ultimate reduction may have multiple uses, its condition must
19226 // have only a single use.
19227 if (auto *Sel = dyn_cast<SelectInst>(Root))
19228 if (!Sel->getCondition()->hasOneUse())
19229 return false;
19231 ReductionRoot = Root;
19233 // Iterate through all the operands of the possible reduction tree and
19234 // gather all the reduced values, sorting them by their value id.
19235 BasicBlock *BB = Root->getParent();
19236 bool IsCmpSelMinMax = isCmpSelMinMax(Root);
19237 SmallVector<std::pair<Instruction *, unsigned>> Worklist(
19238 1, std::make_pair(Root, 0));
19239 // Checks if the operands of the \p TreeN instruction are also reduction
19240 // operations or should be treated as reduced values or extra arguments,
19241 // which are not part of the reduction.
19242 auto CheckOperands = [&](Instruction *TreeN,
19243 SmallVectorImpl<Value *> &PossibleReducedVals,
19244 SmallVectorImpl<Instruction *> &ReductionOps,
19245 unsigned Level) {
19246 for (int I : reverse(seq<int>(getFirstOperandIndex(TreeN),
19247 getNumberOfOperands(TreeN)))) {
19248 Value *EdgeVal = getRdxOperand(TreeN, I);
19249 ReducedValsToOps[EdgeVal].push_back(TreeN);
19250 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
19251 // If the edge is not an instruction, differs from the main reduction
19252 // opcode, or has too many uses, treat it as a possible reduced value.
19253 // Also, do not try to reduce constant values if the operation is not
19254 // foldable.
19255 if (!EdgeInst || Level > RecursionMaxDepth ||
19256 getRdxKind(EdgeInst) != RdxKind ||
19257 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
19258 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
19259 !isVectorizable(RdxKind, EdgeInst) ||
19260 (R.isAnalyzedReductionRoot(EdgeInst) &&
19261 all_of(EdgeInst->operands(), IsaPred<Constant>))) {
19262 PossibleReducedVals.push_back(EdgeVal);
19263 continue;
19265 ReductionOps.push_back(EdgeInst);
19268 // Try to regroup reduced values so that it becomes more profitable to try
19269 // to reduce them. Values are grouped by their value ids, instructions by
19270 // instruction op id and/or alternate op id, plus extra analysis is done for
19271 // loads (grouping them by the distance between pointers) and cmp
19272 // instructions (grouping them by the predicate).
19273 SmallMapVector<
19274 size_t, SmallMapVector<size_t, SmallMapVector<Value *, unsigned, 2>, 2>,
19276 PossibleReducedVals;
19277 initReductionOps(Root);
19278 DenseMap<std::pair<size_t, Value *>, SmallVector<LoadInst *>> LoadsMap;
19279 SmallSet<size_t, 2> LoadKeyUsed;
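    // Computes a hash subkey for loads so that loads with a constant pointer
    // distance or otherwise compatible pointers end up in the same group of
    // possible reduced values.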
19281 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
19282 Key = hash_combine(hash_value(LI->getParent()), Key);
19283 Value *Ptr =
19284 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth);
19285 if (!LoadKeyUsed.insert(Key).second) {
19286 auto LIt = LoadsMap.find(std::make_pair(Key, Ptr));
19287 if (LIt != LoadsMap.end()) {
19288 for (LoadInst *RLI : LIt->second) {
19289 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
19290 LI->getType(), LI->getPointerOperand(), DL, SE,
19291 /*StrictCheck=*/true))
19292 return hash_value(RLI->getPointerOperand());
19294 for (LoadInst *RLI : LIt->second) {
19295 if (arePointersCompatible(RLI->getPointerOperand(),
19296 LI->getPointerOperand(), TLI)) {
19297 hash_code SubKey = hash_value(RLI->getPointerOperand());
19298 return SubKey;
19301 if (LIt->second.size() > 2) {
19302 hash_code SubKey =
19303 hash_value(LIt->second.back()->getPointerOperand());
19304 return SubKey;
19308 LoadsMap.try_emplace(std::make_pair(Key, Ptr))
19309 .first->second.push_back(LI);
19310 return hash_value(LI->getPointerOperand());
19313 while (!Worklist.empty()) {
19314 auto [TreeN, Level] = Worklist.pop_back_val();
19315 SmallVector<Value *> PossibleRedVals;
19316 SmallVector<Instruction *> PossibleReductionOps;
19317 CheckOperands(TreeN, PossibleRedVals, PossibleReductionOps, Level);
19318 addReductionOps(TreeN);
19319 // Add reduction values. The values are sorted for better vectorization
19320 // results.
19321 for (Value *V : PossibleRedVals) {
19322 size_t Key, Idx;
19323 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey,
19324 /*AllowAlternate=*/false);
19325 ++PossibleReducedVals[Key][Idx]
19326 .insert(std::make_pair(V, 0))
19327 .first->second;
19329 for (Instruction *I : reverse(PossibleReductionOps))
19330 Worklist.emplace_back(I, I->getParent() == BB ? 0 : Level + 1);
19332 auto PossibleReducedValsVect = PossibleReducedVals.takeVector();
19333 // Sort values by the total number of value kinds to start the reduction
19334 // from the longest possible sequences of reduced values.
19335 for (auto &PossibleReducedVals : PossibleReducedValsVect) {
19336 auto PossibleRedVals = PossibleReducedVals.second.takeVector();
19337 SmallVector<SmallVector<Value *>> PossibleRedValsVect;
19338 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end();
19339 It != E; ++It) {
19340 PossibleRedValsVect.emplace_back();
19341 auto RedValsVect = It->second.takeVector();
19342 stable_sort(RedValsVect, llvm::less_second());
19343 for (const std::pair<Value *, unsigned> &Data : RedValsVect)
19344 PossibleRedValsVect.back().append(Data.second, Data.first);
19346 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) {
19347 return P1.size() > P2.size();
19349 int NewIdx = -1;
19350 for (ArrayRef<Value *> Data : PossibleRedValsVect) {
19351 if (NewIdx < 0 ||
19352 (!isGoodForReduction(Data) &&
19353 (!isa<LoadInst>(Data.front()) ||
19354 !isa<LoadInst>(ReducedVals[NewIdx].front()) ||
19355 getUnderlyingObject(
19356 cast<LoadInst>(Data.front())->getPointerOperand()) !=
19357 getUnderlyingObject(
19358 cast<LoadInst>(ReducedVals[NewIdx].front())
19359 ->getPointerOperand())))) {
19360 NewIdx = ReducedVals.size();
19361 ReducedVals.emplace_back();
19363 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend());
19366 // Sort the reduced values by the number of same/alternate opcodes and/or
19367 // pointer operands.
19368 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) {
19369 return P1.size() > P2.size();
19371 return true;
19374 /// Attempt to vectorize the tree found by matchAssociativeReduction.
19375 Value *tryToReduce(BoUpSLP &V, const DataLayout &DL, TargetTransformInfo *TTI,
19376 const TargetLibraryInfo &TLI) {
19377 const unsigned ReductionLimit = VectorizeNonPowerOf2 ? 3 : 4;
19378 constexpr unsigned RegMaxNumber = 4;
19379 constexpr unsigned RedValsMaxNumber = 128;
19380 // If there are a sufficient number of reduction values, reduce
19381 // to a nearby power-of-2. We can safely generate oversized
19382 // vectors and rely on the backend to split them to legal sizes.
19383 if (unsigned NumReducedVals = std::accumulate(
19384 ReducedVals.begin(), ReducedVals.end(), 0,
19385 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned {
19386 if (!isGoodForReduction(Vals))
19387 return Num;
19388 return Num + Vals.size();
19390 NumReducedVals < ReductionLimit &&
19391 all_of(ReducedVals, [](ArrayRef<Value *> RedV) {
19392 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV);
19393 })) {
19394 for (ReductionOpsType &RdxOps : ReductionOps)
19395 for (Value *RdxOp : RdxOps)
19396 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
19397 return nullptr;
19400 IRBuilder<TargetFolder> Builder(ReductionRoot->getContext(),
19401 TargetFolder(DL));
19402 Builder.SetInsertPoint(cast<Instruction>(ReductionRoot));
19404 // Track the reduced values in case they are replaced by extractelement
19405 // instructions because of the vectorization.
19406 DenseMap<Value *, WeakTrackingVH> TrackedVals(ReducedVals.size() *
19407 ReducedVals.front().size());
19409 // The compare instruction of a min/max is the insertion point for new
19410 // instructions and may be replaced with a new compare instruction.
19411 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
19412 assert(isa<SelectInst>(RdxRootInst) &&
19413 "Expected min/max reduction to have select root instruction");
19414 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
19415 assert(isa<Instruction>(ScalarCond) &&
19416 "Expected min/max reduction to have compare condition");
19417 return cast<Instruction>(ScalarCond);
19420 // Return the new VectorizedTree, based on the previous value.
19421 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) {
19422 if (VectorizedTree) {
19423 // Update the final value in the reduction.
19424 Builder.SetCurrentDebugLocation(
19425 cast<Instruction>(ReductionOps.front().front())->getDebugLoc());
19426 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) ||
19427 (isGuaranteedNotToBePoison(Res) &&
19428 !isGuaranteedNotToBePoison(VectorizedTree))) {
19429 auto It = ReducedValsToOps.find(Res);
19430 if (It != ReducedValsToOps.end() &&
19431 any_of(It->getSecond(),
19432 [](Instruction *I) { return isBoolLogicOp(I); }))
19433 std::swap(VectorizedTree, Res);
19436 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx",
19437 ReductionOps);
19439 // Initialize the final value in the reduction.
19440 return Res;
19442 bool AnyBoolLogicOp = any_of(ReductionOps.back(), [](Value *V) {
19443 return isBoolLogicOp(cast<Instruction>(V));
19445 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
19446 ReductionOps.front().size());
19447 for (ReductionOpsType &RdxOps : ReductionOps)
19448 for (Value *RdxOp : RdxOps) {
19449 if (!RdxOp)
19450 continue;
19451 IgnoreList.insert(RdxOp);
19453 // Intersect the fast-math-flags from all reduction operations.
19454 FastMathFlags RdxFMF;
19455 RdxFMF.set();
19456 for (Value *U : IgnoreList)
19457 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
19458 RdxFMF &= FPMO->getFastMathFlags();
19459 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
19461 // Need to track reduced values, as they may be changed during vectorization
19462 // of subvectors.
19463 for (ArrayRef<Value *> Candidates : ReducedVals)
19464 for (Value *V : Candidates)
19465 TrackedVals.try_emplace(V, V);
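    // Helper that returns a mutable reference to the counter stored for a
    // value in the map, asserting that the key is present.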
19467 auto At = [](SmallMapVector<Value *, unsigned, 16> &MV,
19468 Value *V) -> unsigned & {
19469 auto *It = MV.find(V);
19470 assert(It != MV.end() && "Unable to find given key.");
19471 return It->second;
19474 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
19475 // List of the values that were reduced in other trees as part of gather
19476 // nodes and thus require an extract if fully vectorized in other trees.
19477 SmallPtrSet<Value *, 4> RequiredExtract;
19478 WeakTrackingVH VectorizedTree = nullptr;
19479 bool CheckForReusedReductionOps = false;
19480 // Try to vectorize elements based on their type.
19481 SmallVector<InstructionsState> States;
19482 for (ArrayRef<Value *> RV : ReducedVals)
19483 States.push_back(getSameOpcode(RV, TLI));
19484 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
19485 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
19486 InstructionsState S = States[I];
19487 SmallVector<Value *> Candidates;
19488 Candidates.reserve(2 * OrigReducedVals.size());
19489 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
19490 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
19491 Value *RdxVal = TrackedVals.at(OrigReducedVals[Cnt]);
19492 // Check whether the reduction value was overridden by the extractelement
19493 // instruction because of the vectorization, and exclude it if it is not
19494 // compatible with the other values.
19495 // Also check if the instruction was folded to a constant/other value.
19496 auto *Inst = dyn_cast<Instruction>(RdxVal);
19497 if ((Inst && isVectorLikeInstWithConstOps(Inst) &&
19498 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) ||
19499 (S.getOpcode() && !Inst))
19500 continue;
19501 Candidates.push_back(RdxVal);
19502 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
19504 bool ShuffledExtracts = false;
19505 // Try to handle shuffled extractelements.
19506 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
19507 I + 1 < E) {
19508 SmallVector<Value *> CommonCandidates(Candidates);
19509 for (Value *RV : ReducedVals[I + 1]) {
19510 Value *RdxVal = TrackedVals.at(RV);
19511 // Check whether the reduction value was overridden by the
19512 // extractelement instruction because of the vectorization and
19513 // exclude it if it is not compatible with other values.
19514 auto *Inst = dyn_cast<ExtractElementInst>(RdxVal);
19515 if (!Inst)
19516 continue;
19517 CommonCandidates.push_back(RdxVal);
19518 TrackedToOrig.try_emplace(RdxVal, RV);
19520 SmallVector<int> Mask;
19521 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
19522 ++I;
19523 Candidates.swap(CommonCandidates);
19524 ShuffledExtracts = true;
19528 // Emit code for constant values.
19529 if (Candidates.size() > 1 && allConstant(Candidates)) {
19530 Value *Res = Candidates.front();
19531 Value *OrigV = TrackedToOrig.at(Candidates.front());
19532 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
19533 for (Value *VC : ArrayRef(Candidates).drop_front()) {
19534 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps);
19535 Value *OrigV = TrackedToOrig.at(VC);
19536 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
19537 if (auto *ResI = dyn_cast<Instruction>(Res))
19538 V.analyzedReductionRoot(ResI);
19540 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res);
19541 continue;
19544 unsigned NumReducedVals = Candidates.size();
19545 if (NumReducedVals < ReductionLimit &&
19546 (NumReducedVals < 2 || !isSplat(Candidates)))
19547 continue;
19549 // Check if we support processing of repeated scalar values (optimization of
19550 // original scalar identity operations on matched horizontal reductions).
19551 IsSupportedHorRdxIdentityOp = RdxKind != RecurKind::Mul &&
19552 RdxKind != RecurKind::FMul &&
19553 RdxKind != RecurKind::FMulAdd;
19554 // Gather same values.
19555 SmallMapVector<Value *, unsigned, 16> SameValuesCounter;
19556 if (IsSupportedHorRdxIdentityOp)
19557 for (Value *V : Candidates) {
19558 Value *OrigV = TrackedToOrig.at(V);
19559 ++SameValuesCounter.try_emplace(OrigV).first->second;
19561 // Used to check if the reduced values are used the same number of times. In
19562 // this case the compiler may produce better code. E.g. if the reduced values
19563 // are aabbccdd (8 x values), then the first node of the tree will have a
19564 // node for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>.
19565 // Plus, the final reduction will be performed on <8 x aabbccdd>.
19566 // Instead, the compiler may build the <4 x abcd> tree immediately and then
19567 // emit a reduction of (4 x abcd) * 2.
19568 // Currently this only handles add/fadd/xor; and/or/min/max do not require
19569 // this analysis, and other operations may require an extra estimation of
19570 // the profitability.
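      // For example (illustrative), for reduced values aabbcc every value
      // occurs twice, so an add reduction can be emitted as (a + b + c) * 2.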
19571 bool SameScaleFactor = false;
19572 bool OptReusedScalars = IsSupportedHorRdxIdentityOp &&
19573 SameValuesCounter.size() != Candidates.size();
19574 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
19575 if (OptReusedScalars) {
19576 SameScaleFactor =
19577 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd ||
19578 RdxKind == RecurKind::Xor) &&
19579 all_of(drop_begin(SameValuesCounter),
19580 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) {
19581 return P.second == SameValuesCounter.front().second;
19583 Candidates.resize(SameValuesCounter.size());
19584 transform(SameValuesCounter, Candidates.begin(),
19585 [&](const auto &P) { return TrackedVals.at(P.first); });
19586 NumReducedVals = Candidates.size();
19587 // Have a reduction of the same element.
19588 if (NumReducedVals == 1) {
19589 Value *OrigV = TrackedToOrig.at(Candidates.front());
19590 unsigned Cnt = At(SameValuesCounter, OrigV);
19591 Value *RedVal =
19592 emitScaleForReusedOps(Candidates.front(), Builder, Cnt);
19593 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
19594 VectorizedVals.try_emplace(OrigV, Cnt);
19595 ExternallyUsedValues.insert(OrigV);
19596 continue;
19600 unsigned MaxVecRegSize = V.getMaxVecRegSize();
19601 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
19602 const unsigned MaxElts = std::clamp<unsigned>(
19603 llvm::bit_floor(MaxVecRegSize / EltSize), RedValsMaxNumber,
19604 RegMaxNumber * RedValsMaxNumber);
19606 unsigned ReduxWidth = NumReducedVals;
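      // Picks the actual vector factor: rounds ReduxWidth down to a full
      // vector number of elements and keeps shrinking it to a power of two
      // while the widened type would be split into more parts than there are
      // vector registers.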
19607 auto GetVectorFactor = [&, &TTI = *TTI](unsigned ReduxWidth) {
19608 unsigned NumParts, NumRegs;
19609 Type *ScalarTy = Candidates.front()->getType();
19610 ReduxWidth =
19611 getFloorFullVectorNumberOfElements(TTI, ScalarTy, ReduxWidth);
19612 VectorType *Tp = getWidenedType(ScalarTy, ReduxWidth);
19613 NumParts = TTI.getNumberOfParts(Tp);
19614 NumRegs =
19615 TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true, Tp));
19616 while (NumParts > NumRegs) {
19617 ReduxWidth = bit_floor(ReduxWidth - 1);
19618 VectorType *Tp = getWidenedType(ScalarTy, ReduxWidth);
19619 NumParts = TTI.getNumberOfParts(Tp);
19620 NumRegs =
19621 TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true, Tp));
19623 if (NumParts > NumRegs / 2)
19624 ReduxWidth = bit_floor(ReduxWidth);
19625 return ReduxWidth;
19627 if (!VectorizeNonPowerOf2 || !has_single_bit(ReduxWidth + 1))
19628 ReduxWidth = GetVectorFactor(ReduxWidth);
19629 ReduxWidth = std::min(ReduxWidth, MaxElts);
19631 unsigned Start = 0;
19632 unsigned Pos = Start;
19633 // Restarts vectorization attempt with lower vector factor.
19634 unsigned PrevReduxWidth = ReduxWidth;
19635 bool CheckForReusedReductionOpsLocal = false;
19636 auto AdjustReducedVals = [&](bool IgnoreVL = false) {
19637 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
19638 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
19639 // Check if any of the reduction ops are gathered. If so, it is worth
19640 // trying again with a smaller number of reduction ops.
19641 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
19643 ++Pos;
19644 if (Pos < NumReducedVals - ReduxWidth + 1)
19645 return IsAnyRedOpGathered;
19646 Pos = Start;
19647 --ReduxWidth;
19648 if (ReduxWidth > 1)
19649 ReduxWidth = GetVectorFactor(ReduxWidth);
19650 return IsAnyRedOpGathered;
19652 bool AnyVectorized = false;
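      // (Position, vector factor) pairs of candidate slices that have already
      // been analyzed and rejected, so later, narrower attempts can skip them.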
19653 SmallDenseSet<std::pair<unsigned, unsigned>, 8> IgnoredCandidates;
19654 while (Pos < NumReducedVals - ReduxWidth + 1 &&
19655 ReduxWidth >= ReductionLimit) {
19656 // Dependency in the tree of the reduction ops - drop this attempt and try
19657 // later.
19658 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
19659 Start == 0) {
19660 CheckForReusedReductionOps = true;
19661 break;
19663 PrevReduxWidth = ReduxWidth;
19664 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
19665 // Been analyzed already - skip.
19666 if (IgnoredCandidates.contains(std::make_pair(Pos, ReduxWidth)) ||
19667 (!has_single_bit(ReduxWidth) &&
19668 (IgnoredCandidates.contains(
19669 std::make_pair(Pos, bit_floor(ReduxWidth))) ||
19670 IgnoredCandidates.contains(
19671 std::make_pair(Pos + (ReduxWidth - bit_floor(ReduxWidth)),
19672 bit_floor(ReduxWidth))))) ||
19673 V.areAnalyzedReductionVals(VL)) {
19674 (void)AdjustReducedVals(/*IgnoreVL=*/true);
19675 continue;
19677 // Early exit if any of the reduction values were deleted during
19678 // previous vectorization attempts.
19679 if (any_of(VL, [&V](Value *RedVal) {
19680 auto *RedValI = dyn_cast<Instruction>(RedVal);
19681 if (!RedValI)
19682 return false;
19683 return V.isDeleted(RedValI);
19685 break;
19686 V.buildTree(VL, IgnoreList);
19687 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
19688 if (!AdjustReducedVals())
19689 V.analyzedReductionVals(VL);
19690 continue;
19692 if (V.isLoadCombineReductionCandidate(RdxKind)) {
19693 if (!AdjustReducedVals())
19694 V.analyzedReductionVals(VL);
19695 continue;
19697 V.reorderTopToBottom();
19698 // No need to reorder the root node at all.
19699 V.reorderBottomToTop(/*IgnoreReorder=*/true);
19700 // Keep other extracted reduction values if they are used in the
19701 // vectorization trees.
19702 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues(
19703 ExternallyUsedValues);
19704 // The reduction root is used as the insertion point for new
19705 // instructions, so set it as externally used to prevent it from being
19706 // deleted.
19707 LocalExternallyUsedValues.insert(ReductionRoot);
19708 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) {
19709 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1))
19710 continue;
19711 for (Value *V : ReducedVals[Cnt])
19712 if (isa<Instruction>(V))
19713 LocalExternallyUsedValues.insert(TrackedVals[V]);
19715 if (!IsSupportedHorRdxIdentityOp) {
19716 // Number of uses of the candidates in the vector of values.
19717 assert(SameValuesCounter.empty() &&
19718 "Reused values counter map is not empty");
19719 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) {
19720 if (Cnt >= Pos && Cnt < Pos + ReduxWidth)
19721 continue;
19722 Value *V = Candidates[Cnt];
19723 Value *OrigV = TrackedToOrig.at(V);
19724 ++SameValuesCounter.try_emplace(OrigV).first->second;
19727 V.transformNodes();
19728 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end());
19729 // Gather externally used values.
19730 SmallPtrSet<Value *, 4> Visited;
19731 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) {
19732 if (Cnt >= Pos && Cnt < Pos + ReduxWidth)
19733 continue;
19734 Value *RdxVal = Candidates[Cnt];
19735 if (auto It = TrackedVals.find(RdxVal); It != TrackedVals.end())
19736 RdxVal = It->second;
19737 if (!Visited.insert(RdxVal).second)
19738 continue;
19739 // Check if the scalar was vectorized as part of the vectorization
19740 // tree but not the top node.
19741 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) {
19742 LocalExternallyUsedValues.insert(RdxVal);
19743 continue;
19745 Value *OrigV = TrackedToOrig.at(RdxVal);
19746 unsigned NumOps =
19747 VectorizedVals.lookup(OrigV) + At(SameValuesCounter, OrigV);
19748 if (NumOps != ReducedValsToOps.at(OrigV).size())
19749 LocalExternallyUsedValues.insert(RdxVal);
19751 // The list of reused scalars is not needed in regular mode anymore.
19752 if (!IsSupportedHorRdxIdentityOp)
19753 SameValuesCounter.clear();
19754 for (Value *RdxVal : VL)
19755 if (RequiredExtract.contains(RdxVal))
19756 LocalExternallyUsedValues.insert(RdxVal);
19757 V.buildExternalUses(LocalExternallyUsedValues);
19759 V.computeMinimumValueSizes();
19761 // Estimate cost.
19762 InstructionCost TreeCost = V.getTreeCost(VL);
19763 InstructionCost ReductionCost =
19764 getReductionCost(TTI, VL, IsCmpSelMinMax, ReduxWidth, RdxFMF);
19765 InstructionCost Cost = TreeCost + ReductionCost;
19766 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
19767 << " for reduction\n");
19768 if (!Cost.isValid())
19769 break;
19770 if (Cost >= -SLPCostThreshold) {
19771 V.getORE()->emit([&]() {
19772 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
19773 ReducedValsToOps.at(VL[0]).front())
19774 << "Vectorizing horizontal reduction is possible "
19775 << "but not beneficial with cost " << ore::NV("Cost", Cost)
19776 << " and threshold "
19777 << ore::NV("Threshold", -SLPCostThreshold);
19779 if (!AdjustReducedVals()) {
19780 V.analyzedReductionVals(VL);
19781 unsigned Offset = Pos == Start ? Pos : Pos - 1;
19782 if (ReduxWidth > ReductionLimit && V.isTreeNotExtendable()) {
19783 // Add subvectors of VL to the list of the analyzed values.
19784 for (unsigned VF = getFloorFullVectorNumberOfElements(
19785 *TTI, VL.front()->getType(), ReduxWidth - 1);
19786 VF >= ReductionLimit;
19787 VF = getFloorFullVectorNumberOfElements(
19788 *TTI, VL.front()->getType(), VF - 1)) {
19789 if (has_single_bit(VF) &&
19790 V.getCanonicalGraphSize() != V.getTreeSize())
19791 continue;
19792 for (unsigned Idx : seq<unsigned>(ReduxWidth - VF))
19793 IgnoredCandidates.insert(std::make_pair(Offset + Idx, VF));
19797 continue;
19800 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
19801 << Cost << ". (HorRdx)\n");
19802 V.getORE()->emit([&]() {
19803 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
19804 ReducedValsToOps.at(VL[0]).front())
19805 << "Vectorized horizontal reduction with cost "
19806 << ore::NV("Cost", Cost) << " and with tree size "
19807 << ore::NV("TreeSize", V.getTreeSize());
19810 Builder.setFastMathFlags(RdxFMF);
19812 // Emit a reduction. If the root is a select (min/max idiom), the insert
19813 // point is the compare condition of that select.
19814 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
19815 Instruction *InsertPt = RdxRootInst;
19816 if (IsCmpSelMinMax)
19817 InsertPt = GetCmpForMinMaxReduction(RdxRootInst);
19819 // Vectorize a tree.
19820 Value *VectorizedRoot =
19821 V.vectorizeTree(LocalExternallyUsedValues, InsertPt);
19822 // Update TrackedToOrig mapping, since the tracked values might be
19823 // updated.
19824 for (Value *RdxVal : Candidates) {
19825 Value *OrigVal = TrackedToOrig.at(RdxVal);
19826 Value *TransformedRdxVal = TrackedVals.at(OrigVal);
19827 if (TransformedRdxVal != RdxVal)
19828 TrackedToOrig.try_emplace(TransformedRdxVal, OrigVal);
19831 Builder.SetInsertPoint(InsertPt);
19833 // To prevent poison from leaking across what used to be sequential,
19834 // safe, scalar boolean logic operations, the reduction operand must be
19835 // frozen.
19836 if (AnyBoolLogicOp && !isGuaranteedNotToBePoison(VectorizedRoot))
19837 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
19839 // Emit code to correctly handle reused reduced values, if required.
19840 if (OptReusedScalars && !SameScaleFactor) {
19841 VectorizedRoot = emitReusedOps(VectorizedRoot, Builder, V,
19842 SameValuesCounter, TrackedToOrig);
19845 Value *ReducedSubTree;
19846 Type *ScalarTy = VL.front()->getType();
19847 if (isa<FixedVectorType>(ScalarTy)) {
19848 assert(SLPReVec && "FixedVectorType is not expected.");
19849 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
19850 ReducedSubTree = PoisonValue::get(FixedVectorType::get(
19851 VectorizedRoot->getType()->getScalarType(), ScalarTyNumElements));
19852 for (unsigned I : seq<unsigned>(ScalarTyNumElements)) {
19853 // Do reduction for each lane.
19854 // e.g., do reduce add for
19855 // VL[0] = <4 x Ty> <a, b, c, d>
19856 // VL[1] = <4 x Ty> <e, f, g, h>
19857 // Lane[0] = <2 x Ty> <a, e>
19858 // Lane[1] = <2 x Ty> <b, f>
19859 // Lane[2] = <2 x Ty> <c, g>
19860 // Lane[3] = <2 x Ty> <d, h>
19861 // result[0] = reduce add Lane[0]
19862 // result[1] = reduce add Lane[1]
19863 // result[2] = reduce add Lane[2]
19864 // result[3] = reduce add Lane[3]
19865 SmallVector<int, 16> Mask =
19866 createStrideMask(I, ScalarTyNumElements, VL.size());
19867 Value *Lane = Builder.CreateShuffleVector(VectorizedRoot, Mask);
19868 ReducedSubTree = Builder.CreateInsertElement(
19869 ReducedSubTree, emitReduction(Lane, Builder, TTI), I);
19871 } else {
19872 ReducedSubTree = emitReduction(VectorizedRoot, Builder, TTI);
19874 if (ReducedSubTree->getType() != VL.front()->getType()) {
19875 assert(ReducedSubTree->getType() != VL.front()->getType() &&
19876 "Expected different reduction type.");
19877 ReducedSubTree =
19878 Builder.CreateIntCast(ReducedSubTree, VL.front()->getType(),
19879 V.isSignedMinBitwidthRootNode());
19882 // Improved analysis for add/fadd/xor reductions with the same scale factor
19883 // for all operands of the reduction. We can emit scalar ops for them
19884 // instead.
19885 if (OptReusedScalars && SameScaleFactor)
19886 ReducedSubTree = emitScaleForReusedOps(
19887 ReducedSubTree, Builder, SameValuesCounter.front().second);
19889 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree);
19890 // Count vectorized reduced values to exclude them from final reduction.
19891 for (Value *RdxVal : VL) {
19892 Value *OrigV = TrackedToOrig.at(RdxVal);
19893 if (IsSupportedHorRdxIdentityOp) {
19894 VectorizedVals.try_emplace(OrigV, At(SameValuesCounter, OrigV));
19895 continue;
19897 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
19898 if (!V.isVectorized(RdxVal))
19899 RequiredExtract.insert(RdxVal);
19901 Pos += ReduxWidth;
19902 Start = Pos;
19903 ReduxWidth = NumReducedVals - Pos;
19904 if (ReduxWidth > 1)
19905 ReduxWidth = GetVectorFactor(NumReducedVals - Pos);
19906 AnyVectorized = true;
19908 if (OptReusedScalars && !AnyVectorized) {
19909 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) {
19910 Value *RdxVal = TrackedVals.at(P.first);
19911 Value *RedVal = emitScaleForReusedOps(RdxVal, Builder, P.second);
19912 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
19913 VectorizedVals.try_emplace(P.first, P.second);
19915 continue;
19918 if (VectorizedTree) {
19919 // Reorder operands of bool logical op in the natural order to avoid
19920 // possible problem with poison propagation. If not possible to reorder
19921 // (both operands are originally RHS), emit an extra freeze instruction
19922 // for the LHS operand.
19923 // I.e., if we have original code like this:
19924 // RedOp1 = select i1 ?, i1 LHS, i1 false
19925 // RedOp2 = select i1 RHS, i1 ?, i1 false
19927 // Then, we swap LHS/RHS to create a new op that matches the poison
19928 // semantics of the original code.
19930 // If we have original code like this and both values could be poison:
19931 // RedOp1 = select i1 ?, i1 LHS, i1 false
19932 // RedOp2 = select i1 ?, i1 RHS, i1 false
19934 // Then, we must freeze LHS in the new op.
19935 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS,
19936 Instruction *RedOp1,
19937 Instruction *RedOp2,
19938 bool InitStep) {
19939 if (!AnyBoolLogicOp)
19940 return;
19941 if (isBoolLogicOp(RedOp1) &&
19942 ((!InitStep && LHS == VectorizedTree) ||
19943 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS)))
19944 return;
19945 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) ||
19946 getRdxOperand(RedOp2, 0) == RHS ||
19947 isGuaranteedNotToBePoison(RHS))) {
19948 std::swap(LHS, RHS);
19949 return;
19951 if (LHS != VectorizedTree)
19952 LHS = Builder.CreateFreeze(LHS);
19954 // Finish the reduction.
19955 // Need to add extra arguments and the possible reduction values that were
19956 // not vectorized.
19957 // Try to avoid dependencies between the scalar remainders after
19958 // reductions.
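      // For example, with scalar remainders r0..r3 the first pass emits
      // (r0 op r1) and (r2 op r3); the next pass combines those two partial
      // results.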
19959 auto FinalGen =
19960 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals,
19961 bool InitStep) {
19962 unsigned Sz = InstVals.size();
19963 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
19964 Sz % 2);
19965 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) {
19966 Instruction *RedOp = InstVals[I + 1].first;
19967 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
19968 Value *RdxVal1 = InstVals[I].second;
19969 Value *StableRdxVal1 = RdxVal1;
19970 auto It1 = TrackedVals.find(RdxVal1);
19971 if (It1 != TrackedVals.end())
19972 StableRdxVal1 = It1->second;
19973 Value *RdxVal2 = InstVals[I + 1].second;
19974 Value *StableRdxVal2 = RdxVal2;
19975 auto It2 = TrackedVals.find(RdxVal2);
19976 if (It2 != TrackedVals.end())
19977 StableRdxVal2 = It2->second;
19978 // To prevent poison from leaking across what used to be
19979 // sequential, safe, scalar boolean logic operations, the
19980 // reduction operand must be frozen.
19981 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first,
19982 RedOp, InitStep);
19983 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
19984 StableRdxVal2, "op.rdx", ReductionOps);
19985 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
19987 if (Sz % 2 == 1)
19988 ExtraReds[Sz / 2] = InstVals.back();
19989 return ExtraReds;
19991 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
19992 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot),
19993 VectorizedTree);
19994 SmallPtrSet<Value *, 8> Visited;
19995 for (ArrayRef<Value *> Candidates : ReducedVals) {
19996 for (Value *RdxVal : Candidates) {
19997 if (!Visited.insert(RdxVal).second)
19998 continue;
19999 unsigned NumOps = VectorizedVals.lookup(RdxVal);
20000 for (Instruction *RedOp :
20001 ArrayRef(ReducedValsToOps.at(RdxVal)).drop_back(NumOps))
20002 ExtraReductions.emplace_back(RedOp, RdxVal);
20005 // Iterate through all non-vectorized reduction values/extra arguments.
20006 bool InitStep = true;
20007 while (ExtraReductions.size() > 1) {
20008 SmallVector<std::pair<Instruction *, Value *>> NewReds =
20009 FinalGen(ExtraReductions, InitStep);
20010 ExtraReductions.swap(NewReds);
20011 InitStep = false;
20013 VectorizedTree = ExtraReductions.front().second;
20015 ReductionRoot->replaceAllUsesWith(VectorizedTree);
20017 // The original scalar reduction is expected to have no remaining
20018 // uses outside the reduction tree itself. Assert that we got this
20019 // correct, replace internal uses with poison, and mark for eventual
20020 // deletion.
20021 #ifndef NDEBUG
20022 SmallSet<Value *, 4> IgnoreSet;
20023 for (ArrayRef<Value *> RdxOps : ReductionOps)
20024 IgnoreSet.insert(RdxOps.begin(), RdxOps.end());
20025 #endif
20026 for (ArrayRef<Value *> RdxOps : ReductionOps) {
20027 for (Value *Ignore : RdxOps) {
20028 if (!Ignore)
20029 continue;
20030 #ifndef NDEBUG
20031 for (auto *U : Ignore->users()) {
20032 assert(IgnoreSet.count(U) &&
20033 "All users must be either in the reduction ops list.");
20035 #endif
20036 if (!Ignore->use_empty()) {
20037 Value *P = PoisonValue::get(Ignore->getType());
20038 Ignore->replaceAllUsesWith(P);
20041 V.removeInstructionsAndOperands(RdxOps);
20043 } else if (!CheckForReusedReductionOps) {
20044 for (ReductionOpsType &RdxOps : ReductionOps)
20045 for (Value *RdxOp : RdxOps)
20046 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
20048 return VectorizedTree;
20051 private:
20052 /// Calculate the cost of a reduction.
20053 InstructionCost getReductionCost(TargetTransformInfo *TTI,
20054 ArrayRef<Value *> ReducedVals,
20055 bool IsCmpSelMinMax, unsigned ReduxWidth,
20056 FastMathFlags FMF) {
20057 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
20058 Type *ScalarTy = ReducedVals.front()->getType();
20059 FixedVectorType *VectorTy = getWidenedType(ScalarTy, ReduxWidth);
20060 InstructionCost VectorCost = 0, ScalarCost;
20061 // If all of the reduced values are constant, the vector cost is 0, since
20062 // the reduction value can be calculated at compile time.
20063 bool AllConsts = allConstant(ReducedVals);
20064 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) {
20065 InstructionCost Cost = 0;
20066 // Scalar cost is repeated for N-1 elements.
20067 int Cnt = ReducedVals.size();
20068 for (Value *RdxVal : ReducedVals) {
20069 if (Cnt == 1)
20070 break;
20071 --Cnt;
20072 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) {
20073 Cost += GenCostFn();
20074 continue;
20076 InstructionCost ScalarCost = 0;
20077 for (User *U : RdxVal->users()) {
20078 auto *RdxOp = cast<Instruction>(U);
20079 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) {
20080 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind);
20081 continue;
20083 ScalarCost = InstructionCost::getInvalid();
20084 break;
20086 if (ScalarCost.isValid())
20087 Cost += ScalarCost;
20088 else
20089 Cost += GenCostFn();
20091 return Cost;
20093 switch (RdxKind) {
20094 case RecurKind::Add:
20095 case RecurKind::Mul:
20096 case RecurKind::Or:
20097 case RecurKind::And:
20098 case RecurKind::Xor:
20099 case RecurKind::FAdd:
20100 case RecurKind::FMul: {
20101 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
20102 if (!AllConsts) {
20103 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
20104 assert(SLPReVec && "FixedVectorType is not expected.");
20105 unsigned ScalarTyNumElements = VecTy->getNumElements();
20106 for (unsigned I : seq<unsigned>(ReducedVals.size())) {
20107 VectorCost += TTI->getShuffleCost(
20108 TTI::SK_PermuteSingleSrc, VectorTy,
20109 createStrideMask(I, ScalarTyNumElements, ReducedVals.size()));
20110 VectorCost += TTI->getArithmeticReductionCost(RdxOpcode, VecTy, FMF,
20111 CostKind);
20113 VectorCost += TTI->getScalarizationOverhead(
20114 VecTy, APInt::getAllOnes(ScalarTyNumElements), /*Insert*/ true,
20115 /*Extract*/ false, TTI::TCK_RecipThroughput);
20116 } else {
20117 VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy, FMF,
20118 CostKind);
20121 ScalarCost = EvaluateScalarCost([&]() {
20122 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
20124 break;
20126 case RecurKind::FMax:
20127 case RecurKind::FMin:
20128 case RecurKind::FMaximum:
20129 case RecurKind::FMinimum:
20130 case RecurKind::SMax:
20131 case RecurKind::SMin:
20132 case RecurKind::UMax:
20133 case RecurKind::UMin: {
20134 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind);
20135 if (!AllConsts)
20136 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind);
20137 ScalarCost = EvaluateScalarCost([&]() {
20138 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF);
20139 return TTI->getIntrinsicInstrCost(ICA, CostKind);
20141 break;
20143 default:
20144 llvm_unreachable("Expected arithmetic or min/max reduction operation");
20147 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
20148 << " for reduction of " << shortBundleName(ReducedVals)
20149 << " (It is a splitting reduction)\n");
20150 return VectorCost - ScalarCost;
20153 /// Emit a horizontal reduction of the vectorized value.
20154 Value *emitReduction(Value *VectorizedValue, IRBuilderBase &Builder,
20155 const TargetTransformInfo *TTI) {
20156 assert(VectorizedValue && "Need to have a vectorized tree node");
20157 assert(RdxKind != RecurKind::FMulAdd &&
20158 "A call to the llvm.fmuladd intrinsic is not handled yet");
20160 ++NumVectorInstructions;
20161 return createSimpleReduction(Builder, VectorizedValue, RdxKind);
20164 /// Emits optimized code for a unique scalar value reused \p Cnt times.
20165 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder,
20166 unsigned Cnt) {
20167 assert(IsSupportedHorRdxIdentityOp &&
20168 "The optimization of matched scalar identity horizontal reductions "
20169 "must be supported.");
20170 if (Cnt == 1)
20171 return VectorizedValue;
20172 switch (RdxKind) {
20173 case RecurKind::Add: {
20174 // res = mul vv, n
20175 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt);
20176 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << " of "
20177 << VectorizedValue << ". (HorRdx)\n");
20178 return Builder.CreateMul(VectorizedValue, Scale);
20180 case RecurKind::Xor: {
20181 // res = n % 2 ? 0 : vv
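      // E.g., x ^ x ^ x ^ x == 0 while x ^ x ^ x == x, so only the parity of
      // Cnt matters.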
20182 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << " of " << VectorizedValue
20183 << ". (HorRdx)\n");
20184 if (Cnt % 2 == 0)
20185 return Constant::getNullValue(VectorizedValue->getType());
20186 return VectorizedValue;
20188 case RecurKind::FAdd: {
20189 // res = fmul v, n
20190 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt);
20191 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << " of "
20192 << VectorizedValue << ". (HorRdx)\n");
20193 return Builder.CreateFMul(VectorizedValue, Scale);
20195 case RecurKind::And:
20196 case RecurKind::Or:
20197 case RecurKind::SMax:
20198 case RecurKind::SMin:
20199 case RecurKind::UMax:
20200 case RecurKind::UMin:
20201 case RecurKind::FMax:
20202 case RecurKind::FMin:
20203 case RecurKind::FMaximum:
20204 case RecurKind::FMinimum:
20205 // res = vv
20206 return VectorizedValue;
20207 case RecurKind::Mul:
20208 case RecurKind::FMul:
20209 case RecurKind::FMulAdd:
20210 case RecurKind::IAnyOf:
20211 case RecurKind::FAnyOf:
20212 case RecurKind::None:
20213 llvm_unreachable("Unexpected reduction kind for repeated scalar.");
20215 return nullptr;
20218 /// Emits the actual operation for the scalar identity values found during
20219 /// horizontal reduction analysis.
20220 Value *
20221 emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, BoUpSLP &R,
20222 const SmallMapVector<Value *, unsigned, 16> &SameValuesCounter,
20223 const DenseMap<Value *, Value *> &TrackedToOrig) {
20224 assert(IsSupportedHorRdxIdentityOp &&
20225 "The optimization of matched scalar identity horizontal reductions "
20226 "must be supported.");
20227 ArrayRef<Value *> VL = R.getRootNodeScalars();
20228 auto *VTy = cast<FixedVectorType>(VectorizedValue->getType());
20229 if (VTy->getElementType() != VL.front()->getType()) {
20230 VectorizedValue = Builder.CreateIntCast(
20231 VectorizedValue,
20232 getWidenedType(VL.front()->getType(), VTy->getNumElements()),
20233 R.isSignedMinBitwidthRootNode());
20235 switch (RdxKind) {
20236 case RecurKind::Add: {
20237 // root = mul prev_root, <1, 1, n, 1>
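      // Illustrative example: if the root scalars are <a, b, c, d> and 'a'
      // occurs twice among the reduced values, the vector is multiplied by
      // <2, 1, 1, 1>.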
20238 SmallVector<Constant *> Vals;
20239 for (Value *V : VL) {
20240 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20241 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false));
20243 auto *Scale = ConstantVector::get(Vals);
20244 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << " of "
20245 << VectorizedValue << ". (HorRdx)\n");
20246 return Builder.CreateMul(VectorizedValue, Scale);
20248 case RecurKind::And:
20249 case RecurKind::Or:
20250 // No need for multiple or/and(s).
20251 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue
20252 << ". (HorRdx)\n");
20253 return VectorizedValue;
20254 case RecurKind::SMax:
20255 case RecurKind::SMin:
20256 case RecurKind::UMax:
20257 case RecurKind::UMin:
20258 case RecurKind::FMax:
20259 case RecurKind::FMin:
20260 case RecurKind::FMaximum:
20261 case RecurKind::FMinimum:
20262 // No need for multiple min/max(s) of the same value.
20263 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue
20264 << ". (HorRdx)\n");
20265 return VectorizedValue;
20266 case RecurKind::Xor: {
20267 // Replace values with an even number of repeats with 0, since
20268 // x xor x = 0.
20269 // root = shuffle prev_root, zeroinitializer, <0, 1, 2, vf, 4, vf, 5, 6,
20270 // 7>, if the 4th and 6th elements have an even number of repeats.
20271 SmallVector<int> Mask(
20272 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(),
20273 PoisonMaskElem);
20274 std::iota(Mask.begin(), Mask.end(), 0);
20275 bool NeedShuffle = false;
20276 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) {
20277 Value *V = VL[I];
20278 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20279 if (Cnt % 2 == 0) {
20280 Mask[I] = VF;
20281 NeedShuffle = true;
20284 LLVM_DEBUG(dbgs() << "SLP: Xor <"; for (int I
20285 : Mask) dbgs()
20286 << I << " ";
20287 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n");
20288 if (NeedShuffle)
20289 VectorizedValue = Builder.CreateShuffleVector(
20290 VectorizedValue,
20291 ConstantVector::getNullValue(VectorizedValue->getType()), Mask);
20292 return VectorizedValue;
20294 case RecurKind::FAdd: {
20295 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0>
20296 SmallVector<Constant *> Vals;
20297 for (Value *V : VL) {
20298 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20299 Vals.push_back(ConstantFP::get(V->getType(), Cnt));
20301 auto *Scale = ConstantVector::get(Vals);
20302 return Builder.CreateFMul(VectorizedValue, Scale);
20304 case RecurKind::Mul:
20305 case RecurKind::FMul:
20306 case RecurKind::FMulAdd:
20307 case RecurKind::IAnyOf:
20308 case RecurKind::FAnyOf:
20309 case RecurKind::None:
20310 llvm_unreachable("Unexpected reduction kind for reused scalars.");
20312 return nullptr;
20315 } // end anonymous namespace
20317 /// Gets the recurrence kind from the specified value.
20318 static RecurKind getRdxKind(Value *V) {
20319 return HorizontalReduction::getRdxKind(V);
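/// Returns the total number of elements in the aggregate built by
/// \p InsertInst (an insertelement or insertvalue chain), or std::nullopt if
/// the aggregate is not homogeneous.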
20321 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) {
20322 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
20323 return cast<FixedVectorType>(IE->getType())->getNumElements();
20325 unsigned AggregateSize = 1;
20326 auto *IV = cast<InsertValueInst>(InsertInst);
20327 Type *CurrentType = IV->getType();
20328 do {
20329 if (auto *ST = dyn_cast<StructType>(CurrentType)) {
20330 for (auto *Elt : ST->elements())
20331 if (Elt != ST->getElementType(0)) // check homogeneity
20332 return std::nullopt;
20333 AggregateSize *= ST->getNumElements();
20334 CurrentType = ST->getElementType(0);
20335 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
20336 AggregateSize *= AT->getNumElements();
20337 CurrentType = AT->getElementType();
20338 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
20339 AggregateSize *= VT->getNumElements();
20340 return AggregateSize;
20341 } else if (CurrentType->isSingleValueType()) {
20342 return AggregateSize;
20343 } else {
20344 return std::nullopt;
20346 } while (true);
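/// Walks a chain of insertelement/insertvalue instructions and records the
/// inserted operands in \p BuildVectorOpds and the corresponding insert
/// instructions in \p InsertElts at their aggregate indices.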
20349 static void findBuildAggregate_rec(Instruction *LastInsertInst,
20350 TargetTransformInfo *TTI,
20351 SmallVectorImpl<Value *> &BuildVectorOpds,
20352 SmallVectorImpl<Value *> &InsertElts,
20353 unsigned OperandOffset, const BoUpSLP &R) {
20354 do {
20355 Value *InsertedOperand = LastInsertInst->getOperand(1);
20356 std::optional<unsigned> OperandIndex =
20357 getElementIndex(LastInsertInst, OperandOffset);
20358 if (!OperandIndex || R.isDeleted(LastInsertInst))
20359 return;
20360 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) {
20361 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
20362 BuildVectorOpds, InsertElts, *OperandIndex, R);
20364 } else {
20365 BuildVectorOpds[*OperandIndex] = InsertedOperand;
20366 InsertElts[*OperandIndex] = LastInsertInst;
20368 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
20369 } while (LastInsertInst != nullptr &&
20370 isa<InsertValueInst, InsertElementInst>(LastInsertInst) &&
20371 LastInsertInst->hasOneUse());
20374 /// Recognize construction of vectors like
20375 /// %ra = insertelement <4 x float> poison, float %s0, i32 0
20376 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1
20377 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2
20378 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3
20379 /// starting from the last insertelement or insertvalue instruction.
20381 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
20382 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
20383 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
20385 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
20387 /// \return true if it matches.
20388 static bool findBuildAggregate(Instruction *LastInsertInst,
20389 TargetTransformInfo *TTI,
20390 SmallVectorImpl<Value *> &BuildVectorOpds,
20391 SmallVectorImpl<Value *> &InsertElts,
20392 const BoUpSLP &R) {
20394 assert((isa<InsertElementInst>(LastInsertInst) ||
20395 isa<InsertValueInst>(LastInsertInst)) &&
20396 "Expected insertelement or insertvalue instruction!");
20398 assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
20399 "Expected empty result vectors!");
20401 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
20402 if (!AggregateSize)
20403 return false;
20404 BuildVectorOpds.resize(*AggregateSize);
20405 InsertElts.resize(*AggregateSize);
20407 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0,
20409 llvm::erase(BuildVectorOpds, nullptr);
20410 llvm::erase(InsertElts, nullptr);
20411 if (BuildVectorOpds.size() >= 2)
20412 return true;
20414 return false;
20417 /// Try and get a reduction instruction from a phi node.
20419 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
20420 /// if they come from either \p ParentBB or a containing loop latch.
20422 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
20423 /// if not possible.
20424 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P,
20425 BasicBlock *ParentBB, LoopInfo *LI) {
20426 // There are situations where the reduction value is not dominated by the
20427 // reduction phi. Vectorizing such cases has been reported to cause
20428 // miscompiles. See PR25787.
20429 auto DominatedReduxValue = [&](Value *R) {
20430 return isa<Instruction>(R) &&
20431 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
20434 Instruction *Rdx = nullptr;
20436 // Return the incoming value if it comes from the same BB as the phi node.
20437 if (P->getIncomingBlock(0) == ParentBB) {
20438 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0));
20439 } else if (P->getIncomingBlock(1) == ParentBB) {
20440 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1));
20443 if (Rdx && DominatedReduxValue(Rdx))
20444 return Rdx;
20446 // Otherwise, check whether we have a loop latch to look at.
20447 Loop *BBL = LI->getLoopFor(ParentBB);
20448 if (!BBL)
20449 return nullptr;
20450 BasicBlock *BBLatch = BBL->getLoopLatch();
20451 if (!BBLatch)
20452 return nullptr;
20454 // There is a loop latch, return the incoming value if it comes from
20455 // that. This reduction pattern occasionally turns up.
20456 if (P->getIncomingBlock(0) == BBLatch) {
20457 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0));
20458 } else if (P->getIncomingBlock(1) == BBLatch) {
20459 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1));
20462 if (Rdx && DominatedReduxValue(Rdx))
20463 return Rdx;
20465 return nullptr;
20466 }
20468 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
20469 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
20470 return true;
20471 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
20472 return true;
20473 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
20474 return true;
20475 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1))))
20476 return true;
20477 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1))))
20478 return true;
20479 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
20480 return true;
20481 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
20482 return true;
20483 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
20484 return true;
20485 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
20486 return true;
20487 return false;
20488 }
20490 /// We could have an initial reduction that is not an add.
20491 /// r *= v1 + v2 + v3 + v4
20492 /// In such a case start looking for a tree rooted in the first '+'.
20493 /// \returns the new root if found, which may be nullptr if not an instruction.
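///
/// For example (an illustrative IR sketch, not from a real test), with
///   %a0 = fadd fast float %v1, %v2
///   %a1 = fadd fast float %a0, %v3
///   %a2 = fadd fast float %a1, %v4
///   %r  = fmul fast float %phi, %a2
/// and \p Root being the fmul, the returned secondary root is %a2, i.e. the
/// operand of \p Root that is not the phi.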
20494 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi,
20495 Instruction *Root) {
20496 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) ||
20497 isa<IntrinsicInst>(Root)) &&
20498 "Expected binop, select, or intrinsic for reduction matching");
20499 Value *LHS =
20500 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root));
20501 Value *RHS =
20502 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1);
20503 if (LHS == Phi)
20504 return dyn_cast<Instruction>(RHS);
20505 if (RHS == Phi)
20506 return dyn_cast<Instruction>(LHS);
20507 return nullptr;
20508 }
20510 /// \returns the first operand of \p I that does not match \p Phi. If the
20511 /// operand is not an instruction, it returns nullptr.
20512 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) {
20513 Value *Op0 = nullptr;
20514 Value *Op1 = nullptr;
20515 if (!matchRdxBop(I, Op0, Op1))
20516 return nullptr;
20517 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0);
20518 }
20520 /// \returns true if \p I is a candidate instruction for reduction vectorization.
20521 static bool isReductionCandidate(Instruction *I) {
20522 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value()));
20523 Value *B0 = nullptr, *B1 = nullptr;
20524 bool IsBinop = matchRdxBop(I, B0, B1);
20525 return IsBinop || IsSelect;
20526 }
20528 bool SLPVectorizerPass::vectorizeHorReduction(
20529 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
20530 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) {
20531 if (!ShouldVectorizeHor)
20532 return false;
20533 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root);
20535 if (Root->getParent() != BB || isa<PHINode>(Root))
20536 return false;
20538 // If we can find a secondary reduction root, use that instead.
20539 auto SelectRoot = [&]() {
20540 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) &&
20541 HorizontalReduction::getRdxKind(Root) != RecurKind::None)
20542 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root))
20543 return NewRoot;
20544 return Root;
20547 // Start the analysis from the Root instruction. If a horizontal reduction is
20548 // found, try to vectorize it. If it is not a horizontal reduction or
20549 // vectorization is not possible or not effective, and currently analyzed
20550 // instruction is a binary operation, try to vectorize the operands, using
20551 // pre-order DFS traversal order. If the operands were not vectorized, repeat
20552 // the same procedure considering each operand as a possible root of the
20553 // horizontal reduction.
20554 // Interrupt the process if the Root instruction itself was vectorized or all
20555 // sub-trees not higher than RecursionMaxDepth were analyzed/vectorized.
20556 // If a horizontal reduction was not matched or vectorized, we collect
20557 // instructions for possible later attempts for vectorization.
20558 std::queue<std::pair<Instruction *, unsigned>> Stack;
20559 Stack.emplace(SelectRoot(), 0);
20560 SmallPtrSet<Value *, 8> VisitedInstrs;
20561 bool Res = false;
20562 auto &&TryToReduce = [this, &R](Instruction *Inst) -> Value * {
20563 if (R.isAnalyzedReductionRoot(Inst))
20564 return nullptr;
20565 if (!isReductionCandidate(Inst))
20566 return nullptr;
20567 HorizontalReduction HorRdx;
20568 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI))
20569 return nullptr;
20570 return HorRdx.tryToReduce(R, *DL, TTI, *TLI);
20572 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) {
20573 if (TryOperandsAsNewSeeds && FutureSeed == Root) {
20574 FutureSeed = getNonPhiOperand(Root, P);
20575 if (!FutureSeed)
20576 return false;
20578 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their
20579 // analysis is done separately.
20580 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed))
20581 PostponedInsts.push_back(FutureSeed);
20582 return true;
20585 while (!Stack.empty()) {
20586 Instruction *Inst;
20587 unsigned Level;
20588 std::tie(Inst, Level) = Stack.front();
20589 Stack.pop();
20590 // Do not try to analyze an instruction that has already been vectorized.
20591 // This may happen when we vectorize instruction operands on a previous
20592 // iteration while the stack was populated before that happened.
20593 if (R.isDeleted(Inst))
20594 continue;
20595 if (Value *VectorizedV = TryToReduce(Inst)) {
20596 Res = true;
20597 if (auto *I = dyn_cast<Instruction>(VectorizedV)) {
20598 // Try to find another reduction.
20599 Stack.emplace(I, Level);
20600 continue;
20602 if (R.isDeleted(Inst))
20603 continue;
20604 } else {
20605 // We could not vectorize `Inst` so try to use it as a future seed.
20606 if (!TryAppendToPostponedInsts(Inst)) {
20607 assert(Stack.empty() && "Expected empty stack");
20608 break;
20612 // Try to vectorize operands.
20613 // Continue analysis for the instruction from the same basic block only to
20614 // save compile time.
20615 if (++Level < RecursionMaxDepth)
20616 for (auto *Op : Inst->operand_values())
20617 if (VisitedInstrs.insert(Op).second)
20618 if (auto *I = dyn_cast<Instruction>(Op))
20619 // Do not try to vectorize CmpInst operands, this is done
20620 // separately.
20621 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) &&
20622 !R.isDeleted(I) && I->getParent() == BB)
20623 Stack.emplace(I, Level);
20625 return Res;
20626 }
20628 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root,
20629 BasicBlock *BB, BoUpSLP &R) {
20630 SmallVector<WeakTrackingVH> PostponedInsts;
20631 bool Res = vectorizeHorReduction(P, Root, BB, R, PostponedInsts);
20632 Res |= tryToVectorize(PostponedInsts, R);
20633 return Res;
20634 }
20636 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
20637 BoUpSLP &R) {
20638 bool Res = false;
20639 for (Value *V : Insts)
20640 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst))
20641 Res |= tryToVectorize(Inst, R);
20642 return Res;
20643 }
20645 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
20646 BasicBlock *BB, BoUpSLP &R,
20647 bool MaxVFOnly) {
20648 if (!R.canMapToVector(IVI->getType()))
20649 return false;
20651 SmallVector<Value *, 16> BuildVectorOpds;
20652 SmallVector<Value *, 16> BuildVectorInsts;
20653 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts, R))
20654 return false;
20656 if (MaxVFOnly && BuildVectorOpds.size() == 2) {
20657 R.getORE()->emit([&]() {
20658 return OptimizationRemarkMissed(SV_NAME, "NotPossible", IVI)
20659 << "Cannot SLP vectorize list: only 2 elements of buildvalue, "
20660 "trying reduction first.";
20662 return false;
20664 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
20665 // Aggregate value is unlikely to be processed in a vector register.
20666 return tryToVectorizeList(BuildVectorOpds, R, MaxVFOnly);
20667 }
20669 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
20670 BasicBlock *BB, BoUpSLP &R,
20671 bool MaxVFOnly) {
20672 SmallVector<Value *, 16> BuildVectorInsts;
20673 SmallVector<Value *, 16> BuildVectorOpds;
20674 SmallVector<int> Mask;
20675 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts, R) ||
20676 (llvm::all_of(BuildVectorOpds, IsaPred<ExtractElementInst, UndefValue>) &&
20677 isFixedVectorShuffle(BuildVectorOpds, Mask)))
20678 return false;
20680 if (MaxVFOnly && BuildVectorInsts.size() == 2) {
20681 R.getORE()->emit([&]() {
20682 return OptimizationRemarkMissed(SV_NAME, "NotPossible", IEI)
20683 << "Cannot SLP vectorize list: only 2 elements of buildvector, "
20684 "trying reduction first.";
20686 return false;
20688 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
20689 return tryToVectorizeList(BuildVectorInsts, R, MaxVFOnly);
20690 }
20692 template <typename T>
20693 static bool tryToVectorizeSequence(
20694 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator,
20695 function_ref<bool(T *, T *)> AreCompatible,
20696 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
20697 bool MaxVFOnly, BoUpSLP &R) {
20698 bool Changed = false;
20699 // Sort by type, parent, operands.
20700 stable_sort(Incoming, Comparator);
20702 // Try to vectorize elements based on their type.
20703 SmallVector<T *> Candidates;
20704 SmallVector<T *> VL;
20705 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;
20706 VL.clear()) {
20707 // Look for the next elements with the same type, parent and operand
20708 // kinds.
20709 auto *I = dyn_cast<Instruction>(*IncIt);
20710 if (!I || R.isDeleted(I)) {
20711 ++IncIt;
20712 continue;
20714 auto *SameTypeIt = IncIt;
20715 while (SameTypeIt != E && (!isa<Instruction>(*SameTypeIt) ||
20716 R.isDeleted(cast<Instruction>(*SameTypeIt)) ||
20717 AreCompatible(*SameTypeIt, *IncIt))) {
20718 auto *I = dyn_cast<Instruction>(*SameTypeIt);
20719 ++SameTypeIt;
20720 if (I && !R.isDeleted(I))
20721 VL.push_back(cast<T>(I));
20724 // Try to vectorize them.
20725 unsigned NumElts = VL.size();
20726 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
20727 << NumElts << ")\n");
20728 // The vectorization is a 3-state attempt:
20729 // 1. Try to vectorize instructions with the same/alternate opcodes with the
20730 // size of the maximal register at first.
20731 // 2. Try to vectorize remaining instructions with the same type, if
20732 // possible. This may produce better vectorization results than trying to
20733 // vectorize only instructions with the same/alternate opcodes.
20734 // 3. Final attempt to vectorize all instructions with the
20735 // same/alternate ops only; this may result in some extra final
20736 // vectorization.
20737 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(VL), MaxVFOnly)) {
20738 // Success, start over because instructions might have been changed.
20739 Changed = true;
20740 VL.swap(Candidates);
20741 Candidates.clear();
20742 for (T *V : VL) {
20743 if (auto *I = dyn_cast<Instruction>(V); I && !R.isDeleted(I))
20744 Candidates.push_back(V);
20746 } else {
20747 /// \returns the minimum number of elements that we will attempt to
20748 /// vectorize.
20749 auto GetMinNumElements = [&R](Value *V) {
20750 unsigned EltSize = R.getVectorElementSize(V);
20751 return std::max(2U, R.getMaxVecRegSize() / EltSize);
20753 if (NumElts < GetMinNumElements(*IncIt) &&
20754 (Candidates.empty() ||
20755 Candidates.front()->getType() == (*IncIt)->getType())) {
20756 for (T *V : VL) {
20757 if (auto *I = dyn_cast<Instruction>(V); I && !R.isDeleted(I))
20758 Candidates.push_back(V);
20762 // Final attempt to vectorize instructions with the same types.
20763 if (Candidates.size() > 1 &&
20764 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
20765 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) {
20766 // Success, start over because instructions might have been changed.
20767 Changed = true;
20768 } else if (MaxVFOnly) {
20769 // Try to vectorize using small vectors.
20770 SmallVector<T *> VL;
20771 for (auto *It = Candidates.begin(), *End = Candidates.end(); It != End;
20772 VL.clear()) {
20773 auto *I = dyn_cast<Instruction>(*It);
20774 if (!I || R.isDeleted(I)) {
20775 ++It;
20776 continue;
20778 auto *SameTypeIt = It;
20779 while (SameTypeIt != End &&
20780 (!isa<Instruction>(*SameTypeIt) ||
20781 R.isDeleted(cast<Instruction>(*SameTypeIt)) ||
20782 AreCompatible(*SameTypeIt, *It))) {
20783 auto *I = dyn_cast<Instruction>(*SameTypeIt);
20784 ++SameTypeIt;
20785 if (I && !R.isDeleted(I))
20786 VL.push_back(cast<T>(I));
20788 unsigned NumElts = VL.size();
20789 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(VL),
20790 /*MaxVFOnly=*/false))
20791 Changed = true;
20792 It = SameTypeIt;
20795 Candidates.clear();
20798 // Start over at the next instruction of a different type (or the end).
20799 IncIt = SameTypeIt;
20801 return Changed;
20802 }
20804 /// Compare two cmp instructions. If IsCompatibility is true, function returns
20805 /// true if 2 cmps have same/swapped predicates and compatible corresponding
20806 /// operands. If IsCompatibility is false, function implements strict weak
20807 /// ordering relation between two cmp instructions, returning true if the first
20808 /// instruction is "less" than the second, i.e. its predicate is less than the
20809 /// predicate of the second or its operand IDs are less than the operand IDs
20810 /// of the second cmp instruction.
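///
/// For instance (illustrative only), "icmp slt i32 %a, %b" and
/// "icmp sgt i32 %c, %d" can be treated as compatible because sgt is the
/// swapped form of slt; in that case the operands of the cmp whose predicate
/// is not the base predicate are compared in reversed order against the
/// operands of the other cmp.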
20811 template <bool IsCompatibility>
20812 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI,
20813 const DominatorTree &DT) {
20814 assert(isValidElementType(V->getType()) &&
20815 isValidElementType(V2->getType()) &&
20816 "Expected valid element types only.");
20817 if (V == V2)
20818 return IsCompatibility;
20819 auto *CI1 = cast<CmpInst>(V);
20820 auto *CI2 = cast<CmpInst>(V2);
20821 if (CI1->getOperand(0)->getType()->getTypeID() <
20822 CI2->getOperand(0)->getType()->getTypeID())
20823 return !IsCompatibility;
20824 if (CI1->getOperand(0)->getType()->getTypeID() >
20825 CI2->getOperand(0)->getType()->getTypeID())
20826 return false;
20827 if (CI1->getOperand(0)->getType()->getScalarSizeInBits() <
20828 CI2->getOperand(0)->getType()->getScalarSizeInBits())
20829 return !IsCompatibility;
20830 if (CI1->getOperand(0)->getType()->getScalarSizeInBits() >
20831 CI2->getOperand(0)->getType()->getScalarSizeInBits())
20832 return false;
20833 CmpInst::Predicate Pred1 = CI1->getPredicate();
20834 CmpInst::Predicate Pred2 = CI2->getPredicate();
20835 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
20836 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
20837 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
20838 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
20839 if (BasePred1 < BasePred2)
20840 return !IsCompatibility;
20841 if (BasePred1 > BasePred2)
20842 return false;
20843 // Compare operands.
20844 bool CI1Preds = Pred1 == BasePred1;
20845 bool CI2Preds = Pred2 == BasePred1;
20846 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
20847 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1);
20848 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1);
20849 if (Op1 == Op2)
20850 continue;
20851 if (Op1->getValueID() < Op2->getValueID())
20852 return !IsCompatibility;
20853 if (Op1->getValueID() > Op2->getValueID())
20854 return false;
20855 if (auto *I1 = dyn_cast<Instruction>(Op1))
20856 if (auto *I2 = dyn_cast<Instruction>(Op2)) {
20857 if (IsCompatibility) {
20858 if (I1->getParent() != I2->getParent())
20859 return false;
20860 } else {
20861 // Try to compare nodes with same parent.
20862 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent());
20863 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent());
20864 if (!NodeI1)
20865 return NodeI2 != nullptr;
20866 if (!NodeI2)
20867 return false;
20868 assert((NodeI1 == NodeI2) ==
20869 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
20870 "Different nodes should have different DFS numbers");
20871 if (NodeI1 != NodeI2)
20872 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
20874 InstructionsState S = getSameOpcode({I1, I2}, TLI);
20875 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle()))
20876 continue;
20877 if (IsCompatibility)
20878 return false;
20879 if (I1->getOpcode() != I2->getOpcode())
20880 return I1->getOpcode() < I2->getOpcode();
20883 return IsCompatibility;
20886 template <typename ItT>
20887 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts,
20888 BasicBlock *BB, BoUpSLP &R) {
20889 bool Changed = false;
20890 // Try to find reductions first.
20891 for (CmpInst *I : CmpInsts) {
20892 if (R.isDeleted(I))
20893 continue;
20894 for (Value *Op : I->operands())
20895 if (auto *RootOp = dyn_cast<Instruction>(Op))
20896 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R);
20898 // Try to vectorize operands as vector bundles.
20899 for (CmpInst *I : CmpInsts) {
20900 if (R.isDeleted(I))
20901 continue;
20902 Changed |= tryToVectorize(I, R);
20904 // Try to vectorize list of compares.
20905 // Sort by type, compare predicate, etc.
20906 auto CompareSorter = [&](Value *V, Value *V2) {
20907 if (V == V2)
20908 return false;
20909 return compareCmp<false>(V, V2, *TLI, *DT);
20912 auto AreCompatibleCompares = [&](Value *V1, Value *V2) {
20913 if (V1 == V2)
20914 return true;
20915 return compareCmp<true>(V1, V2, *TLI, *DT);
20918 SmallVector<Value *> Vals;
20919 for (Instruction *V : CmpInsts)
20920 if (!R.isDeleted(V) && isValidElementType(getValueType(V)))
20921 Vals.push_back(V);
20922 if (Vals.size() <= 1)
20923 return Changed;
20924 Changed |= tryToVectorizeSequence<Value>(
20925 Vals, CompareSorter, AreCompatibleCompares,
20926 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) {
20927 // Exclude possible reductions from other blocks.
20928 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) {
20929 return any_of(V->users(), [V](User *U) {
20930 auto *Select = dyn_cast<SelectInst>(U);
20931 return Select &&
20932 Select->getParent() != cast<Instruction>(V)->getParent();
20935 if (ArePossiblyReducedInOtherBlock)
20936 return false;
20937 return tryToVectorizeList(Candidates, R, MaxVFOnly);
20939 /*MaxVFOnly=*/true, R);
20940 return Changed;
20941 }
20943 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions,
20944 BasicBlock *BB, BoUpSLP &R) {
20945 assert(all_of(Instructions, IsaPred<InsertElementInst, InsertValueInst>) &&
20946 "This function only accepts Insert instructions");
20947 bool OpsChanged = false;
20948 SmallVector<WeakTrackingVH> PostponedInsts;
20949 for (auto *I : reverse(Instructions)) {
20950 // pass1 - try to match and vectorize a buildvector sequence for MaxVF only.
20951 if (R.isDeleted(I) || isa<CmpInst>(I))
20952 continue;
20953 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
20954 OpsChanged |=
20955 vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/true);
20956 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
20957 OpsChanged |=
20958 vectorizeInsertElementInst(LastInsertElem, BB, R, /*MaxVFOnly=*/true);
20960 // pass2 - try to vectorize reductions only
20961 if (R.isDeleted(I))
20962 continue;
20963 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, PostponedInsts);
20964 if (R.isDeleted(I) || isa<CmpInst>(I))
20965 continue;
20966 // pass3 - try to match and vectorize a buildvector sequence.
20967 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
20968 OpsChanged |=
20969 vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/false);
20970 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
20971 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R,
20972 /*MaxVFOnly=*/false);
20975 // Now try to vectorize postponed instructions.
20976 OpsChanged |= tryToVectorize(PostponedInsts, R);
20978 Instructions.clear();
20979 return OpsChanged;
20980 }
20982 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
20983 bool Changed = false;
20984 SmallVector<Value *, 4> Incoming;
20985 SmallPtrSet<Value *, 16> VisitedInstrs;
20986 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
20987 // node. This allows us to better identify the chains that can be
20988 // vectorized.
20989 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
20990 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
20991 assert(isValidElementType(V1->getType()) &&
20992 isValidElementType(V2->getType()) &&
20993 "Expected vectorizable types only.");
20994 // It is fine to compare type IDs here, since we expect only vectorizable
20995 // types, like ints, floats and pointers; we don't care about other types.
20996 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
20997 return true;
20998 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
20999 return false;
21000 if (V1->getType()->getScalarSizeInBits() <
21001 V2->getType()->getScalarSizeInBits())
21002 return true;
21003 if (V1->getType()->getScalarSizeInBits() >
21004 V2->getType()->getScalarSizeInBits())
21005 return false;
21006 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
21007 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
21008 if (Opcodes1.size() < Opcodes2.size())
21009 return true;
21010 if (Opcodes1.size() > Opcodes2.size())
21011 return false;
21012 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
21014 // Instructions come first.
21015 auto *I1 = dyn_cast<Instruction>(Opcodes1[I]);
21016 auto *I2 = dyn_cast<Instruction>(Opcodes2[I]);
21017 if (I1 && I2) {
21018 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
21019 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
21020 if (!NodeI1)
21021 return NodeI2 != nullptr;
21022 if (!NodeI2)
21023 return false;
21024 assert((NodeI1 == NodeI2) ==
21025 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
21026 "Different nodes should have different DFS numbers");
21027 if (NodeI1 != NodeI2)
21028 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
21029 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21030 if (S.getOpcode() && !S.isAltShuffle())
21031 continue;
21032 return I1->getOpcode() < I2->getOpcode();
21034 if (I1)
21035 return true;
21036 if (I2)
21037 return false;
21040 // Non-undef constants come next.
21041 bool C1 = isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I]);
21042 bool C2 = isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I]);
21043 if (C1 && C2)
21044 continue;
21045 if (C1)
21046 return true;
21047 if (C2)
21048 return false;
21050 bool U1 = isa<UndefValue>(Opcodes1[I]);
21051 bool U2 = isa<UndefValue>(Opcodes2[I]);
21053 // Non-constant non-instructions come next.
21054 if (!U1 && !U2) {
21055 auto ValID1 = Opcodes1[I]->getValueID();
21056 auto ValID2 = Opcodes2[I]->getValueID();
21057 if (ValID1 == ValID2)
21058 continue;
21059 if (ValID1 < ValID2)
21060 return true;
21061 if (ValID1 > ValID2)
21062 return false;
21064 if (!U1)
21065 return true;
21066 if (!U2)
21067 return false;
21069 // Undefs come last.
21070 assert(U1 && U2 && "The only thing left should be undef & undef.");
21072 return false;
21074 auto AreCompatiblePHIs = [&PHIToOpcodes, this, &R](Value *V1, Value *V2) {
21075 if (V1 == V2)
21076 return true;
21077 if (V1->getType() != V2->getType())
21078 return false;
21079 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
21080 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
21081 if (Opcodes1.size() != Opcodes2.size())
21082 return false;
21083 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
21084 // Undefs are compatible with any other value.
21085 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
21086 continue;
21087 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
21088 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
21089 if (R.isDeleted(I1) || R.isDeleted(I2))
21090 return false;
21091 if (I1->getParent() != I2->getParent())
21092 return false;
21093 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21094 if (S.getOpcode())
21095 continue;
21096 return false;
21098 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
21099 continue;
21100 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
21101 return false;
21103 return true;
21106 bool HaveVectorizedPhiNodes = false;
21107 do {
21108 // Collect the incoming values from the PHIs.
21109 Incoming.clear();
21110 for (Instruction &I : *BB) {
21111 auto *P = dyn_cast<PHINode>(&I);
21112 if (!P || P->getNumIncomingValues() > MaxPHINumOperands)
21113 break;
21115 // No need to analyze deleted, vectorized and non-vectorizable
21116 // instructions.
21117 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
21118 isValidElementType(P->getType()))
21119 Incoming.push_back(P);
21122 if (Incoming.size() <= 1)
21123 break;
21125 // Find the corresponding non-phi nodes for better matching when trying to
21126 // build the tree.
21127 for (Value *V : Incoming) {
21128 SmallVectorImpl<Value *> &Opcodes =
21129 PHIToOpcodes.try_emplace(V).first->getSecond();
21130 if (!Opcodes.empty())
21131 continue;
21132 SmallVector<Value *, 4> Nodes(1, V);
21133 SmallPtrSet<Value *, 4> Visited;
21134 while (!Nodes.empty()) {
21135 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
21136 if (!Visited.insert(PHI).second)
21137 continue;
21138 for (Value *V : PHI->incoming_values()) {
21139 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
21140 Nodes.push_back(PHI1);
21141 continue;
21143 Opcodes.emplace_back(V);
21148 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
21149 Incoming, PHICompare, AreCompatiblePHIs,
21150 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) {
21151 return tryToVectorizeList(Candidates, R, MaxVFOnly);
21153 /*MaxVFOnly=*/true, R);
21154 Changed |= HaveVectorizedPhiNodes;
21155 if (HaveVectorizedPhiNodes && any_of(PHIToOpcodes, [&](const auto &P) {
21156 auto *PHI = dyn_cast<PHINode>(P.first);
21157 return !PHI || R.isDeleted(PHI);
21159 PHIToOpcodes.clear();
21160 VisitedInstrs.insert(Incoming.begin(), Incoming.end());
21161 } while (HaveVectorizedPhiNodes);
21163 VisitedInstrs.clear();
21165 InstSetVector PostProcessInserts;
21166 SmallSetVector<CmpInst *, 8> PostProcessCmps;
21167 // Vectorizes Inserts in `PostProcessInserts` and, if `VectorizeCmps` is true,
21168 // also vectorizes `PostProcessCmps`.
21169 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) {
21170 bool Changed = vectorizeInserts(PostProcessInserts, BB, R);
21171 if (VectorizeCmps) {
21172 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R);
21173 PostProcessCmps.clear();
21175 PostProcessInserts.clear();
21176 return Changed;
21178 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`.
21179 auto IsInPostProcessInstrs = [&](Instruction *I) {
21180 if (auto *Cmp = dyn_cast<CmpInst>(I))
21181 return PostProcessCmps.contains(Cmp);
21182 return isa<InsertElementInst, InsertValueInst>(I) &&
21183 PostProcessInserts.contains(I);
21185 // Returns true if `I` is an instruction without users: a terminator, a store,
21186 // or a call with an ignored return value. Unused instructions are identified
21187 // based on the instruction type, except for CallInst and InvokeInst.
21188 auto HasNoUsers = [](Instruction *I) {
21189 return I->use_empty() &&
21190 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I));
21192 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) {
21193 // Skip instructions with scalable types. The number of elements is unknown
21194 // at compile time for scalable types.
21195 if (isa<ScalableVectorType>(It->getType()))
21196 continue;
21198 // Skip instructions marked for deletion.
21199 if (R.isDeleted(&*It))
21200 continue;
21201 // We may go through BB multiple times, so skip the ones we have already checked.
21202 if (!VisitedInstrs.insert(&*It).second) {
21203 if (HasNoUsers(&*It) &&
21204 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) {
21205 // We would like to start over since some instructions are deleted
21206 // and the iterator may become invalid.
21207 Changed = true;
21208 It = BB->begin();
21209 E = BB->end();
21211 continue;
21214 if (isa<DbgInfoIntrinsic>(It))
21215 continue;
21217 // Try to vectorize reductions that use PHINodes.
21218 if (PHINode *P = dyn_cast<PHINode>(It)) {
21219 // Check that the PHI is a reduction PHI.
21220 if (P->getNumIncomingValues() == 2) {
21221 // Try to match and vectorize a horizontal reduction.
21222 Instruction *Root = getReductionInstr(DT, P, BB, LI);
21223 if (Root && vectorizeRootInstruction(P, Root, BB, R)) {
21224 Changed = true;
21225 It = BB->begin();
21226 E = BB->end();
21227 continue;
21230 // Try to vectorize the incoming values of the PHI, to catch reductions
21231 // that feed into PHIs.
21232 for (unsigned I : seq<unsigned>(P->getNumIncomingValues())) {
21233 // Skip if the incoming block is the current BB for now. Also, bypass
21234 // unreachable IR for efficiency and to avoid crashing.
21235 // TODO: Collect the skipped incoming values and try to vectorize them
21236 // after processing BB.
21237 if (BB == P->getIncomingBlock(I) ||
21238 !DT->isReachableFromEntry(P->getIncomingBlock(I)))
21239 continue;
21241 // Postponed instructions should not be vectorized here, delay their
21242 // vectorization.
21243 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I));
21244 PI && !IsInPostProcessInstrs(PI)) {
21245 bool Res =
21246 vectorizeRootInstruction(nullptr, PI, P->getIncomingBlock(I), R);
21247 Changed |= Res;
21248 if (Res && R.isDeleted(P)) {
21249 It = BB->begin();
21250 E = BB->end();
21251 break;
21255 continue;
21258 if (HasNoUsers(&*It)) {
21259 bool OpsChanged = false;
21260 auto *SI = dyn_cast<StoreInst>(It);
21261 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI;
21262 if (SI) {
21263 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand()));
21264 // Try to vectorize chain in store, if this is the only store to the
21265 // address in the block.
21266 // TODO: This is just a temporary solution to save compile time. Need
21267 // to investigate if we can safely turn on slp-vectorize-hor-store
21268 // instead to allow lookup for reduction chains in all non-vectorized
21269 // stores (need to check side effects and compile time).
21270 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) &&
21271 SI->getValueOperand()->hasOneUse();
21273 if (TryToVectorizeRoot) {
21274 for (auto *V : It->operand_values()) {
21275 // Postponed instructions should not be vectorized here, delay their
21276 // vectorization.
21277 if (auto *VI = dyn_cast<Instruction>(V);
21278 VI && !IsInPostProcessInstrs(VI))
21279 // Try to match and vectorize a horizontal reduction.
21280 OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R);
21283 // Start vectorization of post-process list of instructions from the
21284 // top-tree instructions to try to vectorize as many instructions as
21285 // possible.
21286 OpsChanged |=
21287 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator());
21288 if (OpsChanged) {
21289 // We would like to start over since some instructions are deleted
21290 // and the iterator may become invalid.
21291 Changed = true;
21292 It = BB->begin();
21293 E = BB->end();
21294 continue;
21298 if (isa<InsertElementInst, InsertValueInst>(It))
21299 PostProcessInserts.insert(&*It);
21300 else if (isa<CmpInst>(It))
21301 PostProcessCmps.insert(cast<CmpInst>(&*It));
21304 return Changed;
21305 }
21307 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
21308 auto Changed = false;
21309 for (auto &Entry : GEPs) {
21310 // If the getelementptr list has fewer than two elements, there's nothing
21311 // to do.
21312 if (Entry.second.size() < 2)
21313 continue;
21315 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
21316 << Entry.second.size() << ".\n");
21318 // Process the GEP list in chunks suitable for the target's supported
21319 // vector size. If a vector register can't hold 1 element, we are done. We
21320 // are trying to vectorize the index computations, so the maximum number of
21321 // elements is based on the size of the index expression, rather than the
21322 // size of the GEP itself (the target's pointer size).
21323 auto *It = find_if(Entry.second, [&](GetElementPtrInst *GEP) {
21324 return !R.isDeleted(GEP);
21326 if (It == Entry.second.end())
21327 continue;
21328 unsigned MaxVecRegSize = R.getMaxVecRegSize();
21329 unsigned EltSize = R.getVectorElementSize(*(*It)->idx_begin());
21330 if (MaxVecRegSize < EltSize)
21331 continue;
21333 unsigned MaxElts = MaxVecRegSize / EltSize;
21334 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
21335 auto Len = std::min<unsigned>(BE - BI, MaxElts);
21336 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
21338 // Initialize a set of candidate getelementptrs. Note that we use a
21339 // SetVector here to preserve program order. If the index computations
21340 // are vectorizable and begin with loads, we want to minimize the chance
21341 // of having to reorder them later.
21342 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
21344 // Some of the candidates may have already been vectorized after we
21345 // initially collected them, or their index was optimized to a constant value.
21346 // If so, they are marked as deleted, so remove them from the set of
21347 // candidates.
21348 Candidates.remove_if([&R](Value *I) {
21349 return R.isDeleted(cast<Instruction>(I)) ||
21350 isa<Constant>(cast<GetElementPtrInst>(I)->idx_begin()->get());
21353 // Remove from the set of candidates all pairs of getelementptrs with
21354 // constant differences. Such getelementptrs are likely not good
21355 // candidates for vectorization in a bottom-up phase since one can be
21356 // computed from the other. We also ensure all candidate getelementptr
21357 // indices are unique.
21358 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
21359 auto *GEPI = GEPList[I];
21360 if (!Candidates.count(GEPI))
21361 continue;
21362 const SCEV *SCEVI = SE->getSCEV(GEPList[I]);
21363 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
21364 auto *GEPJ = GEPList[J];
21365 const SCEV *SCEVJ = SE->getSCEV(GEPList[J]);
21366 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
21367 Candidates.remove(GEPI);
21368 Candidates.remove(GEPJ);
21369 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
21370 Candidates.remove(GEPJ);
21375 // We break out of the above computation as soon as we know there are
21376 // fewer than two candidates remaining.
21377 if (Candidates.size() < 2)
21378 continue;
21380 // Add the single, non-constant index of each candidate to the bundle. We
21381 // ensured the indices met these constraints when we originally collected
21382 // the getelementptrs.
21383 SmallVector<Value *, 16> Bundle(Candidates.size());
21384 auto BundleIndex = 0u;
21385 for (auto *V : Candidates) {
21386 auto *GEP = cast<GetElementPtrInst>(V);
21387 auto *GEPIdx = GEP->idx_begin()->get();
21388 assert(GEP->getNumIndices() == 1 && !isa<Constant>(GEPIdx));
21389 Bundle[BundleIndex++] = GEPIdx;
21392 // Try and vectorize the indices. We are currently only interested in
21393 // gather-like cases of the form:
21395 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
21397 // where the loads of "a", the loads of "b", and the subtractions can be
21398 // performed in parallel. It's likely that detecting this pattern in a
21399 // bottom-up phase will be simpler and less costly than building a
21400 // full-blown top-down phase beginning at the consecutive loads.
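// For example (an illustrative sketch of the IR shape, not a specific test):
//   %d0 = sub i64 %a0, %b0
//   %g0 = getelementptr inbounds float, ptr %g, i64 %d0
//   %d1 = sub i64 %a1, %b1
//   %g1 = getelementptr inbounds float, ptr %g, i64 %d1
// Here %d0 and %d1 form the Bundle that is handed to tryToVectorizeList.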
21401 Changed |= tryToVectorizeList(Bundle, R);
21404 return Changed;
21405 }
21407 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
21408 bool Changed = false;
21409 // Sort by type, base pointers and value operands. Value operands must be
21410 // compatible (have the same opcode, same parent), otherwise it is
21411 // definitely not profitable to try to vectorize them.
21412 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
21413 if (V->getValueOperand()->getType()->getTypeID() <
21414 V2->getValueOperand()->getType()->getTypeID())
21415 return true;
21416 if (V->getValueOperand()->getType()->getTypeID() >
21417 V2->getValueOperand()->getType()->getTypeID())
21418 return false;
21419 if (V->getPointerOperandType()->getTypeID() <
21420 V2->getPointerOperandType()->getTypeID())
21421 return true;
21422 if (V->getPointerOperandType()->getTypeID() >
21423 V2->getPointerOperandType()->getTypeID())
21424 return false;
21425 if (V->getValueOperand()->getType()->getScalarSizeInBits() <
21426 V2->getValueOperand()->getType()->getScalarSizeInBits())
21427 return true;
21428 if (V->getValueOperand()->getType()->getScalarSizeInBits() >
21429 V2->getValueOperand()->getType()->getScalarSizeInBits())
21430 return false;
21431 // UndefValues are compatible with all other values.
21432 if (isa<UndefValue>(V->getValueOperand()) ||
21433 isa<UndefValue>(V2->getValueOperand()))
21434 return false;
21435 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
21436 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
21437 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
21438 DT->getNode(I1->getParent());
21439 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
21440 DT->getNode(I2->getParent());
21441 assert(NodeI1 && "Should only process reachable instructions");
21442 assert(NodeI2 && "Should only process reachable instructions");
21443 assert((NodeI1 == NodeI2) ==
21444 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
21445 "Different nodes should have different DFS numbers");
21446 if (NodeI1 != NodeI2)
21447 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
21448 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21449 if (S.getOpcode())
21450 return false;
21451 return I1->getOpcode() < I2->getOpcode();
21453 if (isa<Constant>(V->getValueOperand()) &&
21454 isa<Constant>(V2->getValueOperand()))
21455 return false;
21456 return V->getValueOperand()->getValueID() <
21457 V2->getValueOperand()->getValueID();
21460 auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
21461 if (V1 == V2)
21462 return true;
21463 if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
21464 return false;
21465 if (V1->getPointerOperandType() != V2->getPointerOperandType())
21466 return false;
21467 // Undefs are compatible with any other value.
21468 if (isa<UndefValue>(V1->getValueOperand()) ||
21469 isa<UndefValue>(V2->getValueOperand()))
21470 return true;
21471 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
21472 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
21473 if (I1->getParent() != I2->getParent())
21474 return false;
21475 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21476 return S.getOpcode() > 0;
21478 if (isa<Constant>(V1->getValueOperand()) &&
21479 isa<Constant>(V2->getValueOperand()))
21480 return true;
21481 return V1->getValueOperand()->getValueID() ==
21482 V2->getValueOperand()->getValueID();
21485 // Attempt to sort and vectorize each of the store-groups.
21486 DenseSet<std::tuple<Value *, Value *, Value *, Value *, unsigned>> Attempted;
21487 for (auto &Pair : Stores) {
21488 if (Pair.second.size() < 2)
21489 continue;
21491 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
21492 << Pair.second.size() << ".\n");
21494 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
21495 continue;
21497 // Reverse stores to do bottom-to-top analysis. This is important if the
21498 // values are stored to the same addresses several times; in this case we need
21499 // to follow the store order (reversed to meet the memory dependencies).
21500 SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
21501 Pair.second.rend());
21502 Changed |= tryToVectorizeSequence<StoreInst>(
21503 ReversedStores, StoreSorter, AreCompatibleStores,
21504 [&](ArrayRef<StoreInst *> Candidates, bool) {
21505 return vectorizeStores(Candidates, R, Attempted);
21507 /*MaxVFOnly=*/false, R);
21509 return Changed;