1 //===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass implements the Bottom Up SLP vectorizer. It detects consecutive
10 // stores that can be put together into vector-stores. Next, it attempts to
11 // construct vectorizable tree using the use-def chains. If a profitable tree
12 // was found, the SLP vectorizer performs vectorization on the tree.
14 // The pass is inspired by the work described in the paper:
15 // "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
17 //===----------------------------------------------------------------------===//
19 #include "llvm/Transforms/Vectorize/SLPVectorizer.h"
20 #include "llvm/ADT/DenseMap.h"
21 #include "llvm/ADT/DenseSet.h"
22 #include "llvm/ADT/PriorityQueue.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/ScopeExit.h"
25 #include "llvm/ADT/SetOperations.h"
26 #include "llvm/ADT/SetVector.h"
27 #include "llvm/ADT/SmallBitVector.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/SmallString.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/iterator.h"
33 #include "llvm/ADT/iterator_range.h"
34 #include "llvm/Analysis/AliasAnalysis.h"
35 #include "llvm/Analysis/AssumptionCache.h"
36 #include "llvm/Analysis/CodeMetrics.h"
37 #include "llvm/Analysis/ConstantFolding.h"
38 #include "llvm/Analysis/DemandedBits.h"
39 #include "llvm/Analysis/GlobalsModRef.h"
40 #include "llvm/Analysis/IVDescriptors.h"
41 #include "llvm/Analysis/LoopAccessAnalysis.h"
42 #include "llvm/Analysis/LoopInfo.h"
43 #include "llvm/Analysis/MemoryLocation.h"
44 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
45 #include "llvm/Analysis/ScalarEvolution.h"
46 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
47 #include "llvm/Analysis/TargetLibraryInfo.h"
48 #include "llvm/Analysis/TargetTransformInfo.h"
49 #include "llvm/Analysis/ValueTracking.h"
50 #include "llvm/Analysis/VectorUtils.h"
51 #include "llvm/IR/Attributes.h"
52 #include "llvm/IR/BasicBlock.h"
53 #include "llvm/IR/Constant.h"
54 #include "llvm/IR/Constants.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DerivedTypes.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/IR/Operator.h"
67 #include "llvm/IR/PatternMatch.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Use.h"
70 #include "llvm/IR/User.h"
71 #include "llvm/IR/Value.h"
72 #include "llvm/IR/ValueHandle.h"
73 #ifdef EXPENSIVE_CHECKS
74 #include "llvm/IR/Verifier.h"
75 #endif
76 #include "llvm/Pass.h"
77 #include "llvm/Support/Casting.h"
78 #include "llvm/Support/CommandLine.h"
79 #include "llvm/Support/Compiler.h"
80 #include "llvm/Support/DOTGraphTraits.h"
81 #include "llvm/Support/Debug.h"
82 #include "llvm/Support/DebugCounter.h"
83 #include "llvm/Support/ErrorHandling.h"
84 #include "llvm/Support/GraphWriter.h"
85 #include "llvm/Support/InstructionCost.h"
86 #include "llvm/Support/KnownBits.h"
87 #include "llvm/Support/MathExtras.h"
88 #include "llvm/Support/raw_ostream.h"
89 #include "llvm/Transforms/Utils/InjectTLIMappings.h"
90 #include "llvm/Transforms/Utils/Local.h"
91 #include "llvm/Transforms/Utils/LoopUtils.h"
92 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
93 #include <algorithm>
94 #include <cassert>
95 #include <cstdint>
96 #include <iterator>
97 #include <memory>
98 #include <optional>
99 #include <set>
100 #include <string>
101 #include <tuple>
102 #include <utility>
104 using namespace llvm;
105 using namespace llvm::PatternMatch;
106 using namespace slpvectorizer;
108 #define SV_NAME "slp-vectorizer"
109 #define DEBUG_TYPE "SLP"
111 STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
113 DEBUG_COUNTER(VectorizedGraphs, "slp-vectorized",
114 "Controls which SLP graphs should be vectorized.");
116 static cl::opt<bool>
117 RunSLPVectorization("vectorize-slp", cl::init(true), cl::Hidden,
118 cl::desc("Run the SLP vectorization passes"));
120 static cl::opt<bool>
121 SLPReVec("slp-revec", cl::init(false), cl::Hidden,
122 cl::desc("Enable vectorization for wider vector utilization"));
124 static cl::opt<int>
125 SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
126 cl::desc("Only vectorize if you gain more than this "
127 "number "));
129 static cl::opt<bool> SLPSkipEarlyProfitabilityCheck(
130 "slp-skip-early-profitability-check", cl::init(false), cl::Hidden,
131 cl::desc("When true, SLP vectorizer bypasses profitability checks based on "
132 "heuristics and makes vectorization decision via cost modeling."));
134 static cl::opt<bool>
135 ShouldVectorizeHor("slp-vectorize-hor", cl::init(true), cl::Hidden,
136 cl::desc("Attempt to vectorize horizontal reductions"));
138 static cl::opt<bool> ShouldStartVectorizeHorAtStore(
139 "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
140 cl::desc(
141 "Attempt to vectorize horizontal reductions feeding into a store"));
143 static cl::opt<int>
144 MaxVectorRegSizeOption("slp-max-reg-size", cl::init(128), cl::Hidden,
145 cl::desc("Attempt to vectorize for this register size in bits"));
147 static cl::opt<unsigned>
148 MaxVFOption("slp-max-vf", cl::init(0), cl::Hidden,
149 cl::desc("Maximum SLP vectorization factor (0=unlimited)"));
151 /// Limits the size of scheduling regions in a block.
152 /// It avoids long compile times for _very_ large blocks where vector
153 /// instructions are spread over a wide range.
154 /// This limit is way higher than needed by real-world functions.
155 static cl::opt<int>
156 ScheduleRegionSizeBudget("slp-schedule-budget", cl::init(100000), cl::Hidden,
157 cl::desc("Limit the size of the SLP scheduling region per block"));
159 static cl::opt<int> MinVectorRegSizeOption(
160 "slp-min-reg-size", cl::init(128), cl::Hidden,
161 cl::desc("Attempt to vectorize for this register size in bits"));
163 static cl::opt<unsigned> RecursionMaxDepth(
164 "slp-recursion-max-depth", cl::init(12), cl::Hidden,
165 cl::desc("Limit the recursion depth when building a vectorizable tree"));
167 static cl::opt<unsigned> MinTreeSize(
168 "slp-min-tree-size", cl::init(3), cl::Hidden,
169 cl::desc("Only vectorize small trees if they are fully vectorizable"));
171 // The maximum depth that the look-ahead score heuristic will explore.
172 // The higher this value, the higher the compilation time overhead.
173 static cl::opt<int> LookAheadMaxDepth(
174 "slp-max-look-ahead-depth", cl::init(2), cl::Hidden,
175 cl::desc("The maximum look-ahead depth for operand reordering scores"));
177 // The maximum depth that the look-ahead score heuristic will explore
178 // when probing among candidates for vectorization tree roots.
179 // The higher this value, the higher the compilation time overhead. Unlike
180 // the similar limit for operand ordering, this one is used less frequently,
181 // so the impact of a higher value is less noticeable.
182 static cl::opt<int> RootLookAheadMaxDepth(
183 "slp-max-root-look-ahead-depth", cl::init(2), cl::Hidden,
184 cl::desc("The maximum look-ahead depth for searching best rooting option"));
186 static cl::opt<unsigned> MinProfitableStridedLoads(
187 "slp-min-strided-loads", cl::init(2), cl::Hidden,
188 cl::desc("The minimum number of loads, which should be considered strided, "
189 "if the stride is > 1 or is runtime value"));
191 static cl::opt<unsigned> MaxProfitableLoadStride(
192 "slp-max-stride", cl::init(8), cl::Hidden,
193 cl::desc("The maximum stride, considered to be profitable."));
195 static cl::opt<bool>
196 ViewSLPTree("view-slp-tree", cl::Hidden,
197 cl::desc("Display the SLP trees with Graphviz"));
199 static cl::opt<bool> VectorizeNonPowerOf2(
200 "slp-vectorize-non-power-of-2", cl::init(false), cl::Hidden,
201 cl::desc("Try to vectorize with non-power-of-2 number of elements."));
203 // Limit the number of alias checks. The limit is chosen so that
204 // it has no negative effect on the llvm benchmarks.
205 static const unsigned AliasedCheckLimit = 10;
207 // Limit on the number of uses for potentially transformed instructions/values,
208 // used in checks to avoid compile-time explosion.
209 static constexpr int UsesLimit = 64;
211 // Another limit for the alias checks: The maximum distance between load/store
212 // instructions where alias checks are done.
213 // This limit is useful for very large basic blocks.
214 static const unsigned MaxMemDepDistance = 160;
216 /// If the ScheduleRegionSizeBudget is exhausted, we allow small scheduling
217 /// regions to be handled.
218 static const int MinScheduleRegionSize = 16;
220 /// Maximum allowed number of operands in the PHI nodes.
221 static const unsigned MaxPHINumOperands = 128;
223 /// Predicate for the element types that the SLP vectorizer supports.
225 /// The most important things to filter here are types which are invalid in LLVM
226 /// vectors. We also filter target specific types which have absolutely no
227 /// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
228 /// avoids spending time checking the cost model and realizing that they will
229 /// be inevitably scalarized.
230 static bool isValidElementType(Type *Ty) {
231 // TODO: Support ScalableVectorType.
232 if (SLPReVec && isa<FixedVectorType>(Ty))
233 Ty = Ty->getScalarType();
234 return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
235 !Ty->isPPC_FP128Ty();
238 /// Returns the type of the given value/instruction \p V. If it is a store,
239 /// returns the type of its value operand; for Cmp - the type of the compare
240 /// operands; and for insertelement - the type of the inserted operand.
241 /// Otherwise, just the type of the value is returned.
242 static Type *getValueType(Value *V) {
243 if (auto *SI = dyn_cast<StoreInst>(V))
244 return SI->getValueOperand()->getType();
245 if (auto *CI = dyn_cast<CmpInst>(V))
246 return CI->getOperand(0)->getType();
247 if (auto *IE = dyn_cast<InsertElementInst>(V))
248 return IE->getOperand(1)->getType();
249 return V->getType();
252 /// \returns the number of elements for Ty.
253 static unsigned getNumElements(Type *Ty) {
254 assert(!isa<ScalableVectorType>(Ty) &&
255 "ScalableVectorType is not supported.");
256 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty))
257 return VecTy->getNumElements();
258 return 1;
261 /// \returns the vector type of ScalarTy based on vectorization factor.
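/// For example, getWidenedType(i32, 4) is <4 x i32>; for a revectorized scalar
/// type such as <2 x i16> with VF = 4 it is <8 x i16>.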
262 static FixedVectorType *getWidenedType(Type *ScalarTy, unsigned VF) {
263 return FixedVectorType::get(ScalarTy->getScalarType(),
264 VF * getNumElements(ScalarTy));
267 /// Returns the number of elements of the given type \p Ty, not less than \p Sz,
268 /// which forms a type that \p TTI splits into whole vector types during
269 /// legalization.
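/// For example (illustrative, the part count depends on the target): if \p TTI
/// splits <6 x i32> into 2 parts, then for Ty = i32 and Sz = 6 the result is
/// bit_ceil(divideCeil(6, 2)) * 2 = 4 * 2 = 8.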
270 static unsigned getFullVectorNumberOfElements(const TargetTransformInfo &TTI,
271 Type *Ty, unsigned Sz) {
272 if (!isValidElementType(Ty))
273 return bit_ceil(Sz);
274 // Find the number of elements, which forms full vectors.
275 const unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
276 if (NumParts == 0 || NumParts >= Sz)
277 return bit_ceil(Sz);
278 return bit_ceil(divideCeil(Sz, NumParts)) * NumParts;
281 /// Returns the number of elements of the given type \p Ty, not greater than \p
282 /// Sz, which forms a type that \p TTI splits into whole vector types during
283 /// legalization.
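/// For example (illustrative, the part count depends on the target): if \p TTI
/// splits <6 x i32> into 2 parts, then for Ty = i32 and Sz = 6 the register VF
/// is bit_ceil(divideCeil(6, 2)) = 4 and the result is (6 / 4) * 4 = 4.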
284 static unsigned
285 getFloorFullVectorNumberOfElements(const TargetTransformInfo &TTI, Type *Ty,
286 unsigned Sz) {
287 if (!isValidElementType(Ty))
288 return bit_floor(Sz);
289 // Find the number of elements, which forms full vectors.
290 unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
291 if (NumParts == 0 || NumParts >= Sz)
292 return bit_floor(Sz);
293 unsigned RegVF = bit_ceil(divideCeil(Sz, NumParts));
294 if (RegVF > Sz)
295 return bit_floor(Sz);
296 return (Sz / RegVF) * RegVF;
299 static void transformScalarShuffleIndiciesToVector(unsigned VecTyNumElements,
300 SmallVectorImpl<int> &Mask) {
301 // The ShuffleBuilder implementation uses shufflevector to splat an "element".
302 // But the element has a different meaning for SLP (scalar) and REVEC
303 // (vector). We need to expand Mask into masks which shufflevector can use
304 // directly.
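// For example, with VecTyNumElements = 2 the scalar mask {1, 0} expands to the
// vector mask {2, 3, 0, 1}.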
305 SmallVector<int> NewMask(Mask.size() * VecTyNumElements);
306 for (unsigned I : seq<unsigned>(Mask.size()))
307 for (auto [J, MaskV] : enumerate(MutableArrayRef(NewMask).slice(
308 I * VecTyNumElements, VecTyNumElements)))
309 MaskV = Mask[I] == PoisonMaskElem ? PoisonMaskElem
310 : Mask[I] * VecTyNumElements + J;
311 Mask.swap(NewMask);
314 /// \returns the number of groups of shufflevectors.
315 /// A group has the following features:
316 /// 1. All values in a group are shufflevectors.
317 /// 2. The mask of each shufflevector is an isExtractSubvectorMask.
318 /// 3. Together, the masks of all shufflevectors use all elements of the source.
319 /// e.g., it is 1 group (%0)
320 /// %1 = shufflevector <16 x i8> %0, <16 x i8> poison,
321 /// <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
322 /// %2 = shufflevector <16 x i8> %0, <16 x i8> poison,
323 /// <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
324 /// it is 2 groups (%3 and %4)
325 /// %5 = shufflevector <8 x i16> %3, <8 x i16> poison,
326 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
327 /// %6 = shufflevector <8 x i16> %3, <8 x i16> poison,
328 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
329 /// %7 = shufflevector <8 x i16> %4, <8 x i16> poison,
330 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
331 /// %8 = shufflevector <8 x i16> %4, <8 x i16> poison,
332 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
333 /// it is 0 groups
334 /// %12 = shufflevector <8 x i16> %10, <8 x i16> poison,
335 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
336 /// %13 = shufflevector <8 x i16> %11, <8 x i16> poison,
337 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
338 static unsigned getShufflevectorNumGroups(ArrayRef<Value *> VL) {
339 if (VL.empty())
340 return 0;
341 if (!all_of(VL, IsaPred<ShuffleVectorInst>))
342 return 0;
343 auto *SV = cast<ShuffleVectorInst>(VL.front());
344 unsigned SVNumElements =
345 cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
346 unsigned ShuffleMaskSize = SV->getShuffleMask().size();
347 if (SVNumElements % ShuffleMaskSize != 0)
348 return 0;
349 unsigned GroupSize = SVNumElements / ShuffleMaskSize;
350 if (GroupSize == 0 || (VL.size() % GroupSize) != 0)
351 return 0;
352 unsigned NumGroup = 0;
353 for (size_t I = 0, E = VL.size(); I != E; I += GroupSize) {
354 auto *SV = cast<ShuffleVectorInst>(VL[I]);
355 Value *Src = SV->getOperand(0);
356 ArrayRef<Value *> Group = VL.slice(I, GroupSize);
357 SmallBitVector ExpectedIndex(GroupSize);
358 if (!all_of(Group, [&](Value *V) {
359 auto *SV = cast<ShuffleVectorInst>(V);
360 // From the same source.
361 if (SV->getOperand(0) != Src)
362 return false;
363 int Index;
364 if (!SV->isExtractSubvectorMask(Index))
365 return false;
366 ExpectedIndex.set(Index / ShuffleMaskSize);
367 return true;
369 return 0;
370 if (!ExpectedIndex.all())
371 return 0;
372 ++NumGroup;
374 assert(NumGroup == (VL.size() / GroupSize) && "Unexpected number of groups");
375 return NumGroup;
378 /// \returns a shufflevector mask which is used to vectorize shufflevectors
379 /// e.g.,
380 /// %5 = shufflevector <8 x i16> %3, <8 x i16> poison,
381 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
382 /// %6 = shufflevector <8 x i16> %3, <8 x i16> poison,
383 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
384 /// %7 = shufflevector <8 x i16> %4, <8 x i16> poison,
385 /// <4 x i32> <i32 0, i32 1, i32 2, i32 3>
386 /// %8 = shufflevector <8 x i16> %4, <8 x i16> poison,
387 /// <4 x i32> <i32 4, i32 5, i32 6, i32 7>
388 /// the result is
389 /// <0, 1, 2, 3, 12, 13, 14, 15, 16, 17, 18, 19, 28, 29, 30, 31>
390 static SmallVector<int> calculateShufflevectorMask(ArrayRef<Value *> VL) {
391 assert(getShufflevectorNumGroups(VL) && "Not supported shufflevector usage.");
392 auto *SV = cast<ShuffleVectorInst>(VL.front());
393 unsigned SVNumElements =
394 cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
395 SmallVector<int> Mask;
396 unsigned AccumulateLength = 0;
397 for (Value *V : VL) {
398 auto *SV = cast<ShuffleVectorInst>(V);
399 for (int M : SV->getShuffleMask())
400 Mask.push_back(M == PoisonMaskElem ? PoisonMaskElem
401 : AccumulateLength + M);
402 AccumulateLength += SVNumElements;
404 return Mask;
407 /// \returns True if the value is a constant (but not globals/constant
408 /// expressions).
409 static bool isConstant(Value *V) {
410 return isa<Constant>(V) && !isa<ConstantExpr, GlobalValue>(V);
413 /// Checks if \p V is one of vector-like instructions, i.e. undef,
414 /// insertelement/extractelement with constant indices for fixed vector type or
415 /// extractvalue instruction.
416 static bool isVectorLikeInstWithConstOps(Value *V) {
417 if (!isa<InsertElementInst, ExtractElementInst>(V) &&
418 !isa<ExtractValueInst, UndefValue>(V))
419 return false;
420 auto *I = dyn_cast<Instruction>(V);
421 if (!I || isa<ExtractValueInst>(I))
422 return true;
423 if (!isa<FixedVectorType>(I->getOperand(0)->getType()))
424 return false;
425 if (isa<ExtractElementInst>(I))
426 return isConstant(I->getOperand(1));
427 assert(isa<InsertElementInst>(V) && "Expected only insertelement.");
428 return isConstant(I->getOperand(2));
431 /// Returns the power-of-2 number of elements in a single register (part), given the
432 /// total number of elements \p Size and number of registers (parts) \p
433 /// NumParts.
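/// For example, getPartNumElems(6, 2) == std::min(6, bit_ceil(divideCeil(6, 2)))
/// == 4.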
434 static unsigned getPartNumElems(unsigned Size, unsigned NumParts) {
435 return std::min<unsigned>(Size, bit_ceil(divideCeil(Size, NumParts)));
438 /// Returns the correct number of remaining elements, given the total number \p
439 /// Size, the (power-of-2) number of elements in a single register \p PartNumElems
440 /// and the current register (part) \p Part.
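/// For example, with Size = 6 and PartNumElems = 4, part 0 holds 4 elements and
/// part 1 holds the remaining 2.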
441 static unsigned getNumElems(unsigned Size, unsigned PartNumElems,
442 unsigned Part) {
443 return std::min<unsigned>(PartNumElems, Size - Part * PartNumElems);
446 #if !defined(NDEBUG)
447 /// Print a short descriptor of the instruction bundle suitable for debug output.
448 static std::string shortBundleName(ArrayRef<Value *> VL, int Idx = -1) {
449 std::string Result;
450 raw_string_ostream OS(Result);
451 if (Idx >= 0)
452 OS << "Idx: " << Idx << ", ";
453 OS << "n=" << VL.size() << " [" << *VL.front() << ", ..]";
454 return Result;
456 #endif
458 /// \returns true if all of the instructions in \p VL are in the same block or
459 /// false otherwise.
460 static bool allSameBlock(ArrayRef<Value *> VL) {
461 auto *It = find_if(VL, IsaPred<Instruction>);
462 if (It == VL.end())
463 return false;
464 Instruction *I0 = cast<Instruction>(*It);
465 if (all_of(VL, isVectorLikeInstWithConstOps))
466 return true;
468 BasicBlock *BB = I0->getParent();
469 for (Value *V : iterator_range(It, VL.end())) {
470 if (isa<PoisonValue>(V))
471 continue;
472 auto *II = dyn_cast<Instruction>(V);
473 if (!II)
474 return false;
476 if (BB != II->getParent())
477 return false;
479 return true;
482 /// \returns True if all of the values in \p VL are constants (but not
483 /// globals/constant expressions).
484 static bool allConstant(ArrayRef<Value *> VL) {
485 // Constant expressions and globals can't be vectorized like normal integer/FP
486 // constants.
487 return all_of(VL, isConstant);
490 /// \returns True if all of the values in \p VL are identical or some of them
491 /// are UndefValue.
492 static bool isSplat(ArrayRef<Value *> VL) {
493 Value *FirstNonUndef = nullptr;
494 for (Value *V : VL) {
495 if (isa<UndefValue>(V))
496 continue;
497 if (!FirstNonUndef) {
498 FirstNonUndef = V;
499 continue;
501 if (V != FirstNonUndef)
502 return false;
504 return FirstNonUndef != nullptr;
507 /// \returns True if \p I is commutative, handles CmpInst and BinaryOperator.
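/// For example, %s = sub i32 %a, %b is treated as commutative when its only use
/// is %c = icmp eq i32 %s, 0, since testing the difference against zero does not
/// depend on the operand order.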
508 static bool isCommutative(Instruction *I) {
509 if (auto *Cmp = dyn_cast<CmpInst>(I))
510 return Cmp->isCommutative();
511 if (auto *BO = dyn_cast<BinaryOperator>(I))
512 return BO->isCommutative() ||
513 (BO->getOpcode() == Instruction::Sub &&
514 !BO->hasNUsesOrMore(UsesLimit) &&
515 all_of(
516 BO->uses(),
517 [](const Use &U) {
518 // Commutative, if icmp eq/ne sub, 0
519 ICmpInst::Predicate Pred;
520 if (match(U.getUser(),
521 m_ICmp(Pred, m_Specific(U.get()), m_Zero())) &&
522 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE))
523 return true;
524 // Commutative, if abs(sub nsw, true) or abs(sub, false).
525 ConstantInt *Flag;
526 return match(U.getUser(),
527 m_Intrinsic<Intrinsic::abs>(
528 m_Specific(U.get()), m_ConstantInt(Flag))) &&
529 (!cast<Instruction>(U.get())->hasNoSignedWrap() ||
530 Flag->isOne());
531 })) ||
532 (BO->getOpcode() == Instruction::FSub &&
533 !BO->hasNUsesOrMore(UsesLimit) &&
534 all_of(BO->uses(), [](const Use &U) {
535 return match(U.getUser(),
536 m_Intrinsic<Intrinsic::fabs>(m_Specific(U.get())));
537 }));
538 return I->isCommutative();
541 template <typename T>
542 static std::optional<unsigned> getInsertExtractIndex(const Value *Inst,
543 unsigned Offset) {
544 static_assert(std::is_same_v<T, InsertElementInst> ||
545 std::is_same_v<T, ExtractElementInst>,
546 "unsupported T");
547 int Index = Offset;
548 if (const auto *IE = dyn_cast<T>(Inst)) {
549 const auto *VT = dyn_cast<FixedVectorType>(IE->getType());
550 if (!VT)
551 return std::nullopt;
552 const auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
553 if (!CI)
554 return std::nullopt;
555 if (CI->getValue().uge(VT->getNumElements()))
556 return std::nullopt;
557 Index *= VT->getNumElements();
558 Index += CI->getZExtValue();
559 return Index;
561 return std::nullopt;
564 /// \returns the inserting or extracting index of an InsertElement, ExtractElement
565 /// or InsertValue instruction, using \p Offset as the base offset for the index.
566 /// \returns std::nullopt if the index is not an immediate.
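/// For example, insertelement <4 x float> %v, float %x, i32 2 yields 2 (with
/// \p Offset 0); for an insertvalue into nested aggregates the indices are
/// linearized, e.g. indices (1, 1) into [2 x { float, float }] yield 1 * 2 + 1 = 3.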
567 static std::optional<unsigned> getElementIndex(const Value *Inst,
568 unsigned Offset = 0) {
569 if (auto Index = getInsertExtractIndex<InsertElementInst>(Inst, Offset))
570 return Index;
571 if (auto Index = getInsertExtractIndex<ExtractElementInst>(Inst, Offset))
572 return Index;
574 int Index = Offset;
576 const auto *IV = dyn_cast<InsertValueInst>(Inst);
577 if (!IV)
578 return std::nullopt;
580 Type *CurrentType = IV->getType();
581 for (unsigned I : IV->indices()) {
582 if (const auto *ST = dyn_cast<StructType>(CurrentType)) {
583 Index *= ST->getNumElements();
584 CurrentType = ST->getElementType(I);
585 } else if (const auto *AT = dyn_cast<ArrayType>(CurrentType)) {
586 Index *= AT->getNumElements();
587 CurrentType = AT->getElementType();
588 } else {
589 return std::nullopt;
591 Index += I;
593 return Index;
596 namespace {
597 /// Specifies the way the mask should be analyzed for undefs/poisonous elements
598 /// in the shuffle mask.
599 enum class UseMask {
600 FirstArg, ///< The mask is expected to be for permutation of 1-2 vectors,
601 ///< check for the mask elements for the first argument (mask
602 ///< indices are in range [0:VF)).
603 SecondArg, ///< The mask is expected to be for permutation of 2 vectors, check
604 ///< for the mask elements for the second argument (mask indices
605 ///< are in range [VF:2*VF))
606 UndefsAsMask ///< Consider undef mask elements (-1) as placeholders for
607 ///< future shuffle elements and mark them as ones, i.e. as being
608 ///< used in the future. Non-undef elements are considered unused
609 ///< since they're already marked as used in the mask.
611 } // namespace
613 /// Prepares a use bitset for the given mask either for the first argument or
614 /// for the second.
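/// For example, with VF = 4 and Mask = {0, 5, poison, 2}: for UseMask::FirstArg
/// the bits for indices 0 and 2 are cleared (those lanes of the first argument
/// are referenced by the mask), while 5 refers to the second argument and the
/// poison element is skipped.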
615 static SmallBitVector buildUseMask(int VF, ArrayRef<int> Mask,
616 UseMask MaskArg) {
617 SmallBitVector UseMask(VF, true);
618 for (auto [Idx, Value] : enumerate(Mask)) {
619 if (Value == PoisonMaskElem) {
620 if (MaskArg == UseMask::UndefsAsMask)
621 UseMask.reset(Idx);
622 continue;
624 if (MaskArg == UseMask::FirstArg && Value < VF)
625 UseMask.reset(Value);
626 else if (MaskArg == UseMask::SecondArg && Value >= VF)
627 UseMask.reset(Value - VF);
629 return UseMask;
632 /// Checks if the given value is actually an undefined constant vector.
633 /// Also, if the \p UseMask is not empty, tries to check if the non-masked
634 /// elements actually mask the insertelement buildvector, if any.
635 template <bool IsPoisonOnly = false>
636 static SmallBitVector isUndefVector(const Value *V,
637 const SmallBitVector &UseMask = {}) {
638 SmallBitVector Res(UseMask.empty() ? 1 : UseMask.size(), true);
639 using T = std::conditional_t<IsPoisonOnly, PoisonValue, UndefValue>;
640 if (isa<T>(V))
641 return Res;
642 auto *VecTy = dyn_cast<FixedVectorType>(V->getType());
643 if (!VecTy)
644 return Res.reset();
645 auto *C = dyn_cast<Constant>(V);
646 if (!C) {
647 if (!UseMask.empty()) {
648 const Value *Base = V;
649 while (auto *II = dyn_cast<InsertElementInst>(Base)) {
650 Base = II->getOperand(0);
651 if (isa<T>(II->getOperand(1)))
652 continue;
653 std::optional<unsigned> Idx = getElementIndex(II);
654 if (!Idx) {
655 Res.reset();
656 return Res;
658 if (*Idx < UseMask.size() && !UseMask.test(*Idx))
659 Res.reset(*Idx);
661 // TODO: Add analysis for shuffles here too.
662 if (V == Base) {
663 Res.reset();
664 } else {
665 SmallBitVector SubMask(UseMask.size(), false);
666 Res &= isUndefVector<IsPoisonOnly>(Base, SubMask);
668 } else {
669 Res.reset();
671 return Res;
673 for (unsigned I = 0, E = VecTy->getNumElements(); I != E; ++I) {
674 if (Constant *Elem = C->getAggregateElement(I))
675 if (!isa<T>(Elem) &&
676 (UseMask.empty() || (I < UseMask.size() && !UseMask.test(I))))
677 Res.reset(I);
679 return Res;
682 /// Checks if the vector of instructions can be represented as a shuffle, like:
683 /// %x0 = extractelement <4 x i8> %x, i32 0
684 /// %x3 = extractelement <4 x i8> %x, i32 3
685 /// %y1 = extractelement <4 x i8> %y, i32 1
686 /// %y2 = extractelement <4 x i8> %y, i32 2
687 /// %x0x0 = mul i8 %x0, %x0
688 /// %x3x3 = mul i8 %x3, %x3
689 /// %y1y1 = mul i8 %y1, %y1
690 /// %y2y2 = mul i8 %y2, %y2
691 /// %ins1 = insertelement <4 x i8> poison, i8 %x0x0, i32 0
692 /// %ins2 = insertelement <4 x i8> %ins1, i8 %x3x3, i32 1
693 /// %ins3 = insertelement <4 x i8> %ins2, i8 %y1y1, i32 2
694 /// %ins4 = insertelement <4 x i8> %ins3, i8 %y2y2, i32 3
695 /// ret <4 x i8> %ins4
696 /// can be transformed into:
697 /// %1 = shufflevector <4 x i8> %x, <4 x i8> %y, <4 x i32> <i32 0, i32 3, i32 5,
698 /// i32 6>
699 /// %2 = mul <4 x i8> %1, %1
700 /// ret <4 x i8> %2
701 /// \p Mask is populated with the shuffle mask equivalent to the extracted elements.
702 /// TODO: Can we split off and reuse the shuffle mask detection from
703 /// ShuffleVectorInst/getShuffleCost?
704 static std::optional<TargetTransformInfo::ShuffleKind>
705 isFixedVectorShuffle(ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) {
706 const auto *It = find_if(VL, IsaPred<ExtractElementInst>);
707 if (It == VL.end())
708 return std::nullopt;
709 unsigned Size =
710 std::accumulate(VL.begin(), VL.end(), 0u, [](unsigned S, Value *V) {
711 auto *EI = dyn_cast<ExtractElementInst>(V);
712 if (!EI)
713 return S;
714 auto *VTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType());
715 if (!VTy)
716 return S;
717 return std::max(S, VTy->getNumElements());
720 Value *Vec1 = nullptr;
721 Value *Vec2 = nullptr;
722 bool HasNonUndefVec = any_of(VL, [](Value *V) {
723 auto *EE = dyn_cast<ExtractElementInst>(V);
724 if (!EE)
725 return false;
726 Value *Vec = EE->getVectorOperand();
727 if (isa<UndefValue>(Vec))
728 return false;
729 return isGuaranteedNotToBePoison(Vec);
731 enum ShuffleMode { Unknown, Select, Permute };
732 ShuffleMode CommonShuffleMode = Unknown;
733 Mask.assign(VL.size(), PoisonMaskElem);
734 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
735 // Undef can be represented as an undef element in a vector.
736 if (isa<UndefValue>(VL[I]))
737 continue;
738 auto *EI = cast<ExtractElementInst>(VL[I]);
739 if (isa<ScalableVectorType>(EI->getVectorOperandType()))
740 return std::nullopt;
741 auto *Vec = EI->getVectorOperand();
742 // We can extractelement from undef or poison vector.
743 if (isUndefVector</*isPoisonOnly=*/true>(Vec).all())
744 continue;
745 // All vector operands must have the same number of vector elements.
746 if (isa<UndefValue>(Vec)) {
747 Mask[I] = I;
748 } else {
749 if (isa<UndefValue>(EI->getIndexOperand()))
750 continue;
751 auto *Idx = dyn_cast<ConstantInt>(EI->getIndexOperand());
752 if (!Idx)
753 return std::nullopt;
754 // Undefined behavior if Idx is negative or >= Size.
755 if (Idx->getValue().uge(Size))
756 continue;
757 unsigned IntIdx = Idx->getValue().getZExtValue();
758 Mask[I] = IntIdx;
760 if (isUndefVector(Vec).all() && HasNonUndefVec)
761 continue;
762 // For correct shuffling we have to have at most 2 different vector operands
763 // in all extractelement instructions.
764 if (!Vec1 || Vec1 == Vec) {
765 Vec1 = Vec;
766 } else if (!Vec2 || Vec2 == Vec) {
767 Vec2 = Vec;
768 Mask[I] += Size;
769 } else {
770 return std::nullopt;
772 if (CommonShuffleMode == Permute)
773 continue;
774 // If the extract index is not the same as the operation number, it is a
775 // permutation.
776 if (Mask[I] % Size != I) {
777 CommonShuffleMode = Permute;
778 continue;
780 CommonShuffleMode = Select;
782 // If we're not crossing lanes in different vectors, consider it as blending.
783 if (CommonShuffleMode == Select && Vec2)
784 return TargetTransformInfo::SK_Select;
785 // If Vec2 was never used, we have a permutation of a single vector, otherwise
786 // we have permutation of 2 vectors.
787 return Vec2 ? TargetTransformInfo::SK_PermuteTwoSrc
788 : TargetTransformInfo::SK_PermuteSingleSrc;
791 /// \returns True if Extract{Value,Element} instruction extracts element Idx.
792 static std::optional<unsigned> getExtractIndex(Instruction *E) {
793 unsigned Opcode = E->getOpcode();
794 assert((Opcode == Instruction::ExtractElement ||
795 Opcode == Instruction::ExtractValue) &&
796 "Expected extractelement or extractvalue instruction.");
797 if (Opcode == Instruction::ExtractElement) {
798 auto *CI = dyn_cast<ConstantInt>(E->getOperand(1));
799 if (!CI)
800 return std::nullopt;
801 return CI->getZExtValue();
803 auto *EI = cast<ExtractValueInst>(E);
804 if (EI->getNumIndices() != 1)
805 return std::nullopt;
806 return *EI->idx_begin();
809 namespace {
811 /// Main data required for vectorization of instructions.
812 class InstructionsState {
813 /// The main/alternate instruction. MainOp is also VL0.
814 Instruction *MainOp = nullptr;
815 Instruction *AltOp = nullptr;
817 public:
818 Instruction *getMainOp() const { return MainOp; }
820 Instruction *getAltOp() const { return AltOp; }
822 /// The main/alternate opcodes for the list of instructions.
823 unsigned getOpcode() const {
824 return MainOp ? MainOp->getOpcode() : 0;
827 unsigned getAltOpcode() const {
828 return AltOp ? AltOp->getOpcode() : 0;
831 /// Some of the instructions in the list have alternate opcodes.
832 bool isAltShuffle() const { return AltOp != MainOp; }
834 bool isOpcodeOrAlt(Instruction *I) const {
835 unsigned CheckedOpcode = I->getOpcode();
836 return getOpcode() == CheckedOpcode || getAltOpcode() == CheckedOpcode;
839 InstructionsState() = delete;
840 InstructionsState(Instruction *MainOp, Instruction *AltOp)
841 : MainOp(MainOp), AltOp(AltOp) {}
842 static InstructionsState invalid() { return {nullptr, nullptr}; }
845 } // end anonymous namespace
847 /// \returns true if \p Opcode is allowed as part of the main/alternate
848 /// instruction for SLP vectorization.
850 /// Example of unsupported opcode is SDIV that can potentially cause UB if the
851 /// "shuffled out" lane would result in division by zero.
852 static bool isValidForAlternation(unsigned Opcode) {
853 if (Instruction::isIntDivRem(Opcode))
854 return false;
856 return true;
859 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
860 const TargetLibraryInfo &TLI);
862 /// Checks if the provided operands of 2 cmp instructions are compatible, i.e.
863 /// compatible instructions or constants, or just some other regular values.
864 static bool areCompatibleCmpOps(Value *BaseOp0, Value *BaseOp1, Value *Op0,
865 Value *Op1, const TargetLibraryInfo &TLI) {
866 return (isConstant(BaseOp0) && isConstant(Op0)) ||
867 (isConstant(BaseOp1) && isConstant(Op1)) ||
868 (!isa<Instruction>(BaseOp0) && !isa<Instruction>(Op0) &&
869 !isa<Instruction>(BaseOp1) && !isa<Instruction>(Op1)) ||
870 BaseOp0 == Op0 || BaseOp1 == Op1 ||
871 getSameOpcode({BaseOp0, Op0}, TLI).getOpcode() ||
872 getSameOpcode({BaseOp1, Op1}, TLI).getOpcode();
875 /// \returns true if a compare instruction \p CI has similar "look" and
876 /// same predicate as \p BaseCI, "as is" or with its operands and predicate
877 /// swapped, false otherwise.
878 static bool isCmpSameOrSwapped(const CmpInst *BaseCI, const CmpInst *CI,
879 const TargetLibraryInfo &TLI) {
880 assert(BaseCI->getOperand(0)->getType() == CI->getOperand(0)->getType() &&
881 "Assessing comparisons of different types?");
882 CmpInst::Predicate BasePred = BaseCI->getPredicate();
883 CmpInst::Predicate Pred = CI->getPredicate();
884 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(Pred);
886 Value *BaseOp0 = BaseCI->getOperand(0);
887 Value *BaseOp1 = BaseCI->getOperand(1);
888 Value *Op0 = CI->getOperand(0);
889 Value *Op1 = CI->getOperand(1);
891 return (BasePred == Pred &&
892 areCompatibleCmpOps(BaseOp0, BaseOp1, Op0, Op1, TLI)) ||
893 (BasePred == SwappedPred &&
894 areCompatibleCmpOps(BaseOp0, BaseOp1, Op1, Op0, TLI));
897 /// \returns an analysis of the Instructions in \p VL described in
898 /// InstructionsState, i.e. the opcode with which we suppose the whole list
899 /// could be vectorized, even if its structure is diverse.
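/// For example, for VL = {fadd, fsub, fadd, fsub} the returned state has
/// MainOp = fadd and AltOp = fsub (an alternate-opcode bundle), while for a
/// uniform bundle such as {mul, mul, mul, mul} MainOp == AltOp and
/// isAltShuffle() is false.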
900 static InstructionsState getSameOpcode(ArrayRef<Value *> VL,
901 const TargetLibraryInfo &TLI) {
902 // Make sure these are all Instructions.
903 if (!all_of(VL, IsaPred<Instruction, PoisonValue>))
904 return InstructionsState::invalid();
906 auto *It = find_if(VL, IsaPred<Instruction>);
907 if (It == VL.end())
908 return InstructionsState::invalid();
910 Value *V = *It;
911 unsigned InstCnt = std::count_if(It, VL.end(), IsaPred<Instruction>);
912 if ((VL.size() > 2 && !isa<PHINode>(V) && InstCnt < VL.size() / 2) ||
913 (VL.size() == 2 && InstCnt < 2))
914 return InstructionsState::invalid();
916 bool IsCastOp = isa<CastInst>(V);
917 bool IsBinOp = isa<BinaryOperator>(V);
918 bool IsCmpOp = isa<CmpInst>(V);
919 CmpInst::Predicate BasePred =
920 IsCmpOp ? cast<CmpInst>(V)->getPredicate() : CmpInst::BAD_ICMP_PREDICATE;
921 unsigned Opcode = cast<Instruction>(V)->getOpcode();
922 unsigned AltOpcode = Opcode;
923 unsigned AltIndex = std::distance(VL.begin(), It);
925 bool SwappedPredsCompatible = [&]() {
926 if (!IsCmpOp)
927 return false;
928 SetVector<unsigned> UniquePreds, UniqueNonSwappedPreds;
929 UniquePreds.insert(BasePred);
930 UniqueNonSwappedPreds.insert(BasePred);
931 for (Value *V : VL) {
932 auto *I = dyn_cast<CmpInst>(V);
933 if (!I)
934 return false;
935 CmpInst::Predicate CurrentPred = I->getPredicate();
936 CmpInst::Predicate SwappedCurrentPred =
937 CmpInst::getSwappedPredicate(CurrentPred);
938 UniqueNonSwappedPreds.insert(CurrentPred);
939 if (!UniquePreds.contains(CurrentPred) &&
940 !UniquePreds.contains(SwappedCurrentPred))
941 UniquePreds.insert(CurrentPred);
943 // If the total number of predicates is > 2, but only 2 remain once swapped
944 // predicates are considered compatible, treat swappable predicates as
945 // compatible opcodes, not alternate.
946 return UniqueNonSwappedPreds.size() > 2 && UniquePreds.size() == 2;
947 }();
948 // Check for one alternate opcode from another BinaryOperator.
949 // TODO - generalize to support all operators (types, calls etc.).
950 auto *IBase = cast<Instruction>(V);
951 Intrinsic::ID BaseID = 0;
952 SmallVector<VFInfo> BaseMappings;
953 if (auto *CallBase = dyn_cast<CallInst>(IBase)) {
954 BaseID = getVectorIntrinsicIDForCall(CallBase, &TLI);
955 BaseMappings = VFDatabase(*CallBase).getMappings(*CallBase);
956 if (!isTriviallyVectorizable(BaseID) && BaseMappings.empty())
957 return InstructionsState::invalid();
959 bool AnyPoison = InstCnt != VL.size();
960 for (int Cnt = 0, E = VL.size(); Cnt < E; Cnt++) {
961 auto *I = dyn_cast<Instruction>(VL[Cnt]);
962 if (!I)
963 continue;
965 // Cannot combine poison and divisions.
966 // TODO: do some smart analysis of the CallInsts to exclude divide-like
967 // intrinsics/functions only.
968 if (AnyPoison && (I->isIntDivRem() || I->isFPDivRem() || isa<CallInst>(I)))
969 return InstructionsState::invalid();
970 unsigned InstOpcode = I->getOpcode();
971 if (IsBinOp && isa<BinaryOperator>(I)) {
972 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
973 continue;
974 if (Opcode == AltOpcode && isValidForAlternation(InstOpcode) &&
975 isValidForAlternation(Opcode)) {
976 AltOpcode = InstOpcode;
977 AltIndex = Cnt;
978 continue;
980 } else if (IsCastOp && isa<CastInst>(I)) {
981 Value *Op0 = IBase->getOperand(0);
982 Type *Ty0 = Op0->getType();
983 Value *Op1 = I->getOperand(0);
984 Type *Ty1 = Op1->getType();
985 if (Ty0 == Ty1) {
986 if (InstOpcode == Opcode || InstOpcode == AltOpcode)
987 continue;
988 if (Opcode == AltOpcode) {
989 assert(isValidForAlternation(Opcode) &&
990 isValidForAlternation(InstOpcode) &&
991 "Cast isn't safe for alternation, logic needs to be updated!");
992 AltOpcode = InstOpcode;
993 AltIndex = Cnt;
994 continue;
997 } else if (auto *Inst = dyn_cast<CmpInst>(VL[Cnt]); Inst && IsCmpOp) {
998 auto *BaseInst = cast<CmpInst>(V);
999 Type *Ty0 = BaseInst->getOperand(0)->getType();
1000 Type *Ty1 = Inst->getOperand(0)->getType();
1001 if (Ty0 == Ty1) {
1002 assert(InstOpcode == Opcode && "Expected same CmpInst opcode.");
1003 assert(InstOpcode == AltOpcode &&
1004 "Alternate instructions are only supported by BinaryOperator "
1005 "and CastInst.");
1006 // Check for compatible operands. If the corresponding operands are not
1007 // compatible - need to perform alternate vectorization.
1008 CmpInst::Predicate CurrentPred = Inst->getPredicate();
1009 CmpInst::Predicate SwappedCurrentPred =
1010 CmpInst::getSwappedPredicate(CurrentPred);
1012 if ((E == 2 || SwappedPredsCompatible) &&
1013 (BasePred == CurrentPred || BasePred == SwappedCurrentPred))
1014 continue;
1016 if (isCmpSameOrSwapped(BaseInst, Inst, TLI))
1017 continue;
1018 auto *AltInst = cast<CmpInst>(VL[AltIndex]);
1019 if (AltIndex) {
1020 if (isCmpSameOrSwapped(AltInst, Inst, TLI))
1021 continue;
1022 } else if (BasePred != CurrentPred) {
1023 assert(
1024 isValidForAlternation(InstOpcode) &&
1025 "CmpInst isn't safe for alternation, logic needs to be updated!");
1026 AltIndex = Cnt;
1027 continue;
1029 CmpInst::Predicate AltPred = AltInst->getPredicate();
1030 if (BasePred == CurrentPred || BasePred == SwappedCurrentPred ||
1031 AltPred == CurrentPred || AltPred == SwappedCurrentPred)
1032 continue;
1034 } else if (InstOpcode == Opcode) {
1035 assert(InstOpcode == AltOpcode &&
1036 "Alternate instructions are only supported by BinaryOperator and "
1037 "CastInst.");
1038 if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
1039 if (Gep->getNumOperands() != 2 ||
1040 Gep->getOperand(0)->getType() != IBase->getOperand(0)->getType())
1041 return InstructionsState::invalid();
1042 } else if (auto *EI = dyn_cast<ExtractElementInst>(I)) {
1043 if (!isVectorLikeInstWithConstOps(EI))
1044 return InstructionsState::invalid();
1045 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1046 auto *BaseLI = cast<LoadInst>(IBase);
1047 if (!LI->isSimple() || !BaseLI->isSimple())
1048 return InstructionsState::invalid();
1049 } else if (auto *Call = dyn_cast<CallInst>(I)) {
1050 auto *CallBase = cast<CallInst>(IBase);
1051 if (Call->getCalledFunction() != CallBase->getCalledFunction())
1052 return InstructionsState::invalid();
1053 if (Call->hasOperandBundles() &&
1054 (!CallBase->hasOperandBundles() ||
1055 !std::equal(Call->op_begin() + Call->getBundleOperandsStartIndex(),
1056 Call->op_begin() + Call->getBundleOperandsEndIndex(),
1057 CallBase->op_begin() +
1058 CallBase->getBundleOperandsStartIndex())))
1059 return InstructionsState::invalid();
1060 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, &TLI);
1061 if (ID != BaseID)
1062 return InstructionsState::invalid();
1063 if (!ID) {
1064 SmallVector<VFInfo> Mappings = VFDatabase(*Call).getMappings(*Call);
1065 if (Mappings.size() != BaseMappings.size() ||
1066 Mappings.front().ISA != BaseMappings.front().ISA ||
1067 Mappings.front().ScalarName != BaseMappings.front().ScalarName ||
1068 Mappings.front().VectorName != BaseMappings.front().VectorName ||
1069 Mappings.front().Shape.VF != BaseMappings.front().Shape.VF ||
1070 Mappings.front().Shape.Parameters !=
1071 BaseMappings.front().Shape.Parameters)
1072 return InstructionsState::invalid();
1075 continue;
1077 return InstructionsState::invalid();
1080 return InstructionsState(cast<Instruction>(V),
1081 cast<Instruction>(VL[AltIndex]));
1084 /// \returns true if all of the values in \p VL have the same type or false
1085 /// otherwise.
1086 static bool allSameType(ArrayRef<Value *> VL) {
1087 Type *Ty = VL.front()->getType();
1088 return all_of(VL.drop_front(), [&](Value *V) { return V->getType() == Ty; });
1091 /// \returns True if an in-tree use also needs an extract. This refers to a
1092 /// possible scalar operand in a vectorized instruction.
1093 static bool doesInTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
1094 TargetLibraryInfo *TLI) {
1095 if (!UserInst)
1096 return false;
1097 unsigned Opcode = UserInst->getOpcode();
1098 switch (Opcode) {
1099 case Instruction::Load: {
1100 LoadInst *LI = cast<LoadInst>(UserInst);
1101 return (LI->getPointerOperand() == Scalar);
1103 case Instruction::Store: {
1104 StoreInst *SI = cast<StoreInst>(UserInst);
1105 return (SI->getPointerOperand() == Scalar);
1107 case Instruction::Call: {
1108 CallInst *CI = cast<CallInst>(UserInst);
1109 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
1110 return any_of(enumerate(CI->args()), [&](auto &&Arg) {
1111 return isVectorIntrinsicWithScalarOpAtArg(ID, Arg.index()) &&
1112 Arg.value().get() == Scalar;
1115 default:
1116 return false;
1120 /// \returns the AA location that is being accessed by the instruction.
1121 static MemoryLocation getLocation(Instruction *I) {
1122 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1123 return MemoryLocation::get(SI);
1124 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1125 return MemoryLocation::get(LI);
1126 return MemoryLocation();
1129 /// \returns True if the instruction is not a volatile or atomic load/store.
1130 static bool isSimple(Instruction *I) {
1131 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1132 return LI->isSimple();
1133 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1134 return SI->isSimple();
1135 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
1136 return !MI->isVolatile();
1137 return true;
1140 /// Shuffles \p Mask in accordance with the given \p SubMask.
1141 /// \param ExtendingManyInputs Supports reshuffling of the mask with not only
1142 /// one but two input vectors.
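/// For example, addMask applied to Mask = {1, 0, 3, 2} with SubMask =
/// {2, 3, 0, 1} leaves Mask = {3, 2, 1, 0}, i.e. \p SubMask selects from the
/// already-shuffled \p Mask.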
1143 static void addMask(SmallVectorImpl<int> &Mask, ArrayRef<int> SubMask,
1144 bool ExtendingManyInputs = false) {
1145 if (SubMask.empty())
1146 return;
1147 assert(
1148 (!ExtendingManyInputs || SubMask.size() > Mask.size() ||
1149 // Check if input scalars were extended to match the size of other node.
1150 (SubMask.size() == Mask.size() && Mask.back() == PoisonMaskElem)) &&
1151 "SubMask with many inputs support must be larger than the mask.");
1152 if (Mask.empty()) {
1153 Mask.append(SubMask.begin(), SubMask.end());
1154 return;
1156 SmallVector<int> NewMask(SubMask.size(), PoisonMaskElem);
1157 int TermValue = std::min(Mask.size(), SubMask.size());
1158 for (int I = 0, E = SubMask.size(); I < E; ++I) {
1159 if (SubMask[I] == PoisonMaskElem ||
1160 (!ExtendingManyInputs &&
1161 (SubMask[I] >= TermValue || Mask[SubMask[I]] >= TermValue)))
1162 continue;
1163 NewMask[I] = Mask[SubMask[I]];
1165 Mask.swap(NewMask);
1168 /// Order may have elements assigned the special value (size) which is out of
1169 /// bounds. Such indices only appear in places which correspond to undef values
1170 /// (see canReuseExtract for details) and are used to prevent undef values from
1171 /// affecting the operand ordering.
1172 /// The first loop below simply finds all unused indices and then the next loop
1173 /// nest assigns these indices to the undef value positions.
1174 /// As an example below Order has two undef positions and they have assigned
1175 /// values 3 and 7 respectively:
1176 /// before: 6 9 5 4 9 2 1 0
1177 /// after: 6 3 5 4 7 2 1 0
1178 static void fixupOrderingIndices(MutableArrayRef<unsigned> Order) {
1179 const unsigned Sz = Order.size();
1180 SmallBitVector UnusedIndices(Sz, /*t=*/true);
1181 SmallBitVector MaskedIndices(Sz);
1182 for (unsigned I = 0; I < Sz; ++I) {
1183 if (Order[I] < Sz)
1184 UnusedIndices.reset(Order[I]);
1185 else
1186 MaskedIndices.set(I);
1188 if (MaskedIndices.none())
1189 return;
1190 assert(UnusedIndices.count() == MaskedIndices.count() &&
1191 "Non-synced masked/available indices.");
1192 int Idx = UnusedIndices.find_first();
1193 int MIdx = MaskedIndices.find_first();
1194 while (MIdx >= 0) {
1195 assert(Idx >= 0 && "Indices must be synced.");
1196 Order[MIdx] = Idx;
1197 Idx = UnusedIndices.find_next(Idx);
1198 MIdx = MaskedIndices.find_next(MIdx);
1202 /// \returns a bitset for selecting opcodes. false for Opcode0 and true for
1203 /// Opcode1.
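/// For example, for a bundle of scalar {add, sub, add, sub} instructions with
/// Opcode0 = Add and Opcode1 = Sub, the resulting mask is {0, 1, 0, 1} (bits set
/// for the lanes that use Opcode1).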
1204 static SmallBitVector getAltInstrMask(ArrayRef<Value *> VL, unsigned Opcode0,
1205 unsigned Opcode1) {
1206 Type *ScalarTy = VL[0]->getType();
1207 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
1208 SmallBitVector OpcodeMask(VL.size() * ScalarTyNumElements, false);
1209 for (unsigned Lane : seq<unsigned>(VL.size())) {
1210 if (isa<PoisonValue>(VL[Lane]))
1211 continue;
1212 if (cast<Instruction>(VL[Lane])->getOpcode() == Opcode1)
1213 OpcodeMask.set(Lane * ScalarTyNumElements,
1214 Lane * ScalarTyNumElements + ScalarTyNumElements);
1216 return OpcodeMask;
1219 namespace llvm {
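/// Builds the inverse of the permutation given by \p Indices into \p Mask, so
/// that Mask[Indices[I]] == I. For example, Indices = {2, 0, 1} produces
/// Mask = {1, 2, 0}.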
1221 static void inversePermutation(ArrayRef<unsigned> Indices,
1222 SmallVectorImpl<int> &Mask) {
1223 Mask.clear();
1224 const unsigned E = Indices.size();
1225 Mask.resize(E, PoisonMaskElem);
1226 for (unsigned I = 0; I < E; ++I)
1227 Mask[Indices[I]] = I;
1230 /// Reorders the list of scalars in accordance with the given \p Mask.
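/// For example, Scalars = {a, b, c} with Mask = {2, 0, 1} becomes {b, c, a}:
/// the element at position I moves to position Mask[I].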
1231 static void reorderScalars(SmallVectorImpl<Value *> &Scalars,
1232 ArrayRef<int> Mask) {
1233 assert(!Mask.empty() && "Expected non-empty mask.");
1234 SmallVector<Value *> Prev(Scalars.size(),
1235 PoisonValue::get(Scalars.front()->getType()));
1236 Prev.swap(Scalars);
1237 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
1238 if (Mask[I] != PoisonMaskElem)
1239 Scalars[Mask[I]] = Prev[I];
1242 /// Checks if the provided value does not require scheduling. It does not
1243 /// require scheduling if this is not an instruction or it is an instruction
1244 /// that does not read/write memory and all of whose operands are either not
1245 /// instructions, or are phi nodes or instructions from different blocks.
1246 static bool areAllOperandsNonInsts(Value *V) {
1247 auto *I = dyn_cast<Instruction>(V);
1248 if (!I)
1249 return true;
1250 return !mayHaveNonDefUseDependency(*I) &&
1251 all_of(I->operands(), [I](Value *V) {
1252 auto *IO = dyn_cast<Instruction>(V);
1253 if (!IO)
1254 return true;
1255 return isa<PHINode>(IO) || IO->getParent() != I->getParent();
1259 /// Checks if the provided value does not require scheduling. It does not
1260 /// require scheduling if this is not an instruction or it is an instruction
1261 /// that does not read/write memory and all users are phi nodes or instructions
1262 /// from different blocks.
1263 static bool isUsedOutsideBlock(Value *V) {
1264 auto *I = dyn_cast<Instruction>(V);
1265 if (!I)
1266 return true;
1267 // Limits the number of uses to save compile time.
1268 return !I->mayReadOrWriteMemory() && !I->hasNUsesOrMore(UsesLimit) &&
1269 all_of(I->users(), [I](User *U) {
1270 auto *IU = dyn_cast<Instruction>(U);
1271 if (!IU)
1272 return true;
1273 return IU->getParent() != I->getParent() || isa<PHINode>(IU);
1277 /// Checks if the specified value does not require scheduling. It does not
1278 /// require scheduling if all operands and all users do not need to be scheduled
1279 /// in the current basic block.
1280 static bool doesNotNeedToBeScheduled(Value *V) {
1281 return areAllOperandsNonInsts(V) && isUsedOutsideBlock(V);
1284 /// Checks if the specified array of instructions does not require scheduling.
1285 /// This is the case if either all instructions have operands that do not
1286 /// require scheduling, or all their users do not require scheduling since they
1287 /// are phis or reside in other basic blocks.
1288 static bool doesNotNeedToSchedule(ArrayRef<Value *> VL) {
1289 return !VL.empty() &&
1290 (all_of(VL, isUsedOutsideBlock) || all_of(VL, areAllOperandsNonInsts));
1293 /// Returns true if the widened type of \p Ty elements with size \p Sz represents
1294 /// a full vector type, i.e. adding an extra element results in extra parts upon
1295 /// type legalization.
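/// For example, Sz = 8 is always accepted (power of two); if \p TTI splits
/// <12 x i32> into 3 whole parts, Sz = 12 is accepted as well (12 / 3 = 4 is a
/// power of two and 12 % 3 == 0), while Sz = 6 split into 2 parts is rejected
/// because 6 / 2 = 3 is not a power of two.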
1296 static bool hasFullVectorsOrPowerOf2(const TargetTransformInfo &TTI, Type *Ty,
1297 unsigned Sz) {
1298 if (Sz <= 1)
1299 return false;
1300 if (!isValidElementType(Ty) && !isa<FixedVectorType>(Ty))
1301 return false;
1302 if (has_single_bit(Sz))
1303 return true;
1304 const unsigned NumParts = TTI.getNumberOfParts(getWidenedType(Ty, Sz));
1305 return NumParts > 0 && NumParts < Sz && has_single_bit(Sz / NumParts) &&
1306 Sz % NumParts == 0;
1309 namespace slpvectorizer {
1311 /// Bottom Up SLP Vectorizer.
1312 class BoUpSLP {
1313 struct TreeEntry;
1314 struct ScheduleData;
1315 class ShuffleCostEstimator;
1316 class ShuffleInstructionBuilder;
1318 public:
1319 /// Tracks the state we can represent the loads in the given sequence.
1320 enum class LoadsState {
1321 Gather,
1322 Vectorize,
1323 ScatterVectorize,
1324 StridedVectorize
1327 using ValueList = SmallVector<Value *, 8>;
1328 using InstrList = SmallVector<Instruction *, 16>;
1329 using ValueSet = SmallPtrSet<Value *, 16>;
1330 using StoreList = SmallVector<StoreInst *, 8>;
1331 using ExtraValueToDebugLocsMap = SmallDenseSet<Value *, 4>;
1332 using OrdersType = SmallVector<unsigned, 4>;
1334 BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
1335 TargetLibraryInfo *TLi, AAResults *Aa, LoopInfo *Li,
1336 DominatorTree *Dt, AssumptionCache *AC, DemandedBits *DB,
1337 const DataLayout *DL, OptimizationRemarkEmitter *ORE)
1338 : BatchAA(*Aa), F(Func), SE(Se), TTI(Tti), TLI(TLi), LI(Li), DT(Dt),
1339 AC(AC), DB(DB), DL(DL), ORE(ORE),
1340 Builder(Se->getContext(), TargetFolder(*DL)) {
1341 CodeMetrics::collectEphemeralValues(F, AC, EphValues);
1342 // Use the vector register size specified by the target unless overridden
1343 // by a command-line option.
1344 // TODO: It would be better to limit the vectorization factor based on
1345 // data type rather than just register size. For example, x86 AVX has
1346 // 256-bit registers, but it does not support integer operations
1347 // at that width (that requires AVX2).
1348 if (MaxVectorRegSizeOption.getNumOccurrences())
1349 MaxVecRegSize = MaxVectorRegSizeOption;
1350 else
1351 MaxVecRegSize =
1352 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
1353 .getFixedValue();
1355 if (MinVectorRegSizeOption.getNumOccurrences())
1356 MinVecRegSize = MinVectorRegSizeOption;
1357 else
1358 MinVecRegSize = TTI->getMinVectorRegisterBitWidth();
1361 /// Vectorize the tree that starts with the elements in \p VL.
1362 /// Returns the vectorized root.
1363 Value *vectorizeTree();
1365 /// Vectorize the tree but with the list of externally used values \p
1366 /// ExternallyUsedValues. Values in this MapVector can be replaced by the
1367 /// generated extractvalue instructions.
1368 Value *
1369 vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
1370 Instruction *ReductionRoot = nullptr);
1372 /// \returns the cost incurred by unwanted spills and fills, caused by
1373 /// holding live values over call sites.
1374 InstructionCost getSpillCost() const;
1376 /// \returns the vectorization cost of the subtree that starts at \p VL.
1377 /// A negative number means that this is profitable.
1378 InstructionCost getTreeCost(ArrayRef<Value *> VectorizedVals = {});
1380 /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
1381 /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
1382 void buildTree(ArrayRef<Value *> Roots,
1383 const SmallDenseSet<Value *> &UserIgnoreLst);
1385 /// Construct a vectorizable tree that starts at \p Roots.
1386 void buildTree(ArrayRef<Value *> Roots);
1388 /// Returns whether the root node has in-tree uses.
1389 bool doesRootHaveInTreeUses() const {
1390 return !VectorizableTree.empty() &&
1391 !VectorizableTree.front()->UserTreeIndices.empty();
1394 /// Return the scalars of the root node.
1395 ArrayRef<Value *> getRootNodeScalars() const {
1396 assert(!VectorizableTree.empty() && "No graph to get the first node from");
1397 return VectorizableTree.front()->Scalars;
1400 /// Returns the type/is-signed info for the root node in the graph without
1401 /// casting.
1402 std::optional<std::pair<Type *, bool>> getRootNodeTypeWithNoCast() const {
1403 const TreeEntry &Root = *VectorizableTree.front().get();
1404 if (Root.State != TreeEntry::Vectorize || Root.isAltShuffle() ||
1405 !Root.Scalars.front()->getType()->isIntegerTy())
1406 return std::nullopt;
1407 auto It = MinBWs.find(&Root);
1408 if (It != MinBWs.end())
1409 return std::make_pair(IntegerType::get(Root.Scalars.front()->getContext(),
1410 It->second.first),
1411 It->second.second);
1412 if (Root.getOpcode() == Instruction::ZExt ||
1413 Root.getOpcode() == Instruction::SExt)
1414 return std::make_pair(cast<CastInst>(Root.getMainOp())->getSrcTy(),
1415 Root.getOpcode() == Instruction::SExt);
1416 return std::nullopt;
1419 /// Checks if the root graph node can be emitted with narrower bitwidth at
1420 /// codegen and returns its signedness, if so.
1421 bool isSignedMinBitwidthRootNode() const {
1422 return MinBWs.at(VectorizableTree.front().get()).second;
1425 /// Returns the reduction type after minbitwidth analysis.
1426 FixedVectorType *getReductionType() const {
1427 if (ReductionBitWidth == 0 ||
1428 !VectorizableTree.front()->Scalars.front()->getType()->isIntegerTy() ||
1429 ReductionBitWidth >=
1430 DL->getTypeSizeInBits(
1431 VectorizableTree.front()->Scalars.front()->getType()))
1432 return getWidenedType(
1433 VectorizableTree.front()->Scalars.front()->getType(),
1434 VectorizableTree.front()->getVectorFactor());
1435 return getWidenedType(
1436 IntegerType::get(
1437 VectorizableTree.front()->Scalars.front()->getContext(),
1438 ReductionBitWidth),
1439 VectorizableTree.front()->getVectorFactor());
1442 /// Builds external uses of the vectorized scalars, i.e. the list of
1443 /// vectorized scalars to be extracted, their lanes and their scalar users. \p
1444 /// ExternallyUsedValues contains an additional list of external uses to handle
1445 /// vectorization of reductions.
1446 void
1447 buildExternalUses(const ExtraValueToDebugLocsMap &ExternallyUsedValues = {});
1449 /// Transforms graph nodes to target specific representations, if profitable.
1450 void transformNodes();
1452 /// Clear the internal data structures that are created by 'buildTree'.
1453 void deleteTree() {
1454 VectorizableTree.clear();
1455 ScalarToTreeEntry.clear();
1456 MultiNodeScalars.clear();
1457 MustGather.clear();
1458 NonScheduledFirst.clear();
1459 EntryToLastInstruction.clear();
1460 LoadEntriesToVectorize.clear();
1461 IsGraphTransformMode = false;
1462 GatheredLoadsEntriesFirst.reset();
1463 ExternalUses.clear();
1464 ExternalUsesAsOriginalScalar.clear();
1465 for (auto &Iter : BlocksSchedules) {
1466 BlockScheduling *BS = Iter.second.get();
1467 BS->clear();
1469 MinBWs.clear();
1470 ReductionBitWidth = 0;
1471 BaseGraphSize = 1;
1472 CastMaxMinBWSizes.reset();
1473 ExtraBitWidthNodes.clear();
1474 InstrElementSize.clear();
1475 UserIgnoreList = nullptr;
1476 PostponedGathers.clear();
1477 ValueToGatherNodes.clear();
1480 unsigned getTreeSize() const { return VectorizableTree.size(); }
1482 /// Returns the base graph size, before any transformations.
1483 unsigned getCanonicalGraphSize() const { return BaseGraphSize; }
1485 /// Perform LICM and CSE on the newly generated gather sequences.
1486 void optimizeGatherSequence();
1488 /// Does this non-empty order represent an identity order? Identity
1489 /// should be represented as an empty order, so this is used to
1490 /// decide if we can canonicalize a computed order. Undef elements
1491 /// (represented as size) are ignored.
1492 bool isIdentityOrder(ArrayRef<unsigned> Order) const {
1493 assert(!Order.empty() && "expected non-empty order");
1494 const unsigned Sz = Order.size();
1495 return all_of(enumerate(Order), [&](const auto &P) {
1496 return P.value() == P.index() || P.value() == Sz;
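// E.g. (illustrative values only): Order = {0, 1, 2, 3} is an identity order,
// and so is {0, 4, 2, 3} when Sz == 4, because an element equal to the size
// stands for an undefined position and is ignored.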
1500 /// Checks if the specified gather tree entry \p TE can be represented as a
1501 /// shuffled vector entry + (possibly) permutation with other gathers. It
1502 /// implements the checks only for possibly ordered scalars (Loads,
1503 /// ExtractElement, ExtractValue), which can be part of the graph.
1504 std::optional<OrdersType> findReusedOrderedScalars(const TreeEntry &TE);
1506 /// Sort loads into increasing pointer offsets to allow greater clustering.
1507 std::optional<OrdersType> findPartiallyOrderedLoads(const TreeEntry &TE);
1509 /// Gets reordering data for the given tree entry. If the entry is vectorized
1510 /// - just return ReorderIndices, otherwise check if the scalars can be
1511 /// reordered and return the most optimal order.
1512 /// \return std::nullopt if ordering is not important, empty order, if
1513 /// identity order is important, or the actual order.
1514 /// \param TopToBottom If true, include the order of vectorized stores and
1515 /// insertelement nodes, otherwise skip them.
1516 std::optional<OrdersType> getReorderingData(const TreeEntry &TE,
1517 bool TopToBottom);
1519 /// Reorders the current graph to the most profitable order starting from the
1520 /// root node to the leaf nodes. The best order is chosen only from the nodes
1521 /// of the same size (vectorization factor). Smaller nodes are considered
1522 /// parts of a subgraph with a smaller VF and they are reordered independently.
1523 /// We can do this because we still need to extend smaller nodes to the wider VF
1524 /// and we can merge the reordering shuffles with the widening shuffles.
1525 void reorderTopToBottom();
1527 /// Reorders the current graph to the most profitable order starting from
1528 /// leaves to the root. It allows rotating small subgraphs and reducing the
1529 /// number of reshuffles if the leaf nodes use the same order. In this case we
1530 /// can merge the orders and just shuffle the user node instead of shuffling
1531 /// its operands. Plus, even if the leaf nodes have different orders, it
1532 /// allows sinking the reordering in the graph closer to the root node and
1533 /// merging it later during analysis.
1534 void reorderBottomToTop(bool IgnoreReorder = false);
1536 /// \return The vector element size in bits to use when vectorizing the
1537 /// expression tree ending at \p V. If V is a store, the size is the width of
1538 /// the stored value. Otherwise, the size is the width of the largest loaded
1539 /// value reaching V. This method is used by the vectorizer to calculate
1540 /// vectorization factors.
1541 unsigned getVectorElementSize(Value *V);
1543 /// Compute the minimum type sizes required to represent the entries in a
1544 /// vectorizable tree.
1545 void computeMinimumValueSizes();
1547 // \returns maximum vector register size as set by TTI or overridden by cl::opt.
1548 unsigned getMaxVecRegSize() const {
1549 return MaxVecRegSize;
1552 // \returns minimum vector register size as set by cl::opt.
1553 unsigned getMinVecRegSize() const {
1554 return MinVecRegSize;
1557 unsigned getMinVF(unsigned Sz) const {
1558 return std::max(2U, getMinVecRegSize() / Sz);
1561 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const {
1562 unsigned MaxVF = MaxVFOption.getNumOccurrences() ?
1563 MaxVFOption : TTI->getMaximumVF(ElemWidth, Opcode);
1564 return MaxVF ? MaxVF : UINT_MAX;
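// Rough example: with MinVecRegSize = 128 and 32-bit elements (Sz = 32),
// getMinVF returns max(2, 128 / 32) = 4; getMaximumVF returns UINT_MAX when
// neither the MaxVFOption command-line override nor TTI imposes a limit
// (both report 0).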
1567 /// Check if a homogeneous aggregate is isomorphic to some VectorType.
1568 /// Accepts homogeneous multidimensional aggregate of scalars/vectors like
1569 /// {[4 x i16], [4 x i16]}, { <2 x float>, <2 x float> },
1570 /// {{{i16, i16}, {i16, i16}}, {{i16, i16}, {i16, i16}}} and so on.
1572 /// \returns number of elements in vector if isomorphism exists, 0 otherwise.
1573 unsigned canMapToVector(Type *T) const;
1575 /// \returns True if the VectorizableTree is both tiny and not fully
1576 /// vectorizable. We do not vectorize such trees.
1577 bool isTreeTinyAndNotFullyVectorizable(bool ForReduction = false) const;
1579 /// Checks if the graph and all its subgraphs cannot be better vectorized.
1580 /// This may happen if all gather nodes are loads and they cannot be
1581 /// "clusterized". In this case even subgraphs cannot be vectorized more
1582 /// effectively than the base graph.
1583 bool isTreeNotExtendable() const;
1585 /// Assume that a legal-sized 'or'-reduction of shifted/zexted loaded values
1586 /// can be load combined in the backend. Load combining may not be allowed in
1587 /// the IR optimizer, so we do not want to alter the pattern. For example,
1588 /// partially transforming a scalar bswap() pattern into vector code is
1589 /// effectively impossible for the backend to undo.
1590 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1591 /// may not be necessary.
1592 bool isLoadCombineReductionCandidate(RecurKind RdxKind) const;
1594 /// Assume that a vector of stores of bitwise-or/shifted/zexted loaded values
1595 /// can be load combined in the backend. Load combining may not be allowed in
1596 /// the IR optimizer, so we do not want to alter the pattern. For example,
1597 /// partially transforming a scalar bswap() pattern into vector code is
1598 /// effectively impossible for the backend to undo.
1599 /// TODO: If load combining is allowed in the IR optimizer, this analysis
1600 /// may not be necessary.
1601 bool isLoadCombineCandidate(ArrayRef<Value *> Stores) const;
1603 /// Checks if the given array of loads can be represented as a vectorized
1604 /// load, a scatter, or just a simple gather.
1605 /// \param VL list of loads.
1606 /// \param VL0 main load value.
1607 /// \param Order returned order of load instructions.
1608 /// \param PointerOps returned list of pointer operands.
1609 /// \param BestVF returns the best vector factor, if the recursive check found
1610 /// better vectorization sequences rather than a masked gather.
1611 /// \param TryRecursiveCheck used to check if a long masked gather can be
1612 /// represented as a series of loads/insert subvectors, if profitable.
1613 LoadsState canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
1614 SmallVectorImpl<unsigned> &Order,
1615 SmallVectorImpl<Value *> &PointerOps,
1616 unsigned *BestVF = nullptr,
1617 bool TryRecursiveCheck = true) const;
1619 /// Registers a non-vectorizable sequence of loads.
1620 template <typename T> void registerNonVectorizableLoads(ArrayRef<T *> VL) {
1621 ListOfKnonwnNonVectorizableLoads.insert(hash_value(VL));
1624 /// Checks if the given sequence of loads is known to be non-vectorizable.
1625 template <typename T>
1626 bool areKnownNonVectorizableLoads(ArrayRef<T *> VL) const {
1627 return ListOfKnonwnNonVectorizableLoads.contains(hash_value(VL));
1630 OptimizationRemarkEmitter *getORE() { return ORE; }
1632 /// This structure holds any data we need about the edges being traversed
1633 /// during buildTree_rec(). We keep track of:
1634 /// (i) the user TreeEntry index, and
1635 /// (ii) the index of the edge.
1636 struct EdgeInfo {
1637 EdgeInfo() = default;
1638 EdgeInfo(TreeEntry *UserTE, unsigned EdgeIdx)
1639 : UserTE(UserTE), EdgeIdx(EdgeIdx) {}
1640 /// The user TreeEntry.
1641 TreeEntry *UserTE = nullptr;
1642 /// The operand index of the use.
1643 unsigned EdgeIdx = UINT_MAX;
1644 #ifndef NDEBUG
1645 friend inline raw_ostream &operator<<(raw_ostream &OS,
1646 const BoUpSLP::EdgeInfo &EI) {
1647 EI.dump(OS);
1648 return OS;
1650 /// Debug print.
1651 void dump(raw_ostream &OS) const {
1652 OS << "{User:" << (UserTE ? std::to_string(UserTE->Idx) : "null")
1653 << " EdgeIdx:" << EdgeIdx << "}";
1655 LLVM_DUMP_METHOD void dump() const { dump(dbgs()); }
1656 #endif
1657 bool operator == (const EdgeInfo &Other) const {
1658 return UserTE == Other.UserTE && EdgeIdx == Other.EdgeIdx;
1662 /// A helper class used for scoring candidates for two consecutive lanes.
1663 class LookAheadHeuristics {
1664 const TargetLibraryInfo &TLI;
1665 const DataLayout &DL;
1666 ScalarEvolution &SE;
1667 const BoUpSLP &R;
1668 int NumLanes; // Total number of lanes (aka vectorization factor).
1669 int MaxLevel; // The maximum recursion depth for accumulating score.
1671 public:
1672 LookAheadHeuristics(const TargetLibraryInfo &TLI, const DataLayout &DL,
1673 ScalarEvolution &SE, const BoUpSLP &R, int NumLanes,
1674 int MaxLevel)
1675 : TLI(TLI), DL(DL), SE(SE), R(R), NumLanes(NumLanes),
1676 MaxLevel(MaxLevel) {}
1678 // The hard-coded scores listed here are not very important, though they should
1679 // be higher for better matches to improve the resulting cost. When
1680 // computing the scores of matching one sub-tree with another, we are
1681 // basically counting the number of values that are matching. So even if all
1682 // scores are set to 1, we would still get a decent matching result.
1683 // However, sometimes we have to break ties. For example we may have to
1684 // choose between matching loads vs matching opcodes. This is what these
1685 // scores are helping us with: they provide the order of preference. Also,
1686 // this is important if the scalar is externally used or used in another
1687 // tree entry node in a different lane.
1689 /// Loads from consecutive memory addresses, e.g. load(A[i]), load(A[i+1]).
1690 static const int ScoreConsecutiveLoads = 4;
1691 /// The same load multiple times. This should have a better score than
1692 /// `ScoreSplat` because on x86, for a 2-lane vector, we can represent it
1693 /// with `movddup (%reg), xmm0`, which has a throughput of 0.5, versus 0.5 for
1694 /// a vector load plus 1.0 for a broadcast.
1695 static const int ScoreSplatLoads = 3;
1696 /// Loads from reversed memory addresses, e.g. load(A[i+1]), load(A[i]).
1697 static const int ScoreReversedLoads = 3;
1698 /// A load candidate for masked gather.
1699 static const int ScoreMaskedGatherCandidate = 1;
1700 /// ExtractElementInst from same vector and consecutive indexes.
1701 static const int ScoreConsecutiveExtracts = 4;
1702 /// ExtractElementInst from same vector and reversed indices.
1703 static const int ScoreReversedExtracts = 3;
1704 /// Constants.
1705 static const int ScoreConstants = 2;
1706 /// Instructions with the same opcode.
1707 static const int ScoreSameOpcode = 2;
1709 /// Instructions with alternate opcodes (e.g., add + sub).
1709 static const int ScoreAltOpcodes = 1;
1710 /// Identical instructions (a.k.a. splat or broadcast).
1711 static const int ScoreSplat = 1;
1712 /// Matching with an undef is preferable to failing.
1713 static const int ScoreUndef = 1;
1714 /// Score for failing to find a decent match.
1715 static const int ScoreFail = 0;
1716 /// Score if all users are vectorized.
1717 static const int ScoreAllUserVectorized = 1;
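// A rough tie-breaking illustration: when pairing load(A[0]) with either
// load(A[1]) or a reuse of load(A[0]) itself, the consecutive pair wins
// (ScoreConsecutiveLoads = 4 vs. at most ScoreSplatLoads = 3, or ScoreSplat = 1
// if a broadcast load is not legal), while pairing it with an unrelated add
// typically scores ScoreFail = 0.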
1719 /// \returns the score of placing \p V1 and \p V2 in consecutive lanes.
1720 /// \p U1 and \p U2 are the users of \p V1 and \p V2.
1721 /// Also, checks if \p V1 and \p V2 are compatible with instructions in \p
1722 /// MainAltOps.
1723 int getShallowScore(Value *V1, Value *V2, Instruction *U1, Instruction *U2,
1724 ArrayRef<Value *> MainAltOps) const {
1725 if (!isValidElementType(V1->getType()) ||
1726 !isValidElementType(V2->getType()))
1727 return LookAheadHeuristics::ScoreFail;
1729 if (V1 == V2) {
1730 if (isa<LoadInst>(V1)) {
1731 // Returns true if the users of V1 and V2 won't need to be extracted.
1732 auto AllUsersAreInternal = [U1, U2, this](Value *V1, Value *V2) {
1733 // Bail out if we have too many uses to save compilation time.
1734 if (V1->hasNUsesOrMore(UsesLimit) || V2->hasNUsesOrMore(UsesLimit))
1735 return false;
1737 auto AllUsersVectorized = [U1, U2, this](Value *V) {
1738 return llvm::all_of(V->users(), [U1, U2, this](Value *U) {
1739 return U == U1 || U == U2 || R.getTreeEntry(U) != nullptr;
1742 return AllUsersVectorized(V1) && AllUsersVectorized(V2);
1744 // A broadcast of a load can be cheaper on some targets.
1745 if (R.TTI->isLegalBroadcastLoad(V1->getType(),
1746 ElementCount::getFixed(NumLanes)) &&
1747 ((int)V1->getNumUses() == NumLanes ||
1748 AllUsersAreInternal(V1, V2)))
1749 return LookAheadHeuristics::ScoreSplatLoads;
1751 return LookAheadHeuristics::ScoreSplat;
1754 auto CheckSameEntryOrFail = [&]() {
1755 if (const TreeEntry *TE1 = R.getTreeEntry(V1);
1756 TE1 && TE1 == R.getTreeEntry(V2))
1757 return LookAheadHeuristics::ScoreSplatLoads;
1758 return LookAheadHeuristics::ScoreFail;
1761 auto *LI1 = dyn_cast<LoadInst>(V1);
1762 auto *LI2 = dyn_cast<LoadInst>(V2);
1763 if (LI1 && LI2) {
1764 if (LI1->getParent() != LI2->getParent() || !LI1->isSimple() ||
1765 !LI2->isSimple())
1766 return CheckSameEntryOrFail();
1768 std::optional<int> Dist = getPointersDiff(
1769 LI1->getType(), LI1->getPointerOperand(), LI2->getType(),
1770 LI2->getPointerOperand(), DL, SE, /*StrictCheck=*/true);
1771 if (!Dist || *Dist == 0) {
1772 if (getUnderlyingObject(LI1->getPointerOperand()) ==
1773 getUnderlyingObject(LI2->getPointerOperand()) &&
1774 R.TTI->isLegalMaskedGather(
1775 getWidenedType(LI1->getType(), NumLanes), LI1->getAlign()))
1776 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1777 return CheckSameEntryOrFail();
1779 // The distance is too large - still may be profitable to use masked
1780 // loads/gathers.
1781 if (std::abs(*Dist) > NumLanes / 2)
1782 return LookAheadHeuristics::ScoreMaskedGatherCandidate;
1783 // This will still detect consecutive loads, but we might have "holes"
1784 // in some cases. It is ok for non-power-of-2 vectorization and may produce
1785 // better results. It should not affect current vectorization.
1786 return (*Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveLoads
1787 : LookAheadHeuristics::ScoreReversedLoads;
1790 auto *C1 = dyn_cast<Constant>(V1);
1791 auto *C2 = dyn_cast<Constant>(V2);
1792 if (C1 && C2)
1793 return LookAheadHeuristics::ScoreConstants;
1795 // Extracts from consecutive indexes of the same vector get a better score, as
1796 // the extracts could be optimized away.
1797 Value *EV1;
1798 ConstantInt *Ex1Idx;
1799 if (match(V1, m_ExtractElt(m_Value(EV1), m_ConstantInt(Ex1Idx)))) {
1800 // Undefs are always profitable for extractelements.
1801 // Compiler can easily combine poison and extractelement <non-poison> or
1802 // undef and extractelement <poison>. But combining undef +
1803 // extractelement <non-poison-but-may-produce-poison> requires some
1804 // extra operations.
1805 if (isa<UndefValue>(V2))
1806 return (isa<PoisonValue>(V2) || isUndefVector(EV1).all())
1807 ? LookAheadHeuristics::ScoreConsecutiveExtracts
1808 : LookAheadHeuristics::ScoreSameOpcode;
1809 Value *EV2 = nullptr;
1810 ConstantInt *Ex2Idx = nullptr;
1811 if (match(V2,
1812 m_ExtractElt(m_Value(EV2), m_CombineOr(m_ConstantInt(Ex2Idx),
1813 m_Undef())))) {
1814 // Undefs are always profitable for extractelements.
1815 if (!Ex2Idx)
1816 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1817 if (isUndefVector(EV2).all() && EV2->getType() == EV1->getType())
1818 return LookAheadHeuristics::ScoreConsecutiveExtracts;
1819 if (EV2 == EV1) {
1820 int Idx1 = Ex1Idx->getZExtValue();
1821 int Idx2 = Ex2Idx->getZExtValue();
1822 int Dist = Idx2 - Idx1;
1823 // The distance is too large - still may be profitable to use
1824 // shuffles.
1825 if (std::abs(Dist) == 0)
1826 return LookAheadHeuristics::ScoreSplat;
1827 if (std::abs(Dist) > NumLanes / 2)
1828 return LookAheadHeuristics::ScoreSameOpcode;
1829 return (Dist > 0) ? LookAheadHeuristics::ScoreConsecutiveExtracts
1830 : LookAheadHeuristics::ScoreReversedExtracts;
1832 return LookAheadHeuristics::ScoreAltOpcodes;
1834 return CheckSameEntryOrFail();
1837 auto *I1 = dyn_cast<Instruction>(V1);
1838 auto *I2 = dyn_cast<Instruction>(V2);
1839 if (I1 && I2) {
1840 if (I1->getParent() != I2->getParent())
1841 return CheckSameEntryOrFail();
1842 SmallVector<Value *, 4> Ops(MainAltOps);
1843 Ops.push_back(I1);
1844 Ops.push_back(I2);
1845 InstructionsState S = getSameOpcode(Ops, TLI);
1846 // Note: Only consider instructions with <= 2 operands to avoid
1847 // complexity explosion.
1848 if (S.getOpcode() &&
1849 (S.getMainOp()->getNumOperands() <= 2 || !MainAltOps.empty() ||
1850 !S.isAltShuffle()) &&
1851 all_of(Ops, [&S](Value *V) {
1852 return isa<PoisonValue>(V) ||
1853 cast<Instruction>(V)->getNumOperands() ==
1854 S.getMainOp()->getNumOperands();
1856 return S.isAltShuffle() ? LookAheadHeuristics::ScoreAltOpcodes
1857 : LookAheadHeuristics::ScoreSameOpcode;
1860 if (I1 && isa<PoisonValue>(V2))
1861 return LookAheadHeuristics::ScoreSameOpcode;
1863 if (isa<UndefValue>(V2))
1864 return LookAheadHeuristics::ScoreUndef;
1866 return CheckSameEntryOrFail();
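// Worked example (illustrative): for two simple loads from &A[0] and &A[1] in
// the same block, getPointersDiff returns 1, so the score is
// ScoreConsecutiveLoads; swapping them gives ScoreReversedLoads; loads of
// &A[0] and &A[7] with NumLanes == 4 fall back to ScoreMaskedGatherCandidate
// because the distance exceeds NumLanes / 2.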
1869 /// Go through the operands of \p LHS and \p RHS recursively until
1870 /// MaxLevel, and return the cumulative score. \p U1 and \p U2 are
1871 /// the users of \p LHS and \p RHS (that is \p LHS and \p RHS are operands
1872 /// of \p U1 and \p U2), except at the beginning of the recursion where
1873 /// these are set to nullptr.
1875 /// For example:
1876 /// \verbatim
1877 /// A[0] B[0] A[1] B[1] C[0] D[0] B[1] A[1]
1878 /// \ / \ / \ / \ /
1879 /// + + + +
1880 /// G1 G2 G3 G4
1881 /// \endverbatim
1882 /// The getScoreAtLevelRec(G1, G2) function will try to match the nodes at
1883 /// each level recursively, accumulating the score. It starts from matching
1884 /// the additions at level 0, then moves on to the loads (level 1). The
1885 /// score of G1 and G2 is higher than G1 and G3, because {A[0],A[1]} and
1886 /// {B[0],B[1]} match with LookAheadHeuristics::ScoreConsecutiveLoads, while
1887 /// {A[0],C[0]} has a score of LookAheadHeuristics::ScoreFail.
1888 /// Please note that the order of the operands does not matter, as we
1889 /// evaluate the score of all profitable combinations of operands. In
1890 /// other words, the score of G1 and G4 is the same as that of G1 and G2. This
1891 /// heuristic is based on ideas described in:
1892 /// Look-ahead SLP: Auto-vectorization in the presence of commutative
1893 /// operations, CGO 2018 by Vasileios Porpodas, Rodrigo C. O. Rocha,
1894 /// Luís F. W. Góes
1895 int getScoreAtLevelRec(Value *LHS, Value *RHS, Instruction *U1,
1896 Instruction *U2, int CurrLevel,
1897 ArrayRef<Value *> MainAltOps) const {
1899 // Get the shallow score of LHS and RHS.
1900 int ShallowScoreAtThisLevel =
1901 getShallowScore(LHS, RHS, U1, U2, MainAltOps);
1903 // If we reached MaxLevel,
1904 // or if LHS and RHS are not instructions,
1905 // or if they are SPLAT,
1906 // or if they are not consecutive,
1907 // or if it is profitable to vectorize loads or extractelements, return
1908 // the current cost early.
1909 auto *I1 = dyn_cast<Instruction>(LHS);
1910 auto *I2 = dyn_cast<Instruction>(RHS);
1911 if (CurrLevel == MaxLevel || !(I1 && I2) || I1 == I2 ||
1912 ShallowScoreAtThisLevel == LookAheadHeuristics::ScoreFail ||
1913 (((isa<LoadInst>(I1) && isa<LoadInst>(I2)) ||
1914 (I1->getNumOperands() > 2 && I2->getNumOperands() > 2) ||
1915 (isa<ExtractElementInst>(I1) && isa<ExtractElementInst>(I2))) &&
1916 ShallowScoreAtThisLevel))
1917 return ShallowScoreAtThisLevel;
1918 assert(I1 && I2 && "Should have early exited.");
1920 // Contains the I2 operand indexes that got matched with I1 operands.
1921 SmallSet<unsigned, 4> Op2Used;
1923 // Recursion towards the operands of I1 and I2. We are trying all possible
1924 // operand pairs, and keeping track of the best score.
1925 for (unsigned OpIdx1 = 0, NumOperands1 = I1->getNumOperands();
1926 OpIdx1 != NumOperands1; ++OpIdx1) {
1927 // Try to pair the OpIdx1-th operand of I1 with the best operand of I2.
1928 int MaxTmpScore = 0;
1929 unsigned MaxOpIdx2 = 0;
1930 bool FoundBest = false;
1931 // If I2 is commutative try all combinations.
1932 unsigned FromIdx = isCommutative(I2) ? 0 : OpIdx1;
1933 unsigned ToIdx = isCommutative(I2)
1934 ? I2->getNumOperands()
1935 : std::min(I2->getNumOperands(), OpIdx1 + 1);
1936 assert(FromIdx <= ToIdx && "Bad index");
1937 for (unsigned OpIdx2 = FromIdx; OpIdx2 != ToIdx; ++OpIdx2) {
1938 // Skip operands already paired with OpIdx1.
1939 if (Op2Used.count(OpIdx2))
1940 continue;
1941 // Recursively calculate the cost at each level
1942 int TmpScore =
1943 getScoreAtLevelRec(I1->getOperand(OpIdx1), I2->getOperand(OpIdx2),
1944 I1, I2, CurrLevel + 1, {});
1945 // Look for the best score.
1946 if (TmpScore > LookAheadHeuristics::ScoreFail &&
1947 TmpScore > MaxTmpScore) {
1948 MaxTmpScore = TmpScore;
1949 MaxOpIdx2 = OpIdx2;
1950 FoundBest = true;
1953 if (FoundBest) {
1954 // Pair {OpIdx1, MaxOpIdx2} was found to be best. Never revisit it.
1955 Op2Used.insert(MaxOpIdx2);
1956 ShallowScoreAtThisLevel += MaxTmpScore;
1959 return ShallowScoreAtThisLevel;
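// Worked example for the diagram above (rough arithmetic, assuming
// MaxLevel >= 2 and distinct, non-aliasing arrays): getScoreAtLevelRec(G1, G2)
// first scores the two adds (ScoreSameOpcode = 2), then pairs their operands
// as {A[0], A[1]} and {B[0], B[1]} (ScoreConsecutiveLoads = 4 each), giving
// 2 + 4 + 4 = 10. getScoreAtLevelRec(G1, G3) only gets the adds'
// ScoreSameOpcode = 2, since {A[0], C[0]} and {B[0], D[0]} both fail.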
1962 /// A helper data structure to hold the operands of a vector of instructions.
1963 /// This supports a fixed vector length for all operand vectors.
1964 class VLOperands {
1965 /// For each operand we need (i) the value, and (ii) the opcode that it
1966 /// would be attached to if the expression was in a left-linearized form.
1967 /// This is required to avoid illegal operand reordering.
1968 /// For example:
1969 /// \verbatim
1970 /// 0 Op1
1971 /// |/
1972 /// Op1 Op2 Linearized + Op2
1973 /// \ / ----------> |/
1974 /// - -
1976 /// Op1 - Op2 (0 + Op1) - Op2
1977 /// \endverbatim
1979 /// Value Op1 is attached to a '+' operation, and Op2 to a '-'.
1981 /// Another way to think of this is to track all the operations across the
1982 /// path from the operand all the way to the root of the tree and to
1983 /// calculate the operation that corresponds to this path. For example, the
1984 /// path from Op2 to the root crosses the RHS of the '-', therefore the
1985 /// corresponding operation is a '-' (which matches the one in the
1986 /// linearized tree, as shown above).
1988 /// For lack of a better term, we refer to this operation as Accumulated
1989 /// Path Operation (APO).
1990 struct OperandData {
1991 OperandData() = default;
1992 OperandData(Value *V, bool APO, bool IsUsed)
1993 : V(V), APO(APO), IsUsed(IsUsed) {}
1994 /// The operand value.
1995 Value *V = nullptr;
1996 /// TreeEntries only allow a single opcode, or an alternate sequence of
1997 /// them (e.g., +, -). Therefore, we can safely use a boolean value for the
1998 /// APO. It is set to 'true' if 'V' is attached to an inverse operation
1999 /// in the left-linearized form (e.g., Sub/Div), and 'false' otherwise
2000 /// (e.g., Add/Mul)
2001 bool APO = false;
2002 /// Helper data for the reordering function.
2003 bool IsUsed = false;
2006 /// During operand reordering, we are trying to select the operand at lane
2007 /// that matches best with the operand at the neighboring lane. Our
2008 /// selection is based on the type of value we are looking for. For example,
2009 /// if the neighboring lane has a load, we need to look for a load that is
2010 /// accessing a consecutive address. These strategies are summarized in the
2011 /// 'ReorderingMode' enumerator.
2012 enum class ReorderingMode {
2013 Load, ///< Matching loads to consecutive memory addresses
2014 Opcode, ///< Matching instructions based on opcode (same or alternate)
2015 Constant, ///< Matching constants
2016 Splat, ///< Matching the same instruction multiple times (broadcast)
2017 Failed, ///< We failed to create a vectorizable group
2020 using OperandDataVec = SmallVector<OperandData, 2>;
2022 /// A vector of operand vectors.
2023 SmallVector<OperandDataVec, 4> OpsVec;
2024 /// When VL[0] is IntrinsicInst, ArgSize is CallBase::arg_size. When VL[0]
2025 /// is not IntrinsicInst, ArgSize is User::getNumOperands.
2026 unsigned ArgSize = 0;
2028 const TargetLibraryInfo &TLI;
2029 const DataLayout &DL;
2030 ScalarEvolution &SE;
2031 const BoUpSLP &R;
2032 const Loop *L = nullptr;
2034 /// \returns the operand data at \p OpIdx and \p Lane.
2035 OperandData &getData(unsigned OpIdx, unsigned Lane) {
2036 return OpsVec[OpIdx][Lane];
2039 /// \returns the operand data at \p OpIdx and \p Lane. Const version.
2040 const OperandData &getData(unsigned OpIdx, unsigned Lane) const {
2041 return OpsVec[OpIdx][Lane];
2044 /// Clears the used flag for all entries.
2045 void clearUsed() {
2046 for (unsigned OpIdx = 0, NumOperands = getNumOperands();
2047 OpIdx != NumOperands; ++OpIdx)
2048 for (unsigned Lane = 0, NumLanes = getNumLanes(); Lane != NumLanes;
2049 ++Lane)
2050 OpsVec[OpIdx][Lane].IsUsed = false;
2053 /// Swap the operand at \p OpIdx1 with that one at \p OpIdx2.
2054 void swap(unsigned OpIdx1, unsigned OpIdx2, unsigned Lane) {
2055 std::swap(OpsVec[OpIdx1][Lane], OpsVec[OpIdx2][Lane]);
2058 /// \param Lane lane of the operands under analysis.
2059 /// \param OpIdx operand index in lane \p Lane for which we're looking for
2060 /// the best candidate.
2061 /// \param Idx operand index of the current candidate value.
2062 /// \returns The additional score due to possible broadcasting of the
2063 /// elements in the lane. It is more profitable to have power-of-2 unique
2064 /// elements in the lane, since it will be vectorized with higher probability
2065 /// after removing duplicates. Currently the SLP vectorizer supports only
2066 /// vectorization of a power-of-2 number of unique scalars.
2067 int getSplatScore(unsigned Lane, unsigned OpIdx, unsigned Idx,
2068 const SmallBitVector &UsedLanes) const {
2069 Value *IdxLaneV = getData(Idx, Lane).V;
2070 if (!isa<Instruction>(IdxLaneV) || IdxLaneV == getData(OpIdx, Lane).V ||
2071 isa<ExtractElementInst>(IdxLaneV))
2072 return 0;
2073 SmallDenseMap<Value *, unsigned, 4> Uniques;
2074 for (unsigned Ln : seq<unsigned>(getNumLanes())) {
2075 if (Ln == Lane)
2076 continue;
2077 Value *OpIdxLnV = getData(OpIdx, Ln).V;
2078 if (!isa<Instruction>(OpIdxLnV))
2079 return 0;
2080 Uniques.try_emplace(OpIdxLnV, Ln);
2082 unsigned UniquesCount = Uniques.size();
2083 auto IdxIt = Uniques.find(IdxLaneV);
2084 unsigned UniquesCntWithIdxLaneV =
2085 IdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
2086 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
2087 auto OpIdxIt = Uniques.find(OpIdxLaneV);
2088 unsigned UniquesCntWithOpIdxLaneV =
2089 OpIdxIt != Uniques.end() ? UniquesCount : UniquesCount + 1;
2090 if (UniquesCntWithIdxLaneV == UniquesCntWithOpIdxLaneV)
2091 return 0;
2092 return std::min(bit_ceil(UniquesCntWithOpIdxLaneV) -
2093 UniquesCntWithOpIdxLaneV,
2094 UniquesCntWithOpIdxLaneV -
2095 bit_floor(UniquesCntWithOpIdxLaneV)) -
2096 ((IdxIt != Uniques.end() && UsedLanes.test(IdxIt->second))
2097 ? UniquesCntWithIdxLaneV - bit_floor(UniquesCntWithIdxLaneV)
2098 : bit_ceil(UniquesCntWithIdxLaneV) - UniquesCntWithIdxLaneV);
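// Rough numeric example: with three lanes, if the other two lanes hold
// distinct instructions u and v (UniquesCount = 2), the candidate equals u,
// and the current operand is a third value w, then choosing the candidate
// keeps the unique count at a power of two (2 instead of 3) and this returns
// +1; the symmetric situation can yield a negative score.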
2101 /// \param Lane lane of the operands under analysis.
2102 /// \param OpIdx operand index in lane \p Lane for which we're looking for
2103 /// the best candidate.
2104 /// \param Idx operand index of the current candidate value.
2105 /// \returns The additional score for the scalar whose users are all
2106 /// vectorized.
2107 int getExternalUseScore(unsigned Lane, unsigned OpIdx, unsigned Idx) const {
2108 Value *IdxLaneV = getData(Idx, Lane).V;
2109 Value *OpIdxLaneV = getData(OpIdx, Lane).V;
2110 // Do not care about number of uses for vector-like instructions
2111 // (extractelement/extractvalue with constant indices), they are extracts
2112 // themselves and already externally used. Vectorization of such
2113 // instructions does not add extra extractelement instruction, just may
2114 // remove it.
2115 if (isVectorLikeInstWithConstOps(IdxLaneV) &&
2116 isVectorLikeInstWithConstOps(OpIdxLaneV))
2117 return LookAheadHeuristics::ScoreAllUserVectorized;
2118 auto *IdxLaneI = dyn_cast<Instruction>(IdxLaneV);
2119 if (!IdxLaneI || !isa<Instruction>(OpIdxLaneV))
2120 return 0;
2121 return R.areAllUsersVectorized(IdxLaneI)
2122 ? LookAheadHeuristics::ScoreAllUserVectorized
2123 : 0;
2126 /// Score scaling factor for fully compatible instructions but with
2127 /// different number of external uses. Allows better selection of the
2128 /// instructions with less external uses.
2129 static const int ScoreScaleFactor = 10;
2131 /// \Returns the look-ahead score, which tells us how much the sub-trees
2132 /// rooted at \p LHS and \p RHS match, the more they match the higher the
2133 /// score. This helps break ties in an informed way when we cannot decide on
2134 /// the order of the operands by just considering the immediate
2135 /// predecessors.
2136 int getLookAheadScore(Value *LHS, Value *RHS, ArrayRef<Value *> MainAltOps,
2137 int Lane, unsigned OpIdx, unsigned Idx,
2138 bool &IsUsed, const SmallBitVector &UsedLanes) {
2139 LookAheadHeuristics LookAhead(TLI, DL, SE, R, getNumLanes(),
2140 LookAheadMaxDepth);
2141 // Keep track of the instruction stack as we recurse into the operands
2142 // during the look-ahead score exploration.
2143 int Score =
2144 LookAhead.getScoreAtLevelRec(LHS, RHS, /*U1=*/nullptr, /*U2=*/nullptr,
2145 /*CurrLevel=*/1, MainAltOps);
2146 if (Score) {
2147 int SplatScore = getSplatScore(Lane, OpIdx, Idx, UsedLanes);
2148 if (Score <= -SplatScore) {
2149 // Failed score.
2150 Score = 0;
2151 } else {
2152 Score += SplatScore;
2153 // Scale score to see the difference between different operands
2154 // and similar operands but all vectorized/not all vectorized
2155 // uses. It does not affect actual selection of the best
2156 // compatible operand in general, just allows to select the
2157 // operand with all vectorized uses.
2158 Score *= ScoreScaleFactor;
2159 Score += getExternalUseScore(Lane, OpIdx, Idx);
2160 IsUsed = true;
2163 return Score;
2166 /// Best defined scores per lanes between the passes. Used to choose the
2167 /// best operand (with the highest score) between the passes.
2168 /// The key - {Operand Index, Lane}.
2169 /// The value - the best score between the passes for the lane and the
2170 /// operand.
2171 SmallDenseMap<std::pair<unsigned, unsigned>, unsigned, 8>
2172 BestScoresPerLanes;
2174 // Search all operands in Ops[*][Lane] for the one that matches best
2175 // Ops[OpIdx][LastLane] and return its operand index.
2176 // If no good match can be found, return std::nullopt.
2177 std::optional<unsigned>
2178 getBestOperand(unsigned OpIdx, int Lane, int LastLane,
2179 ArrayRef<ReorderingMode> ReorderingModes,
2180 ArrayRef<Value *> MainAltOps,
2181 const SmallBitVector &UsedLanes) {
2182 unsigned NumOperands = getNumOperands();
2184 // The operand of the previous lane at OpIdx.
2185 Value *OpLastLane = getData(OpIdx, LastLane).V;
2187 // Our strategy mode for OpIdx.
2188 ReorderingMode RMode = ReorderingModes[OpIdx];
2189 if (RMode == ReorderingMode::Failed)
2190 return std::nullopt;
2192 // The linearized opcode of the operand at OpIdx, Lane.
2193 bool OpIdxAPO = getData(OpIdx, Lane).APO;
2195 // The best operand index and its score.
2196 // Sometimes we have more than one option (e.g., Opcode and Undefs), so we
2197 // are using the score to differentiate between the two.
2198 struct BestOpData {
2199 std::optional<unsigned> Idx;
2200 unsigned Score = 0;
2201 } BestOp;
2202 BestOp.Score =
2203 BestScoresPerLanes.try_emplace(std::make_pair(OpIdx, Lane), 0)
2204 .first->second;
2206 // Track if the operand must be marked as used. If the operand is set to
2207 // Score 1 explicitly (because of a non-power-of-2 number of unique scalars),
2208 // we may want to re-estimate the operands again on the following iterations.
2209 bool IsUsed = RMode == ReorderingMode::Splat ||
2210 RMode == ReorderingMode::Constant ||
2211 RMode == ReorderingMode::Load;
2212 // Iterate through all unused operands and look for the best.
2213 for (unsigned Idx = 0; Idx != NumOperands; ++Idx) {
2214 // Get the operand at Idx and Lane.
2215 OperandData &OpData = getData(Idx, Lane);
2216 Value *Op = OpData.V;
2217 bool OpAPO = OpData.APO;
2219 // Skip already selected operands.
2220 if (OpData.IsUsed)
2221 continue;
2223 // Skip if we are trying to move the operand to a position with a
2224 // different opcode in the linearized tree form. This would break the
2225 // semantics.
2226 if (OpAPO != OpIdxAPO)
2227 continue;
2229 // Look for an operand that matches the current mode.
2230 switch (RMode) {
2231 case ReorderingMode::Load:
2232 case ReorderingMode::Opcode: {
2233 bool LeftToRight = Lane > LastLane;
2234 Value *OpLeft = (LeftToRight) ? OpLastLane : Op;
2235 Value *OpRight = (LeftToRight) ? Op : OpLastLane;
2236 int Score = getLookAheadScore(OpLeft, OpRight, MainAltOps, Lane,
2237 OpIdx, Idx, IsUsed, UsedLanes);
2238 if (Score > static_cast<int>(BestOp.Score) ||
2239 (Score > 0 && Score == static_cast<int>(BestOp.Score) &&
2240 Idx == OpIdx)) {
2241 BestOp.Idx = Idx;
2242 BestOp.Score = Score;
2243 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] = Score;
2245 break;
2247 case ReorderingMode::Constant:
2248 if (isa<Constant>(Op) ||
2249 (!BestOp.Score && L && L->isLoopInvariant(Op))) {
2250 BestOp.Idx = Idx;
2251 if (isa<Constant>(Op)) {
2252 BestOp.Score = LookAheadHeuristics::ScoreConstants;
2253 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] =
2254 LookAheadHeuristics::ScoreConstants;
2256 if (isa<UndefValue>(Op) || !isa<Constant>(Op))
2257 IsUsed = false;
2259 break;
2260 case ReorderingMode::Splat:
2261 if (Op == OpLastLane || (!BestOp.Score && isa<Constant>(Op))) {
2262 IsUsed = Op == OpLastLane;
2263 if (Op == OpLastLane) {
2264 BestOp.Score = LookAheadHeuristics::ScoreSplat;
2265 BestScoresPerLanes[std::make_pair(OpIdx, Lane)] =
2266 LookAheadHeuristics::ScoreSplat;
2268 BestOp.Idx = Idx;
2270 break;
2271 case ReorderingMode::Failed:
2272 llvm_unreachable("Not expected Failed reordering mode.");
2276 if (BestOp.Idx) {
2277 getData(*BestOp.Idx, Lane).IsUsed = IsUsed;
2278 return BestOp.Idx;
2280 // If we could not find a good match return std::nullopt.
2281 return std::nullopt;
2284 /// Helper for reorderOperandVecs.
2285 /// \returns the lane that we should start reordering from. This is the one
2286 /// which has the least number of operands that can freely move about, or is
2287 /// less profitable to reorder because it already has the most optimal operands.
2288 unsigned getBestLaneToStartReordering() const {
2289 unsigned Min = UINT_MAX;
2290 unsigned SameOpNumber = 0;
2291 // std::pair<unsigned, unsigned> is used to implement a simple voting
2292 // algorithm and choose the lane with the least number of operands that
2293 // can freely move about, or is less profitable because it already has the
2294 // most optimal set of operands. The first unsigned is a counter for
2295 // voting, the second unsigned is the counter of lanes with instructions
2296 // with same/alternate opcodes and same parent basic block.
2297 MapVector<unsigned, std::pair<unsigned, unsigned>> HashMap;
2298 // Try to be closer to the original results, if we have multiple lanes
2299 // with same cost. If 2 lanes have the same cost, use the one with the
2300 // highest index.
2301 for (int I = getNumLanes(); I > 0; --I) {
2302 unsigned Lane = I - 1;
2303 OperandsOrderData NumFreeOpsHash =
2304 getMaxNumOperandsThatCanBeReordered(Lane);
2305 // Compare the number of operands that can move and choose the one with
2306 // the least number.
2307 if (NumFreeOpsHash.NumOfAPOs < Min) {
2308 Min = NumFreeOpsHash.NumOfAPOs;
2309 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
2310 HashMap.clear();
2311 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
2312 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
2313 NumFreeOpsHash.NumOpsWithSameOpcodeParent < SameOpNumber) {
2314 // Select the most optimal lane in terms of number of operands that
2315 // should be moved around.
2316 SameOpNumber = NumFreeOpsHash.NumOpsWithSameOpcodeParent;
2317 HashMap[NumFreeOpsHash.Hash] = std::make_pair(1, Lane);
2318 } else if (NumFreeOpsHash.NumOfAPOs == Min &&
2319 NumFreeOpsHash.NumOpsWithSameOpcodeParent == SameOpNumber) {
2320 auto [It, Inserted] =
2321 HashMap.try_emplace(NumFreeOpsHash.Hash, 1, Lane);
2322 if (!Inserted)
2323 ++It->second.first;
2326 // Select the lane with the minimum counter.
2327 unsigned BestLane = 0;
2328 unsigned CntMin = UINT_MAX;
2329 for (const auto &Data : reverse(HashMap)) {
2330 if (Data.second.first < CntMin) {
2331 CntMin = Data.second.first;
2332 BestLane = Data.second.second;
2335 return BestLane;
2338 /// Data structure that helps to reorder operands.
2339 struct OperandsOrderData {
2340 /// The best number of operands with the same APOs, which can be
2341 /// reordered.
2342 unsigned NumOfAPOs = UINT_MAX;
2343 /// Number of operands with the same/alternate instruction opcode and
2344 /// parent.
2345 unsigned NumOpsWithSameOpcodeParent = 0;
2346 /// Hash for the actual operands ordering.
2347 /// It encodes the operands' position ids and opcode
2348 /// values. It is used in the voting mechanism to find the lane with the
2349 /// least number of operands that can freely move about or is less profitable
2350 /// because it already has the most optimal set of operands. Can be
2351 /// replaced with SmallVector<unsigned> instead but hash code is faster
2352 /// and requires less memory.
2353 unsigned Hash = 0;
2355 /// \returns the maximum number of operands that are allowed to be reordered
2356 /// for \p Lane and the number of compatible instructions (with the same
2357 /// parent/opcode). This is used as a heuristic for selecting the first lane
2358 /// to start operand reordering.
2359 OperandsOrderData getMaxNumOperandsThatCanBeReordered(unsigned Lane) const {
2360 unsigned CntTrue = 0;
2361 unsigned NumOperands = getNumOperands();
2362 // Operands with the same APO can be reordered. We therefore need to count
2363 // how many of them we have for each APO, like this: Cnt[APO] = x.
2364 // Since we only have two APOs, namely true and false, we can avoid using
2365 // a map. Instead we can simply count the number of operands that
2366 // correspond to one of them (in this case the 'true' APO), and calculate
2367 // the other by subtracting it from the total number of operands.
2368 // Operands with the same instruction opcode and parent are more
2369 // profitable since we don't need to move them in many cases, with a high
2370 // probability such a lane can already be vectorized effectively.
2371 bool AllUndefs = true;
2372 unsigned NumOpsWithSameOpcodeParent = 0;
2373 Instruction *OpcodeI = nullptr;
2374 BasicBlock *Parent = nullptr;
2375 unsigned Hash = 0;
2376 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2377 const OperandData &OpData = getData(OpIdx, Lane);
2378 if (OpData.APO)
2379 ++CntTrue;
2380 // Use Boyer-Moore majority voting for finding the majority opcode and
2381 // the number of times it occurs.
2382 if (auto *I = dyn_cast<Instruction>(OpData.V)) {
2383 if (!OpcodeI || !getSameOpcode({OpcodeI, I}, TLI).getOpcode() ||
2384 I->getParent() != Parent) {
2385 if (NumOpsWithSameOpcodeParent == 0) {
2386 NumOpsWithSameOpcodeParent = 1;
2387 OpcodeI = I;
2388 Parent = I->getParent();
2389 } else {
2390 --NumOpsWithSameOpcodeParent;
2392 } else {
2393 ++NumOpsWithSameOpcodeParent;
2396 Hash = hash_combine(
2397 Hash, hash_value((OpIdx + 1) * (OpData.V->getValueID() + 1)));
2398 AllUndefs = AllUndefs && isa<UndefValue>(OpData.V);
2400 if (AllUndefs)
2401 return {};
2402 OperandsOrderData Data;
2403 Data.NumOfAPOs = std::max(CntTrue, NumOperands - CntTrue);
2404 Data.NumOpsWithSameOpcodeParent = NumOpsWithSameOpcodeParent;
2405 Data.Hash = Hash;
2406 return Data;
2409 /// Go through the instructions in VL and append their operands.
2410 void appendOperandsOfVL(ArrayRef<Value *> VL, Instruction *VL0) {
2411 assert(!VL.empty() && "Bad VL");
2412 assert((empty() || VL.size() == getNumLanes()) &&
2413 "Expected same number of lanes");
2414 // IntrinsicInst::isCommutative returns true if swapping the first "two"
2415 // arguments to the intrinsic produces the same result.
2416 constexpr unsigned IntrinsicNumOperands = 2;
2417 unsigned NumOperands = VL0->getNumOperands();
2418 ArgSize = isa<IntrinsicInst>(VL0) ? IntrinsicNumOperands : NumOperands;
2419 OpsVec.resize(NumOperands);
2420 unsigned NumLanes = VL.size();
2421 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2422 OpsVec[OpIdx].resize(NumLanes);
2423 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
2424 assert((isa<Instruction>(VL[Lane]) || isa<PoisonValue>(VL[Lane])) &&
2425 "Expected instruction or poison value");
2426 // Our tree has just 3 nodes: the root and two operands.
2427 // It is therefore trivial to get the APO. We only need to check the
2428 // opcode of VL[Lane] and whether the operand at OpIdx is the LHS or
2429 // RHS operand. The LHS operand of both add and sub is never attached
2430 // to an inverse operation in the linearized form, therefore its APO
2431 // is false. The RHS is true only if VL[Lane] is an inverse operation.
2433 // Since operand reordering is performed on groups of commutative
2434 // operations or alternating sequences (e.g., +, -), we can safely
2435 // tell the inverse operations by checking commutativity.
2436 if (isa<PoisonValue>(VL[Lane])) {
2437 OpsVec[OpIdx][Lane] = {
2438 PoisonValue::get(VL0->getOperand(OpIdx)->getType()), true,
2439 false};
2440 continue;
2442 bool IsInverseOperation = !isCommutative(cast<Instruction>(VL[Lane]));
2443 bool APO = (OpIdx == 0) ? false : IsInverseOperation;
2444 OpsVec[OpIdx][Lane] = {cast<Instruction>(VL[Lane])->getOperand(OpIdx),
2445 APO, false};
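// Rough example of the APO assignment: for VL = {a0 + b0, a1 - b1}, lane 1 is
// a non-commutative 'sub', so its RHS operand b1 gets APO = true, while a0,
// b0 and a1 (LHS operands or operands of the commutative 'add') get
// APO = false.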
2450 /// \returns the number of operands.
2451 unsigned getNumOperands() const { return ArgSize; }
2453 /// \returns the number of lanes.
2454 unsigned getNumLanes() const { return OpsVec[0].size(); }
2456 /// \returns the operand value at \p OpIdx and \p Lane.
2457 Value *getValue(unsigned OpIdx, unsigned Lane) const {
2458 return getData(OpIdx, Lane).V;
2461 /// \returns true if the data structure is empty.
2462 bool empty() const { return OpsVec.empty(); }
2464 /// Clears the data.
2465 void clear() { OpsVec.clear(); }
2467 /// \Returns true if there are enough operands identical to \p Op to fill
2468 /// the whole vector (possibly mixed with constants or loop-invariant values).
2469 /// Note: This modifies the 'IsUsed' flag, so a clearUsed() must follow.
2470 bool shouldBroadcast(Value *Op, unsigned OpIdx, unsigned Lane) {
2471 assert(Op == getValue(OpIdx, Lane) &&
2472 "Op is expected to be getValue(OpIdx, Lane).");
2473 // Small number of loads - try load matching.
2474 if (isa<LoadInst>(Op) && getNumLanes() == 2 && getNumOperands() == 2)
2475 return false;
2476 bool OpAPO = getData(OpIdx, Lane).APO;
2477 bool IsInvariant = L && L->isLoopInvariant(Op);
2478 unsigned Cnt = 0;
2479 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
2480 if (Ln == Lane)
2481 continue;
2482 // This is set to true if we found a candidate for broadcast at Lane.
2483 bool FoundCandidate = false;
2484 for (unsigned OpI = 0, OpE = getNumOperands(); OpI != OpE; ++OpI) {
2485 OperandData &Data = getData(OpI, Ln);
2486 if (Data.APO != OpAPO || Data.IsUsed)
2487 continue;
2488 Value *OpILane = getValue(OpI, Lane);
2489 bool IsConstantOp = isa<Constant>(OpILane);
2490 // Consider the broadcast candidate if:
2491 // 1. Same value is found in one of the operands.
2492 if (Data.V == Op ||
2493 // 2. The operand in the given lane is not constant but there is a
2494 // constant operand in another lane (which can be moved to the
2495 // given lane). In this case we can represent it as a simple
2496 // permutation of constant and broadcast.
2497 (!IsConstantOp &&
2498 ((Lns > 2 && isa<Constant>(Data.V)) ||
2499 // 2.1. If we have only 2 lanes, we need to check that the value in the
2500 // next lane does not build the same opcode sequence.
2501 (Lns == 2 &&
2502 !getSameOpcode({Op, getValue((OpI + 1) % OpE, Ln)}, TLI)
2503 .getOpcode() &&
2504 isa<Constant>(Data.V)))) ||
2505 // 3. The operand in the current lane is loop invariant (can be
2506 // hoisted out) and another operand is also a loop invariant
2507 // (though not a constant). In this case the whole vector can be
2508 // hoisted out.
2509 // FIXME: need to teach the cost model about this case for better
2510 // estimation.
2511 (IsInvariant && !isa<Constant>(Data.V) &&
2512 !getSameOpcode({Op, Data.V}, TLI).getOpcode() &&
2513 L->isLoopInvariant(Data.V))) {
2514 FoundCandidate = true;
2515 Data.IsUsed = Data.V == Op;
2516 if (Data.V == Op)
2517 ++Cnt;
2518 break;
2521 if (!FoundCandidate)
2522 return false;
2524 return getNumLanes() == 2 || Cnt > 1;
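// Illustrative case: with four lanes where Op appears in lanes 0, 1 and 3 and
// lane 2 holds a constant at some operand position, this returns true
// (Cnt == 2 matches plus a constant that can be permuted into place), so the
// reordering roughly switches to ReorderingMode::Splat for this operand.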
2527 /// Checks if there is at least one operand in lanes other than \p Lane
2528 /// that is compatible with the operand \p Op.
2529 bool canBeVectorized(Instruction *Op, unsigned OpIdx, unsigned Lane) const {
2530 assert(Op == getValue(OpIdx, Lane) &&
2531 "Op is expected to be getValue(OpIdx, Lane).");
2532 bool OpAPO = getData(OpIdx, Lane).APO;
2533 for (unsigned Ln = 0, Lns = getNumLanes(); Ln != Lns; ++Ln) {
2534 if (Ln == Lane)
2535 continue;
2536 if (any_of(seq<unsigned>(getNumOperands()), [&](unsigned OpI) {
2537 const OperandData &Data = getData(OpI, Ln);
2538 if (Data.APO != OpAPO || Data.IsUsed)
2539 return true;
2540 Value *OpILn = getValue(OpI, Ln);
2541 return (L && L->isLoopInvariant(OpILn)) ||
2542 (getSameOpcode({Op, OpILn}, TLI).getOpcode() &&
2543 allSameBlock({Op, OpILn}));
2545 return true;
2547 return false;
2550 public:
2551 /// Initialize with all the operands of the instruction vector \p RootVL.
2552 VLOperands(ArrayRef<Value *> RootVL, Instruction *VL0, const BoUpSLP &R)
2553 : TLI(*R.TLI), DL(*R.DL), SE(*R.SE), R(R),
2554 L(R.LI->getLoopFor((VL0->getParent()))) {
2555 // Append all the operands of RootVL.
2556 appendOperandsOfVL(RootVL, VL0);
2559 /// \Returns a value vector with the operands across all lanes for the
2560 /// operand at \p OpIdx.
2561 ValueList getVL(unsigned OpIdx) const {
2562 ValueList OpVL(OpsVec[OpIdx].size());
2563 assert(OpsVec[OpIdx].size() == getNumLanes() &&
2564 "Expected same num of lanes across all operands");
2565 for (unsigned Lane = 0, Lanes = getNumLanes(); Lane != Lanes; ++Lane)
2566 OpVL[Lane] = OpsVec[OpIdx][Lane].V;
2567 return OpVL;
2570 // Performs operand reordering for 2 or more operands.
2571 // The original operands are in OrigOps[OpIdx][Lane].
2572 // The reordered operands are returned in 'SortedOps[OpIdx][Lane]'.
2573 void reorder() {
2574 unsigned NumOperands = getNumOperands();
2575 unsigned NumLanes = getNumLanes();
2576 // Each operand has its own mode. We are using this mode to help us select
2577 // the instructions for each lane, so that they match best with the ones
2578 // we have selected so far.
2579 SmallVector<ReorderingMode, 2> ReorderingModes(NumOperands);
2581 // This is a greedy single-pass algorithm. We are going over each lane
2582 // once and deciding on the best order right away with no back-tracking.
2583 // However, in order to increase its effectiveness, we start with the lane
2584 // that has operands that can move the least. For example, given the
2585 // following lanes:
2586 // Lane 0 : A[0] = B[0] + C[0] // Visited 3rd
2587 // Lane 1 : A[1] = C[1] - B[1] // Visited 1st
2588 // Lane 2 : A[2] = B[2] + C[2] // Visited 2nd
2589 // Lane 3 : A[3] = C[3] - B[3] // Visited 4th
2590 // we will start at Lane 1, since the operands of the subtraction cannot
2591 // be reordered. Then we will visit the rest of the lanes in a circular
2592 // fashion. That is, Lanes 2, then Lane 0, and finally Lane 3.
2594 // Find the first lane that we will start our search from.
2595 unsigned FirstLane = getBestLaneToStartReordering();
2597 // Initialize the modes.
2598 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2599 Value *OpLane0 = getValue(OpIdx, FirstLane);
2600 // Keep track if we have instructions with all the same opcode on one
2601 // side.
2602 if (auto *OpILane0 = dyn_cast<Instruction>(OpLane0)) {
2603 // Check if OpLane0 should be broadcast.
2604 if (shouldBroadcast(OpLane0, OpIdx, FirstLane) ||
2605 !canBeVectorized(OpILane0, OpIdx, FirstLane))
2606 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2607 else if (isa<LoadInst>(OpILane0))
2608 ReorderingModes[OpIdx] = ReorderingMode::Load;
2609 else
2610 ReorderingModes[OpIdx] = ReorderingMode::Opcode;
2611 } else if (isa<Constant>(OpLane0)) {
2612 ReorderingModes[OpIdx] = ReorderingMode::Constant;
2613 } else if (isa<Argument>(OpLane0)) {
2614 // Our best hope is a Splat. It may save some cost in some cases.
2615 ReorderingModes[OpIdx] = ReorderingMode::Splat;
2616 } else {
2617 llvm_unreachable("Unexpected value kind.");
2621 // Check that we don't have the same operands. No need to reorder if the
2622 // operands are just a perfect or shuffled diamond match. However, do not
2623 // skip the reordering for possible broadcasts or a non-power-of-2 number
2624 // of scalars (just for now).
2625 auto &&SkipReordering = [this]() {
2626 SmallPtrSet<Value *, 4> UniqueValues;
2627 ArrayRef<OperandData> Op0 = OpsVec.front();
2628 for (const OperandData &Data : Op0)
2629 UniqueValues.insert(Data.V);
2630 for (ArrayRef<OperandData> Op :
2631 ArrayRef(OpsVec).slice(1, getNumOperands() - 1)) {
2632 if (any_of(Op, [&UniqueValues](const OperandData &Data) {
2633 return !UniqueValues.contains(Data.V);
2635 return false;
2637 // TODO: Check if we can remove a check for non-power-2 number of
2638 // scalars after full support of non-power-2 vectorization.
2639 return UniqueValues.size() != 2 && has_single_bit(UniqueValues.size());
2642 // If the initial strategy fails for any of the operand indexes, then we
2643 // perform reordering again in a second pass. This helps avoid assigning
2644 // high priority to the failed strategy, and should improve reordering for
2645 // the non-failed operand indexes.
2646 for (int Pass = 0; Pass != 2; ++Pass) {
2647 // Check if no need to reorder operands since they are a perfect or
2648 // shuffled diamond match.
2649 // Need to do it to avoid extra external use cost counting for
2650 // shuffled matches, which may cause regressions.
2651 if (SkipReordering())
2652 break;
2653 // Skip the second pass if the first pass did not fail.
2654 bool StrategyFailed = false;
2655 // Mark all operand data as free to use.
2656 clearUsed();
2657 // We keep the original operand order for the FirstLane, so reorder the
2658 // rest of the lanes. We are visiting the nodes in a circular fashion,
2659 // using FirstLane as the center point and increasing the radius
2660 // distance.
2661 SmallVector<SmallVector<Value *, 2>> MainAltOps(NumOperands);
2662 for (unsigned I = 0; I < NumOperands; ++I)
2663 MainAltOps[I].push_back(getData(I, FirstLane).V);
2665 SmallBitVector UsedLanes(NumLanes);
2666 UsedLanes.set(FirstLane);
2667 for (unsigned Distance = 1; Distance != NumLanes; ++Distance) {
2668 // Visit the lane on the right and then the lane on the left.
2669 for (int Direction : {+1, -1}) {
2670 int Lane = FirstLane + Direction * Distance;
2671 if (Lane < 0 || Lane >= (int)NumLanes)
2672 continue;
2673 UsedLanes.set(Lane);
2674 int LastLane = Lane - Direction;
2675 assert(LastLane >= 0 && LastLane < (int)NumLanes &&
2676 "Out of bounds");
2677 // Look for a good match for each operand.
2678 for (unsigned OpIdx = 0; OpIdx != NumOperands; ++OpIdx) {
2679 // Search for the operand that matches SortedOps[OpIdx][Lane-1].
2680 std::optional<unsigned> BestIdx =
2681 getBestOperand(OpIdx, Lane, LastLane, ReorderingModes,
2682 MainAltOps[OpIdx], UsedLanes);
2683 // By not selecting a value, we allow the operands that follow to
2684 // select a better matching value. We will get a non-null value in
2685 // the next run of getBestOperand().
2686 if (BestIdx) {
2687 // Swap the current operand with the one returned by
2688 // getBestOperand().
2689 swap(OpIdx, *BestIdx, Lane);
2690 } else {
2691 // Enable the second pass.
2692 StrategyFailed = true;
2694 // Try to get the alternate opcode and follow it during analysis.
2695 if (MainAltOps[OpIdx].size() != 2) {
2696 OperandData &AltOp = getData(OpIdx, Lane);
2697 InstructionsState OpS =
2698 getSameOpcode({MainAltOps[OpIdx].front(), AltOp.V}, TLI);
2699 if (OpS.getOpcode() && OpS.isAltShuffle())
2700 MainAltOps[OpIdx].push_back(AltOp.V);
2705 // Skip second pass if the strategy did not fail.
2706 if (!StrategyFailed)
2707 break;
2711 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2712 LLVM_DUMP_METHOD static StringRef getModeStr(ReorderingMode RMode) {
2713 switch (RMode) {
2714 case ReorderingMode::Load:
2715 return "Load";
2716 case ReorderingMode::Opcode:
2717 return "Opcode";
2718 case ReorderingMode::Constant:
2719 return "Constant";
2720 case ReorderingMode::Splat:
2721 return "Splat";
2722 case ReorderingMode::Failed:
2723 return "Failed";
2725 llvm_unreachable("Unimplemented Reordering Type");
2728 LLVM_DUMP_METHOD static raw_ostream &printMode(ReorderingMode RMode,
2729 raw_ostream &OS) {
2730 return OS << getModeStr(RMode);
2733 /// Debug print.
2734 LLVM_DUMP_METHOD static void dumpMode(ReorderingMode RMode) {
2735 printMode(RMode, dbgs());
2738 friend raw_ostream &operator<<(raw_ostream &OS, ReorderingMode RMode) {
2739 return printMode(RMode, OS);
2742 LLVM_DUMP_METHOD raw_ostream &print(raw_ostream &OS) const {
2743 const unsigned Indent = 2;
2744 unsigned Cnt = 0;
2745 for (const OperandDataVec &OpDataVec : OpsVec) {
2746 OS << "Operand " << Cnt++ << "\n";
2747 for (const OperandData &OpData : OpDataVec) {
2748 OS.indent(Indent) << "{";
2749 if (Value *V = OpData.V)
2750 OS << *V;
2751 else
2752 OS << "null";
2753 OS << ", APO:" << OpData.APO << "}\n";
2755 OS << "\n";
2757 return OS;
2760 /// Debug print.
2761 LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
2762 #endif
2765 /// Evaluate each pair in \p Candidates and return index into \p Candidates
2766 /// for the pair which has the highest score and is deemed to have the best
2767 /// chance to form the root of a profitable tree to vectorize. Return
2768 /// std::nullopt if no candidate scored above LookAheadHeuristics::ScoreFail.
2769 /// \param Limit Lower limit of the cost, considered to be a good enough score.
2770 std::optional<int>
2771 findBestRootPair(ArrayRef<std::pair<Value *, Value *>> Candidates,
2772 int Limit = LookAheadHeuristics::ScoreFail) const {
2773 LookAheadHeuristics LookAhead(*TLI, *DL, *SE, *this, /*NumLanes=*/2,
2774 RootLookAheadMaxDepth);
2775 int BestScore = Limit;
2776 std::optional<int> Index;
2777 for (int I : seq<int>(0, Candidates.size())) {
2778 int Score = LookAhead.getScoreAtLevelRec(Candidates[I].first,
2779 Candidates[I].second,
2780 /*U1=*/nullptr, /*U2=*/nullptr,
2781 /*CurrLevel=*/1, {});
2782 if (Score > BestScore) {
2783 BestScore = Score;
2784 Index = I;
2787 return Index;
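// Usage sketch (hypothetical values): given Candidates = {{a0, a1}, {l0, l1}}
// where l0/l1 are consecutive loads and a0/a1 are unrelated values, the
// look-ahead scoring favors the load pair, so the returned index would be 1;
// std::nullopt means no pair beat the given Limit.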
2790 /// Checks if the instruction is marked for deletion.
2791 bool isDeleted(Instruction *I) const { return DeletedInstructions.count(I); }
2793 /// Removes an instruction from its block and eventually deletes it.
2794 /// It's like Instruction::eraseFromParent() except that the actual deletion
2795 /// is delayed until BoUpSLP is destructed.
2796 void eraseInstruction(Instruction *I) {
2797 DeletedInstructions.insert(I);
2800 /// Remove instructions from the parent function and clear the operands of \p
2801 /// DeadVals instructions, marking trivially dead operands for deletion.
2802 template <typename T>
2803 void removeInstructionsAndOperands(ArrayRef<T *> DeadVals) {
2804 SmallVector<WeakTrackingVH> DeadInsts;
2805 for (T *V : DeadVals) {
2806 auto *I = cast<Instruction>(V);
2807 DeletedInstructions.insert(I);
2809 DenseSet<Value *> Processed;
2810 for (T *V : DeadVals) {
2811 if (!V || !Processed.insert(V).second)
2812 continue;
2813 auto *I = cast<Instruction>(V);
2814 salvageDebugInfo(*I);
2815 SmallVector<const TreeEntry *> Entries;
2816 if (const TreeEntry *Entry = getTreeEntry(I)) {
2817 Entries.push_back(Entry);
2818 auto It = MultiNodeScalars.find(I);
2819 if (It != MultiNodeScalars.end())
2820 Entries.append(It->second.begin(), It->second.end());
2822 for (Use &U : I->operands()) {
2823 if (auto *OpI = dyn_cast_if_present<Instruction>(U.get());
2824 OpI && !DeletedInstructions.contains(OpI) && OpI->hasOneUser() &&
2825 wouldInstructionBeTriviallyDead(OpI, TLI) &&
2826 (Entries.empty() || none_of(Entries, [&](const TreeEntry *Entry) {
2827 return Entry->VectorizedValue == OpI;
2828 })))
2829 DeadInsts.push_back(OpI);
2831 I->dropAllReferences();
2833 for (T *V : DeadVals) {
2834 auto *I = cast<Instruction>(V);
2835 if (!I->getParent())
2836 continue;
2837 assert((I->use_empty() || all_of(I->uses(),
2838 [&](Use &U) {
2839 return isDeleted(
2840 cast<Instruction>(U.getUser()));
2841 })) &&
2842 "trying to erase instruction with users.");
2843 I->removeFromParent();
2844 SE->forgetValue(I);
2846 // Process the dead instruction list until empty.
2847 while (!DeadInsts.empty()) {
2848 Value *V = DeadInsts.pop_back_val();
2849 Instruction *VI = cast_or_null<Instruction>(V);
2850 if (!VI || !VI->getParent())
2851 continue;
2852 assert(isInstructionTriviallyDead(VI, TLI) &&
2853 "Live instruction found in dead worklist!");
2854 assert(VI->use_empty() && "Instructions with uses are not dead.");
2856 // Don't lose the debug info while deleting the instructions.
2857 salvageDebugInfo(*VI);
2859 // Null out all of the instruction's operands to see if any operand
2860 // becomes dead as we go.
2861 for (Use &OpU : VI->operands()) {
2862 Value *OpV = OpU.get();
2863 if (!OpV)
2864 continue;
2865 OpU.set(nullptr);
2867 if (!OpV->use_empty())
2868 continue;
2870 // If the operand is an instruction that became dead as we nulled out
2871 // the operand, and if it is 'trivially' dead, delete it in a future
2872 // loop iteration.
2873 if (auto *OpI = dyn_cast<Instruction>(OpV))
2874 if (!DeletedInstructions.contains(OpI) &&
2875 isInstructionTriviallyDead(OpI, TLI))
2876 DeadInsts.push_back(OpI);
2879 VI->removeFromParent();
2880 DeletedInstructions.insert(VI);
2881 SE->forgetValue(VI);
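    // For example (illustrative): erasing a vectorized store whose address is
    // a GEP with no other users marks that GEP as trivially dead as well; the
    // GEP is collected into the worklist and removed by the loop above.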
2885 /// Checks if the instruction was already analyzed for being a possible
2886 /// reduction root.
2887 bool isAnalyzedReductionRoot(Instruction *I) const {
2888 return AnalyzedReductionsRoots.count(I);
2890 /// Register the given instruction as already analyzed for being a possible
2891 /// reduction root.
2892 void analyzedReductionRoot(Instruction *I) {
2893 AnalyzedReductionsRoots.insert(I);
2895 /// Checks if the provided list of reduced values was checked already for
2896 /// vectorization.
2897 bool areAnalyzedReductionVals(ArrayRef<Value *> VL) const {
2898 return AnalyzedReductionVals.contains(hash_value(VL));
2900 /// Adds the list of reduced values to the list of values already checked for
2901 /// vectorization.
2902 void analyzedReductionVals(ArrayRef<Value *> VL) {
2903 AnalyzedReductionVals.insert(hash_value(VL));
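    // A minimal usage sketch for the two helpers above (hypothetical caller):
    // skip lists that were already tried, remember new attempts by hash.
    //   if (!areAnalyzedReductionVals(Candidates)) {
    //     analyzedReductionVals(Candidates);
    //     // ...attempt to vectorize Candidates...
    //   }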
2905 /// Clear the list of the analyzed reduction root instructions.
2906 void clearReductionData() {
2907 AnalyzedReductionsRoots.clear();
2908 AnalyzedReductionVals.clear();
2909 AnalyzedMinBWVals.clear();
2911 /// Checks if the given value is gathered in one of the nodes.
2912 bool isAnyGathered(const SmallDenseSet<Value *> &Vals) const {
2913 return any_of(MustGather, [&](Value *V) { return Vals.contains(V); });
2915 /// Checks if the given value is gathered in one of the nodes.
2916 bool isGathered(const Value *V) const {
2917 return MustGather.contains(V);
2919 /// Checks if the specified value was not scheduled.
2920 bool isNotScheduled(const Value *V) const {
2921 return NonScheduledFirst.contains(V);
2924 /// Check if the value is vectorized in the tree.
2925 bool isVectorized(Value *V) const { return getTreeEntry(V); }
2927 ~BoUpSLP();
2929 private:
2930 /// Determine if a node \p E can be demoted to a smaller type with a
2931 /// truncation. We collect the entries that will be demoted in ToDemote.
2932 /// \param E Node for analysis
2933 /// \param ToDemote indices of the nodes to be demoted.
2934 bool collectValuesToDemote(const TreeEntry &E, bool IsProfitableToDemoteRoot,
2935 unsigned &BitWidth,
2936 SmallVectorImpl<unsigned> &ToDemote,
2937 DenseSet<const TreeEntry *> &Visited,
2938 unsigned &MaxDepthLevel,
2939 bool &IsProfitableToDemote,
2940 bool IsTruncRoot) const;
2942 /// Check if the operands on the edges \p Edges of the \p UserTE allow
2943 /// reordering (i.e. the operands can be reordered because they have only one
2944 /// user and are reorderable).
2945 /// \param ReorderableGathers List of all gather nodes that require reordering
2946 /// (e.g., gathers of extractelements or partially vectorizable loads).
2947 /// \param GatherOps List of gather operand nodes for \p UserTE that require
2948 /// reordering, subset of \p NonVectorized.
2949 bool
2950 canReorderOperands(TreeEntry *UserTE,
2951 SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
2952 ArrayRef<TreeEntry *> ReorderableGathers,
2953 SmallVectorImpl<TreeEntry *> &GatherOps);
2955 /// Checks if the given \p TE is a gather node with clustered reused scalars
2956 /// and reorders it per given \p Mask.
2957 void reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const;
2959 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2960 /// if any. If it is not vectorized (gather node), returns nullptr.
2961 TreeEntry *getVectorizedOperand(TreeEntry *UserTE, unsigned OpIdx) {
2962 ArrayRef<Value *> VL = UserTE->getOperand(OpIdx);
2963 TreeEntry *TE = nullptr;
2964 const auto *It = find_if(VL, [&](Value *V) {
2965 TE = getTreeEntry(V);
2966 if (TE && is_contained(TE->UserTreeIndices, EdgeInfo(UserTE, OpIdx)))
2967 return true;
2968 auto It = MultiNodeScalars.find(V);
2969 if (It != MultiNodeScalars.end()) {
2970 for (TreeEntry *E : It->second) {
2971 if (is_contained(E->UserTreeIndices, EdgeInfo(UserTE, OpIdx))) {
2972 TE = E;
2973 return true;
2977 return false;
2979 if (It != VL.end()) {
2980 assert(TE->isSame(VL) && "Expected same scalars.");
2981 return TE;
2983 return nullptr;
2986 /// Returns vectorized operand \p OpIdx of the node \p UserTE from the graph,
2987 /// if any. If it is not vectorized (gather node), returns nullptr.
2988 const TreeEntry *getVectorizedOperand(const TreeEntry *UserTE,
2989 unsigned OpIdx) const {
2990 return const_cast<BoUpSLP *>(this)->getVectorizedOperand(
2991 const_cast<TreeEntry *>(UserTE), OpIdx);
2994 /// Checks if all users of \p I are the part of the vectorization tree.
2995 bool areAllUsersVectorized(
2996 Instruction *I,
2997 const SmallDenseSet<Value *> *VectorizedVals = nullptr) const;
2999 /// Return information about the vector formed for the specified index
3000 /// of a vector of (the same) instruction.
3001 TargetTransformInfo::OperandValueInfo getOperandInfo(ArrayRef<Value *> Ops);
3003 /// \returns the graph entry for the \p Idx operand of the \p E entry.
3004 const TreeEntry *getOperandEntry(const TreeEntry *E, unsigned Idx) const;
3006 /// Gets the root instruction for the given node. If the node is a strided
3007 /// load/store node with the reverse order, the root instruction is the last
3008 /// one.
3009 Instruction *getRootEntryInstruction(const TreeEntry &Entry) const;
3011 /// \returns Cast context for the given graph node.
3012 TargetTransformInfo::CastContextHint
3013 getCastContextHint(const TreeEntry &TE) const;
3015 /// \returns the cost of the vectorizable entry.
3016 InstructionCost getEntryCost(const TreeEntry *E,
3017 ArrayRef<Value *> VectorizedVals,
3018 SmallPtrSetImpl<Value *> &CheckedExtracts);
3020 /// This is the recursive part of buildTree.
3021 void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth,
3022 const EdgeInfo &EI, unsigned InterleaveFactor = 0);
3024 /// \returns true if the ExtractElement/ExtractValue instructions in \p VL can
3025 /// be vectorized to use the original vector (or aggregate "bitcast" to a
3026 /// vector) and sets \p CurrentOrder to the identity permutation; otherwise
3027 /// returns false, setting \p CurrentOrder to either an empty vector or a
3028 /// non-identity permutation that allows reusing extract instructions.
3029 /// \param ResizeAllowed indicates whether it is allowed to handle subvector
3030 /// extract order.
3031 bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
3032 SmallVectorImpl<unsigned> &CurrentOrder,
3033 bool ResizeAllowed = false) const;
3035 /// Vectorize a single entry in the tree.
3036 /// \param PostponedPHIs true if emission of phi nodes needs to be postponed
3037 /// to avoid issues with def-use order.
3038 Value *vectorizeTree(TreeEntry *E, bool PostponedPHIs);
3040 /// Returns the vectorized operand node that matches the order of the scalars
3041 /// of operand number \p NodeIdx in entry \p E.
3042 TreeEntry *getMatchedVectorizedOperand(const TreeEntry *E, unsigned NodeIdx);
3043 const TreeEntry *getMatchedVectorizedOperand(const TreeEntry *E,
3044 unsigned NodeIdx) const {
3045 return const_cast<BoUpSLP *>(this)->getMatchedVectorizedOperand(E, NodeIdx);
3048 /// Vectorize a single entry in the tree, the \p Idx-th operand of the entry
3049 /// \p E.
3050 /// \param PostponedPHIs true if emission of phi nodes needs to be postponed
3051 /// to avoid issues with def-use order.
3052 Value *vectorizeOperand(TreeEntry *E, unsigned NodeIdx, bool PostponedPHIs);
3054 /// Create a new vector from a list of scalar values. Produces a sequence
3055 /// which exploits values reused across lanes, and arranges the inserts
3056 /// for ease of later optimization.
3057 template <typename BVTy, typename ResTy, typename... Args>
3058 ResTy processBuildVector(const TreeEntry *E, Type *ScalarTy, Args &...Params);
3060 /// Create a new vector from a list of scalar values. Produces a sequence
3061 /// which exploits values reused across lanes, and arranges the inserts
3062 /// for ease of later optimization.
3063 Value *createBuildVector(const TreeEntry *E, Type *ScalarTy,
3064 bool PostponedPHIs);
3066 /// Returns the instruction in the bundle, which can be used as a base point
3067 /// for scheduling. Usually it is the last instruction in the bundle, except
3068 /// for the case when all operands are external (in this case, it is the first
3069 /// instruction in the list).
3070 Instruction &getLastInstructionInBundle(const TreeEntry *E);
3072 /// Tries to find extractelement instructions with constant indices from fixed
3073 /// vector type and gather such instructions into a group that is highly
3074 /// likely to be detected as a shuffle of 1 or 2 input vectors. If this attempt
3075 /// was successful, the matched scalars are replaced by poison values in \p VL
3076 /// for future analysis.
3077 std::optional<TargetTransformInfo::ShuffleKind>
3078 tryToGatherSingleRegisterExtractElements(MutableArrayRef<Value *> VL,
3079 SmallVectorImpl<int> &Mask) const;
3081 /// Tries to find extractelement instructions with constant indices from fixed
3082 /// vector type and gather such instructions into a group that is highly
3083 /// likely to be detected as a shuffle of 1 or 2 input vectors. If this attempt
3084 /// was successful, the matched scalars are replaced by poison values in \p VL
3085 /// for future analysis.
3086 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
3087 tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
3088 SmallVectorImpl<int> &Mask,
3089 unsigned NumParts) const;
3091 /// Checks if the gathered \p VL can be represented as a single register
3092 /// shuffle(s) of previous tree entries.
3093 /// \param TE Tree entry checked for permutation.
3094 /// \param VL List of scalars (a subset of the TE scalars), checked for
3095 /// permutations. Must form a single-register vector.
3096 /// \param ForOrder Tries to fetch the best candidates for ordering info. Also
3097 /// instructs building the mask using the original vector value, without
3098 /// relying on the potential reordering.
3099 /// \returns ShuffleKind, if gathered values can be represented as shuffles of
3100 /// previous tree entries. \p Part of \p Mask is filled with the shuffle mask.
3101 std::optional<TargetTransformInfo::ShuffleKind>
3102 isGatherShuffledSingleRegisterEntry(
3103 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
3104 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part,
3105 bool ForOrder);
3107 /// Checks if the gathered \p VL can be represented as multi-register
3108 /// shuffle(s) of previous tree entries.
3109 /// \param TE Tree entry checked for permutation.
3110 /// \param VL List of scalars (a subset of the TE scalars), checked for
3111 /// permutations.
3112 /// \param ForOrder Tries to fetch the best candidates for ordering info. Also
3113 /// instructs building the mask using the original vector value, without
3114 /// relying on the potential reordering.
3115 /// \returns per-register series of ShuffleKind, if gathered values can be
3116 /// represented as shuffles of previous tree entries. \p Mask is filled with
3117 /// the shuffle mask (also on per-register base).
3118 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
3119 isGatherShuffledEntry(
3120 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
3121 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries,
3122 unsigned NumParts, bool ForOrder = false);
3124 /// \returns the cost of gathering (inserting) the values in \p VL into a
3125 /// vector.
3126 /// \param ForPoisonSrc true if initial vector is poison, false otherwise.
3127 InstructionCost getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
3128 Type *ScalarTy) const;
3130 /// Set the Builder insert point to one after the last instruction in
3131 /// the bundle
3132 void setInsertPointAfterBundle(const TreeEntry *E);
3134 /// \returns a vector from a collection of scalars in \p VL. If \p Root is not
3135 /// specified, the starting vector value is poison.
3136 Value *
3137 gather(ArrayRef<Value *> VL, Value *Root, Type *ScalarTy,
3138 function_ref<Value *(Value *, Value *, ArrayRef<int>)> CreateShuffle);
3140 /// \returns whether the VectorizableTree is fully vectorizable and will
3141 /// be beneficial even if the tree height is tiny.
3142 bool isFullyVectorizableTinyTree(bool ForReduction) const;
3144 /// Run through the list of all gathered loads in the graph and try to find
3145 /// vector loads/masked gathers instead of regular gathers. Later these loads
3146 /// are reshuffled to build final gathered nodes.
3147 void tryToVectorizeGatheredLoads(
3148 const SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
3149 SmallVector<SmallVector<std::pair<LoadInst *, int>>>,
3150 8> &GatheredLoads);
3152 /// Helper for `findExternalStoreUsersReorderIndices()`. It iterates over the
3153 /// users of \p TE and collects the stores. It returns the map from the store
3154 /// pointers to the collected stores.
3155 SmallVector<SmallVector<StoreInst *>>
3156 collectUserStores(const BoUpSLP::TreeEntry *TE) const;
3158 /// Helper for `findExternalStoreUsersReorderIndices()`. It checks if the
3159 /// stores in \p StoresVec can form a vector instruction. If so it returns
3160 /// true and populates \p ReorderIndices with the shuffle indices of the
3161 /// stores when compared to the sorted vector.
3162 bool canFormVector(ArrayRef<StoreInst *> StoresVec,
3163 OrdersType &ReorderIndices) const;
3165 /// Iterates through the users of \p TE, looking for scalar stores that can be
3166 /// potentially vectorized in a future SLP-tree. If found, it keeps track of
3167 /// their order and builds an order index vector for each store bundle. It
3168 /// returns all these order vectors found.
3169 /// We run this after the tree has formed, otherwise we may come across user
3170 /// instructions that are not yet in the tree.
3171 SmallVector<OrdersType, 1>
3172 findExternalStoreUsersReorderIndices(TreeEntry *TE) const;
3174 /// Tries to reorder the gathering node for better vectorization
3175 /// opportunities.
3176 void reorderGatherNode(TreeEntry &TE);
3178 struct TreeEntry {
3179 using VecTreeTy = SmallVector<std::unique_ptr<TreeEntry>, 8>;
3180 TreeEntry(VecTreeTy &Container) : Container(Container) {}
3182 /// \returns Common mask for reorder indices and reused scalars.
3183 SmallVector<int> getCommonMask() const {
3184 SmallVector<int> Mask;
3185 inversePermutation(ReorderIndices, Mask);
3186 ::addMask(Mask, ReuseShuffleIndices);
3187 return Mask;
3190 /// \returns true if the scalars in VL are equal to this entry.
3191 bool isSame(ArrayRef<Value *> VL) const {
3192 auto &&IsSame = [VL](ArrayRef<Value *> Scalars, ArrayRef<int> Mask) {
3193 if (Mask.size() != VL.size() && VL.size() == Scalars.size())
3194 return std::equal(VL.begin(), VL.end(), Scalars.begin());
3195 return VL.size() == Mask.size() &&
3196 std::equal(VL.begin(), VL.end(), Mask.begin(),
3197 [Scalars](Value *V, int Idx) {
3198 return (isa<UndefValue>(V) &&
3199 Idx == PoisonMaskElem) ||
3200 (Idx != PoisonMaskElem && V == Scalars[Idx]);
3203 if (!ReorderIndices.empty()) {
3204 // TODO: implement matching if the nodes are just reordered, still can
3205 // treat the vector as the same if the list of scalars matches VL
3206 // directly, without reordering.
3207 SmallVector<int> Mask;
3208 inversePermutation(ReorderIndices, Mask);
3209 if (VL.size() == Scalars.size())
3210 return IsSame(Scalars, Mask);
3211 if (VL.size() == ReuseShuffleIndices.size()) {
3212 ::addMask(Mask, ReuseShuffleIndices);
3213 return IsSame(Scalars, Mask);
3215 return false;
3217 return IsSame(Scalars, ReuseShuffleIndices);
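      // Illustrative example (hypothetical values): with Scalars = {A, B} and
      // ReuseShuffleIndices = {0, 1, 0, 1}, the list VL = {A, B, A, B} is
      // considered the same entry; undefs in VL also match poison mask slots.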
3220 bool isOperandGatherNode(const EdgeInfo &UserEI) const {
3221 return isGather() && !UserTreeIndices.empty() &&
3222 UserTreeIndices.front().EdgeIdx == UserEI.EdgeIdx &&
3223 UserTreeIndices.front().UserTE == UserEI.UserTE;
3226 /// \returns true if current entry has same operands as \p TE.
3227 bool hasEqualOperands(const TreeEntry &TE) const {
3228 if (TE.getNumOperands() != getNumOperands())
3229 return false;
3230 SmallBitVector Used(getNumOperands());
3231 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
3232 unsigned PrevCount = Used.count();
3233 for (unsigned K = 0; K < E; ++K) {
3234 if (Used.test(K))
3235 continue;
3236 if (getOperand(K) == TE.getOperand(I)) {
3237 Used.set(K);
3238 break;
3241 // Check if we actually found the matching operand.
3242 if (PrevCount == Used.count())
3243 return false;
3245 return true;
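      // Illustrative example: operands are matched as an unordered multiset,
      // so entries with operands {X, Y} and {Y, X} compare equal, while
      // {X, X} does not match {X, Y} (each operand may be matched only once).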
3248 /// \return Final vectorization factor for the node. Defined by the total
3249 /// number of vectorized scalars, including those used several times in the
3250 /// entry and counted in the \a ReuseShuffleIndices, if any.
3251 unsigned getVectorFactor() const {
3252 if (!ReuseShuffleIndices.empty())
3253 return ReuseShuffleIndices.size();
3254 return Scalars.size();
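      // E.g. (illustrative) a node with 4 scalars broadcast into 8 lanes via
      // ReuseShuffleIndices of size 8 has a vector factor of 8; without reuse
      // indices the factor equals the number of scalars.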
3257 /// Checks if the current node is a gather node.
3258 bool isGather() const { return State == NeedToGather; }
3260 /// A vector of scalars.
3261 ValueList Scalars;
3263 /// The Scalars are vectorized into this value. It is initialized to Null.
3264 WeakTrackingVH VectorizedValue = nullptr;
3266 /// New vector phi instructions emitted for the vectorized phi nodes.
3267 PHINode *PHI = nullptr;
3269 /// Do we need to gather this sequence or vectorize it
3270 /// (either with vector instruction or with scatter/gather
3271 /// intrinsics for store/load)?
3272 enum EntryState {
3273 Vectorize, ///< The node is regularly vectorized.
3274 ScatterVectorize, ///< Masked scatter/gather node.
3275 StridedVectorize, ///< Strided loads (and stores)
3276 NeedToGather, ///< Gather/buildvector node.
3277 CombinedVectorize, ///< Vectorized node, combined with its user into more
3278 ///< complex node like select/cmp to minmax, mul/add to
3279 ///< fma, etc. Must be used for the following nodes in
3280 ///< the pattern, not the very first one.
3282 EntryState State;
3284 /// List of combined opcodes supported by the vectorizer.
3285 enum CombinedOpcode {
3286 NotCombinedOp = -1,
3287 MinMax = Instruction::OtherOpsEnd + 1,
3289 CombinedOpcode CombinedOp = NotCombinedOp;
3291 /// Does this sequence require some shuffling?
3292 SmallVector<int, 4> ReuseShuffleIndices;
3294 /// Does this entry require reordering?
3295 SmallVector<unsigned, 4> ReorderIndices;
3297 /// Points back to the VectorizableTree.
3299 /// Only used for Graphviz right now. Unfortunately GraphTraits::NodeRef has
3300 /// to be a pointer and needs to be able to initialize the child iterator.
3301 /// Thus we need a reference back to the container to translate the indices
3302 /// to entries.
3303 VecTreeTy &Container;
3305 /// The TreeEntry index containing the user of this entry. We can actually
3306 /// have multiple users so the data structure is not truly a tree.
3307 SmallVector<EdgeInfo, 1> UserTreeIndices;
3309 /// The index of this treeEntry in VectorizableTree.
3310 unsigned Idx = 0;
3312 /// For gather/buildvector/alt opcode (TODO) nodes, which are combined from
3313 /// other nodes as a series of insertvector instructions.
3314 SmallVector<std::pair<unsigned, unsigned>, 0> CombinedEntriesWithIndices;
3316 private:
3317 /// The operands of each instruction in each lane Operands[op_index][lane].
3318 /// Note: This helps avoid the replication of the code that performs the
3319 /// reordering of operands during buildTree_rec() and vectorizeTree().
3320 SmallVector<ValueList, 2> Operands;
3322 /// The main/alternate instruction.
3323 Instruction *MainOp = nullptr;
3324 Instruction *AltOp = nullptr;
3326 /// Interleaving factor for interleaved loads Vectorize nodes.
3327 unsigned InterleaveFactor = 0;
3329 public:
3330 /// Returns interleave factor for interleave nodes.
3331 unsigned getInterleaveFactor() const { return InterleaveFactor; }
3332 /// Sets interleaving factor for the interleaving nodes.
3333 void setInterleave(unsigned Factor) { InterleaveFactor = Factor; }
3335 /// Set this bundle's \p OpIdx'th operand to \p OpVL.
3336 void setOperand(unsigned OpIdx, ArrayRef<Value *> OpVL) {
3337 if (Operands.size() < OpIdx + 1)
3338 Operands.resize(OpIdx + 1);
3339 assert(Operands[OpIdx].empty() && "Already resized?");
3340 assert(OpVL.size() <= Scalars.size() &&
3341 "Number of operands is greater than the number of scalars.");
3342 Operands[OpIdx].resize(OpVL.size());
3343 copy(OpVL, Operands[OpIdx].begin());
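      // Illustrative layout (hypothetical bundle of four adds): setOperand(0,
      // {L0, L1, L2, L3}) stores the left-hand operands and setOperand(1,
      // {R0, R1, R2, R3}) the right-hand ones, so Operands[Op][Lane] yields
      // lane Lane's Op-th operand.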
3346 /// Set this bundle's operand from Scalars.
3347 void setOperand(const BoUpSLP &R, bool RequireReorder = false) {
3348 VLOperands Ops(Scalars, MainOp, R);
3349 if (RequireReorder)
3350 Ops.reorder();
3351 for (unsigned I : seq<unsigned>(MainOp->getNumOperands()))
3352 setOperand(I, Ops.getVL(I));
3355 /// Reorders operands of the node to the given mask \p Mask.
3356 void reorderOperands(ArrayRef<int> Mask) {
3357 for (ValueList &Operand : Operands)
3358 reorderScalars(Operand, Mask);
3361 /// \returns the \p OpIdx operand of this TreeEntry.
3362 ValueList &getOperand(unsigned OpIdx) {
3363 assert(OpIdx < Operands.size() && "Off bounds");
3364 return Operands[OpIdx];
3367 /// \returns the \p OpIdx operand of this TreeEntry.
3368 ArrayRef<Value *> getOperand(unsigned OpIdx) const {
3369 assert(OpIdx < Operands.size() && "Off bounds");
3370 return Operands[OpIdx];
3373 /// \returns the number of operands.
3374 unsigned getNumOperands() const { return Operands.size(); }
3376 /// \return the single \p OpIdx operand.
3377 Value *getSingleOperand(unsigned OpIdx) const {
3378 assert(OpIdx < Operands.size() && "Off bounds");
3379 assert(!Operands[OpIdx].empty() && "No operand available");
3380 return Operands[OpIdx][0];
3383 /// Some of the instructions in the list have alternate opcodes.
3384 bool isAltShuffle() const { return MainOp != AltOp; }
3386 bool isOpcodeOrAlt(Instruction *I) const {
3387 unsigned CheckedOpcode = I->getOpcode();
3388 return (getOpcode() == CheckedOpcode ||
3389 getAltOpcode() == CheckedOpcode);
3392 /// Chooses the correct key for scheduling data. If \p Op has the same (or
3393 /// alternate) opcode as the main operation of this entry, the key is \p Op.
3394 /// Otherwise the key is the main operation.
3395 Value *isOneOf(Value *Op) const {
3396 auto *I = dyn_cast<Instruction>(Op);
3397 if (I && isOpcodeOrAlt(I))
3398 return Op;
3399 return MainOp;
3402 void setOperations(const InstructionsState &S) {
3403 MainOp = S.getMainOp();
3404 AltOp = S.getAltOp();
3407 Instruction *getMainOp() const {
3408 return MainOp;
3411 Instruction *getAltOp() const {
3412 return AltOp;
3415 /// The main/alternate opcodes for the list of instructions.
3416 unsigned getOpcode() const {
3417 return MainOp ? MainOp->getOpcode() : 0;
3420 unsigned getAltOpcode() const {
3421 return AltOp ? AltOp->getOpcode() : 0;
3424 /// When ReorderIndices and ReuseShuffleIndices are empty it just returns the
3425 /// position of \p V within Scalars. Otherwise, remaps it through those indices.
3426 int findLaneForValue(Value *V) const {
3427 unsigned FoundLane = getVectorFactor();
3428 for (auto *It = find(Scalars, V), *End = Scalars.end(); It != End;
3429 std::advance(It, 1)) {
3430 if (*It != V)
3431 continue;
3432 FoundLane = std::distance(Scalars.begin(), It);
3433 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
3434 if (!ReorderIndices.empty())
3435 FoundLane = ReorderIndices[FoundLane];
3436 assert(FoundLane < Scalars.size() && "Couldn't find extract lane");
3437 if (ReuseShuffleIndices.empty())
3438 break;
3439 if (auto *RIt = find(ReuseShuffleIndices, FoundLane);
3440 RIt != ReuseShuffleIndices.end()) {
3441 FoundLane = std::distance(ReuseShuffleIndices.begin(), RIt);
3442 break;
3445 assert(FoundLane < getVectorFactor() && "Unable to find given value.");
3446 return FoundLane;
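      // Illustrative remapping (hypothetical values): V found at position 2 of
      // Scalars, ReorderIndices = {3, 2, 1, 0} maps it to lane 1, and if
      // ReuseShuffleIndices = {0, 1, 2, 3, 0, 1, 2, 3} the first reuse slot
      // holding 1 is returned, i.e. lane 1.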
3449 /// Build a shuffle mask for graph entry which represents a merge of main
3450 /// and alternate operations.
3451 void
3452 buildAltOpShuffleMask(const function_ref<bool(Instruction *)> IsAltOp,
3453 SmallVectorImpl<int> &Mask,
3454 SmallVectorImpl<Value *> *OpScalars = nullptr,
3455 SmallVectorImpl<Value *> *AltScalars = nullptr) const;
3457 /// Return true if this is a non-power-of-2 node.
3458 bool isNonPowOf2Vec() const {
3459 bool IsNonPowerOf2 = !has_single_bit(Scalars.size());
3460 return IsNonPowerOf2;
3463 /// Return true if the number of elements in this node neither forms whole
3464 /// vector registers nor is a power of 2.
3465 bool
3466 hasNonWholeRegisterOrNonPowerOf2Vec(const TargetTransformInfo &TTI) const {
3467 bool IsNonPowerOf2 = !hasFullVectorsOrPowerOf2(
3468 TTI, getValueType(Scalars.front()), Scalars.size());
3469 assert((!IsNonPowerOf2 || ReuseShuffleIndices.empty()) &&
3470 "Reshuffling not supported with non-power-of-2 vectors yet.");
3471 return IsNonPowerOf2;
3474 Value *getOrdered(unsigned Idx) const {
3475 assert(isGather() && "Must be used only for buildvectors/gathers.");
3476 if (ReorderIndices.empty())
3477 return Scalars[Idx];
3478 SmallVector<int> Mask;
3479 inversePermutation(ReorderIndices, Mask);
3480 return Scalars[Mask[Idx]];
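      // Illustrative example (assuming inversePermutation sets
      // Mask[ReorderIndices[I]] = I): with ReorderIndices = {2, 0, 1} the
      // inverse mask is {1, 2, 0}, so getOrdered(0) returns Scalars[1],
      // getOrdered(1) returns Scalars[2] and getOrdered(2) returns Scalars[0].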
3483 #ifndef NDEBUG
3484 /// Debug printer.
3485 LLVM_DUMP_METHOD void dump() const {
3486 dbgs() << Idx << ".\n";
3487 for (unsigned OpI = 0, OpE = Operands.size(); OpI != OpE; ++OpI) {
3488 dbgs() << "Operand " << OpI << ":\n";
3489 for (const Value *V : Operands[OpI])
3490 dbgs().indent(2) << *V << "\n";
3492 dbgs() << "Scalars: \n";
3493 for (Value *V : Scalars)
3494 dbgs().indent(2) << *V << "\n";
3495 dbgs() << "State: ";
3496 switch (State) {
3497 case Vectorize:
3498 if (InterleaveFactor > 0) {
3499 dbgs() << "Vectorize with interleave factor " << InterleaveFactor
3500 << "\n";
3501 } else {
3502 dbgs() << "Vectorize\n";
3504 break;
3505 case ScatterVectorize:
3506 dbgs() << "ScatterVectorize\n";
3507 break;
3508 case StridedVectorize:
3509 dbgs() << "StridedVectorize\n";
3510 break;
3511 case NeedToGather:
3512 dbgs() << "NeedToGather\n";
3513 break;
3514 case CombinedVectorize:
3515 dbgs() << "CombinedVectorize\n";
3516 break;
3518 dbgs() << "MainOp: ";
3519 if (MainOp)
3520 dbgs() << *MainOp << "\n";
3521 else
3522 dbgs() << "NULL\n";
3523 dbgs() << "AltOp: ";
3524 if (AltOp)
3525 dbgs() << *AltOp << "\n";
3526 else
3527 dbgs() << "NULL\n";
3528 dbgs() << "VectorizedValue: ";
3529 if (VectorizedValue)
3530 dbgs() << *VectorizedValue << "\n";
3531 else
3532 dbgs() << "NULL\n";
3533 dbgs() << "ReuseShuffleIndices: ";
3534 if (ReuseShuffleIndices.empty())
3535 dbgs() << "Empty";
3536 else
3537 for (int ReuseIdx : ReuseShuffleIndices)
3538 dbgs() << ReuseIdx << ", ";
3539 dbgs() << "\n";
3540 dbgs() << "ReorderIndices: ";
3541 for (unsigned ReorderIdx : ReorderIndices)
3542 dbgs() << ReorderIdx << ", ";
3543 dbgs() << "\n";
3544 dbgs() << "UserTreeIndices: ";
3545 for (const auto &EInfo : UserTreeIndices)
3546 dbgs() << EInfo << ", ";
3547 dbgs() << "\n";
3549 #endif
3552 #ifndef NDEBUG
3553 void dumpTreeCosts(const TreeEntry *E, InstructionCost ReuseShuffleCost,
3554 InstructionCost VecCost, InstructionCost ScalarCost,
3555 StringRef Banner) const {
3556 dbgs() << "SLP: " << Banner << ":\n";
3557 E->dump();
3558 dbgs() << "SLP: Costs:\n";
3559 dbgs() << "SLP: ReuseShuffleCost = " << ReuseShuffleCost << "\n";
3560 dbgs() << "SLP: VectorCost = " << VecCost << "\n";
3561 dbgs() << "SLP: ScalarCost = " << ScalarCost << "\n";
3562 dbgs() << "SLP: ReuseShuffleCost + VecCost - ScalarCost = "
3563 << ReuseShuffleCost + VecCost - ScalarCost << "\n";
3565 #endif
3567 /// Create a new VectorizableTree entry.
3568 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
3569 std::optional<ScheduleData *> Bundle,
3570 const InstructionsState &S,
3571 const EdgeInfo &UserTreeIdx,
3572 ArrayRef<int> ReuseShuffleIndices = {},
3573 ArrayRef<unsigned> ReorderIndices = {},
3574 unsigned InterleaveFactor = 0) {
3575 TreeEntry::EntryState EntryState =
3576 Bundle ? TreeEntry::Vectorize : TreeEntry::NeedToGather;
3577 TreeEntry *E = newTreeEntry(VL, EntryState, Bundle, S, UserTreeIdx,
3578 ReuseShuffleIndices, ReorderIndices);
3579 if (E && InterleaveFactor > 0)
3580 E->setInterleave(InterleaveFactor);
3581 return E;
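    // Illustrative calls to the overload above (hypothetical arguments):
    // passing a real schedule bundle produces a Vectorize entry, while passing
    // std::nullopt produces a NeedToGather (buildvector) entry:
    //   newTreeEntry(VL, Bundle, S, UserTreeIdx);       // vectorized node
    //   newTreeEntry(VL, std::nullopt, S, UserTreeIdx); // gather node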
3584 TreeEntry *newTreeEntry(ArrayRef<Value *> VL,
3585 TreeEntry::EntryState EntryState,
3586 std::optional<ScheduleData *> Bundle,
3587 const InstructionsState &S,
3588 const EdgeInfo &UserTreeIdx,
3589 ArrayRef<int> ReuseShuffleIndices = {},
3590 ArrayRef<unsigned> ReorderIndices = {}) {
3591 assert(((!Bundle && EntryState == TreeEntry::NeedToGather) ||
3592 (Bundle && EntryState != TreeEntry::NeedToGather)) &&
3593 "Need to vectorize gather entry?");
3594 // Gathered loads still gathered? Do not create entry, use the original one.
3595 if (GatheredLoadsEntriesFirst.has_value() &&
3596 EntryState == TreeEntry::NeedToGather &&
3597 S.getOpcode() == Instruction::Load && UserTreeIdx.EdgeIdx == UINT_MAX &&
3598 !UserTreeIdx.UserTE)
3599 return nullptr;
3600 VectorizableTree.push_back(std::make_unique<TreeEntry>(VectorizableTree));
3601 TreeEntry *Last = VectorizableTree.back().get();
3602 Last->Idx = VectorizableTree.size() - 1;
3603 Last->State = EntryState;
3604 // FIXME: Remove once support for ReuseShuffleIndices has been implemented
3605 // for non-power-of-two vectors.
3606 assert(
3607 (hasFullVectorsOrPowerOf2(*TTI, getValueType(VL.front()), VL.size()) ||
3608 ReuseShuffleIndices.empty()) &&
3609 "Reshuffling scalars not yet supported for nodes with padding");
3610 Last->ReuseShuffleIndices.append(ReuseShuffleIndices.begin(),
3611 ReuseShuffleIndices.end());
3612 if (ReorderIndices.empty()) {
3613 Last->Scalars.assign(VL.begin(), VL.end());
3614 Last->setOperations(S);
3615 } else {
3616 // Reorder scalars and build final mask.
3617 Last->Scalars.assign(VL.size(), nullptr);
3618 transform(ReorderIndices, Last->Scalars.begin(),
3619 [VL](unsigned Idx) -> Value * {
3620 if (Idx >= VL.size())
3621 return UndefValue::get(VL.front()->getType());
3622 return VL[Idx];
3624 InstructionsState S = getSameOpcode(Last->Scalars, *TLI);
3625 Last->setOperations(S);
3626 Last->ReorderIndices.append(ReorderIndices.begin(), ReorderIndices.end());
3628 if (!Last->isGather()) {
3629 for (Value *V : VL) {
3630 const TreeEntry *TE = getTreeEntry(V);
3631 assert((!TE || TE == Last || doesNotNeedToBeScheduled(V)) &&
3632 "Scalar already in tree!");
3633 if (TE) {
3634 if (TE != Last)
3635 MultiNodeScalars.try_emplace(V).first->getSecond().push_back(Last);
3636 continue;
3638 ScalarToTreeEntry[V] = Last;
3640 // Update the scheduler bundle to point to this TreeEntry.
3641 ScheduleData *BundleMember = *Bundle;
3642 assert((BundleMember || isa<PHINode>(S.getMainOp()) ||
3643 isVectorLikeInstWithConstOps(S.getMainOp()) ||
3644 doesNotNeedToSchedule(VL)) &&
3645 "Bundle and VL out of sync");
3646 if (BundleMember) {
3647 for (Value *V : VL) {
3648 if (doesNotNeedToBeScheduled(V))
3649 continue;
3650 if (!BundleMember)
3651 continue;
3652 BundleMember->TE = Last;
3653 BundleMember = BundleMember->NextInBundle;
3656 assert(!BundleMember && "Bundle and VL out of sync");
3657 } else {
3658 // Build a map for gathered scalars to the nodes where they are used.
3659 bool AllConstsOrCasts = true;
3660 for (Value *V : VL)
3661 if (!isConstant(V)) {
3662 auto *I = dyn_cast<CastInst>(V);
3663 AllConstsOrCasts &= I && I->getType()->isIntegerTy();
3664 if (UserTreeIdx.EdgeIdx != UINT_MAX || !UserTreeIdx.UserTE ||
3665 !UserTreeIdx.UserTE->isGather())
3666 ValueToGatherNodes.try_emplace(V).first->getSecond().insert(Last);
3668 if (AllConstsOrCasts)
3669 CastMaxMinBWSizes =
3670 std::make_pair(std::numeric_limits<unsigned>::max(), 1);
3671 MustGather.insert(VL.begin(), VL.end());
3674 if (UserTreeIdx.UserTE)
3675 Last->UserTreeIndices.push_back(UserTreeIdx);
3676 return Last;
3679 /// -- Vectorization State --
3680 /// Holds all of the tree entries.
3681 TreeEntry::VecTreeTy VectorizableTree;
3683 #ifndef NDEBUG
3684 /// Debug printer.
3685 LLVM_DUMP_METHOD void dumpVectorizableTree() const {
3686 for (unsigned Id = 0, IdE = VectorizableTree.size(); Id != IdE; ++Id) {
3687 VectorizableTree[Id]->dump();
3688 dbgs() << "\n";
3691 #endif
3693 TreeEntry *getTreeEntry(Value *V) { return ScalarToTreeEntry.lookup(V); }
3695 const TreeEntry *getTreeEntry(Value *V) const {
3696 return ScalarToTreeEntry.lookup(V);
3699 /// Check that the operand node of an alternate node does not generate a
3700 /// buildvector sequence. If it does, it is probably not worth building an
3701 /// alternate shuffle when the number of buildvector operands plus the
3702 /// alternate instruction exceeds the number of buildvector instructions.
3703 /// \param S the instructions state of the analyzed values.
3704 /// \param VL list of the instructions with alternate opcodes.
3705 bool areAltOperandsProfitable(const InstructionsState &S,
3706 ArrayRef<Value *> VL) const;
3708 /// Checks if the specified list of the instructions/values can be vectorized
3709 /// and fills required data before actual scheduling of the instructions.
3710 TreeEntry::EntryState
3711 getScalarsVectorizationState(const InstructionsState &S, ArrayRef<Value *> VL,
3712 bool IsScatterVectorizeUserTE,
3713 OrdersType &CurrentOrder,
3714 SmallVectorImpl<Value *> &PointerOps);
3716 /// Maps a specific scalar to its tree entry.
3717 SmallDenseMap<Value *, TreeEntry *> ScalarToTreeEntry;
3719 /// Maps scalars that are used in several vectorized nodes to the list of
3720 /// those nodes.
3721 SmallDenseMap<Value *, SmallVector<TreeEntry *>> MultiNodeScalars;
3723 /// Maps a value to the proposed vectorizable size.
3724 SmallDenseMap<Value *, unsigned> InstrElementSize;
3726 /// A list of scalars that we found that we need to keep as scalars.
3727 ValueSet MustGather;
3729 /// A set of first non-schedulable values.
3730 ValueSet NonScheduledFirst;
3732 /// A map between the vectorized entries and the last instructions in the
3733 /// bundles. The bundles are built in use order, not in the def order of the
3734 /// instructions, so we cannot rely on the last instruction in the bundle
3735 /// being the last instruction in program order during the vectorization
3736 /// process (the basic blocks are affected); the last instructions need to be
3737 /// pre-gathered beforehand.
3738 DenseMap<const TreeEntry *, Instruction *> EntryToLastInstruction;
3740 /// List of gather nodes, depending on other gather/vector nodes, which should
3741 /// be emitted after the vector instruction emission process to correctly
3742 /// handle order of the vector instructions and shuffles.
3743 SetVector<const TreeEntry *> PostponedGathers;
3745 using ValueToGatherNodesMap =
3746 DenseMap<Value *, SmallPtrSet<const TreeEntry *, 4>>;
3747 ValueToGatherNodesMap ValueToGatherNodes;
3749 /// A list of the load entries (node indices) that can be vectorized using a
3750 /// strided or masked gather approach, but which we first attempt to
3751 /// represent as contiguous loads.
3752 SetVector<unsigned> LoadEntriesToVectorize;
3754 /// True if the graph-node transforming mode is on.
3755 bool IsGraphTransformMode = false;
3757 /// The index of the first gathered load entry in the VectorizeTree.
3758 std::optional<unsigned> GatheredLoadsEntriesFirst;
3760 /// This POD struct describes one external user in the vectorized tree.
3761 struct ExternalUser {
3762 ExternalUser(Value *S, llvm::User *U, int L)
3763 : Scalar(S), User(U), Lane(L) {}
3765 // Which scalar in our function.
3766 Value *Scalar;
3768 // Which user that uses the scalar.
3769 llvm::User *User;
3771 // Which lane does the scalar belong to.
3772 int Lane;
3774 using UserList = SmallVector<ExternalUser, 16>;
3776 /// Checks if two instructions may access the same memory.
3778 /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
3779 /// is invariant in the calling loop.
3780 bool isAliased(const MemoryLocation &Loc1, Instruction *Inst1,
3781 Instruction *Inst2) {
3782 if (!Loc1.Ptr || !isSimple(Inst1) || !isSimple(Inst2))
3783 return true;
3784 // First check if the result is already in the cache.
3785 AliasCacheKey Key = std::make_pair(Inst1, Inst2);
3786 auto It = AliasCache.find(Key);
3787 if (It != AliasCache.end())
3788 return It->second;
3789 bool Aliased = isModOrRefSet(BatchAA.getModRefInfo(Inst2, Loc1));
3790 // Store the result in the cache.
3791 AliasCache.try_emplace(Key, Aliased);
3792 AliasCache.try_emplace(std::make_pair(Inst2, Inst1), Aliased);
3793 return Aliased;
3796 using AliasCacheKey = std::pair<Instruction *, Instruction *>;
3798 /// Cache for alias results.
3799 /// TODO: consider moving this to the AliasAnalysis itself.
3800 DenseMap<AliasCacheKey, bool> AliasCache;
3802 // Cache for pointerMayBeCaptured calls inside AA. This is preserved
3803 // globally through SLP because we don't perform any action which
3804 // invalidates capture results.
3805 BatchAAResults BatchAA;
3807 /// Temporary store for deleted instructions. Instructions will be deleted
3808 /// eventually when the BoUpSLP is destructed. The deferral is required to
3809 /// ensure that there are no incorrect collisions in the AliasCache, which
3810 /// can happen if a new instruction is allocated at the same address as a
3811 /// previously deleted instruction.
3812 DenseSet<Instruction *> DeletedInstructions;
3814 /// Set of the instruction, being analyzed already for reductions.
3815 SmallPtrSet<Instruction *, 16> AnalyzedReductionsRoots;
3817 /// Set of hashes for the list of reduction values already being analyzed.
3818 DenseSet<size_t> AnalyzedReductionVals;
3820 /// Values that have already been analyzed for minimal bitwidth and found to be
3821 /// non-profitable.
3822 DenseSet<Value *> AnalyzedMinBWVals;
3824 /// A list of values that need to be extracted out of the tree.
3825 /// This list holds pairs of (Internal Scalar : External User). External User
3826 /// can be nullptr, it means that this Internal Scalar will be used later,
3827 /// after vectorization.
3828 UserList ExternalUses;
3830 /// A list of GEPs which can be replaced by scalar GEPs instead of
3831 /// extractelement instructions.
3832 SmallPtrSet<Value *, 4> ExternalUsesAsOriginalScalar;
3834 /// Values used only by @llvm.assume calls.
3835 SmallPtrSet<const Value *, 32> EphValues;
3837 /// Holds all of the instructions that we gathered, shuffle instructions and
3838 /// extractelements.
3839 SetVector<Instruction *> GatherShuffleExtractSeq;
3841 /// A list of blocks that we are going to CSE.
3842 DenseSet<BasicBlock *> CSEBlocks;
3844 /// List of hashes of vectors of loads which are known to be non-vectorizable.
3845 DenseSet<size_t> ListOfKnonwnNonVectorizableLoads;
3847 /// Contains all scheduling relevant data for an instruction.
3848 /// A ScheduleData either represents a single instruction or a member of an
3849 /// instruction bundle (= a group of instructions which is combined into a
3850 /// vector instruction).
3851 struct ScheduleData {
3852 // The initial value for the dependency counters. It means that the
3853 // dependencies are not calculated yet.
3854 enum { InvalidDeps = -1 };
3856 ScheduleData() = default;
3858 void init(int BlockSchedulingRegionID, Instruction *I) {
3859 FirstInBundle = this;
3860 NextInBundle = nullptr;
3861 NextLoadStore = nullptr;
3862 IsScheduled = false;
3863 SchedulingRegionID = BlockSchedulingRegionID;
3864 clearDependencies();
3865 Inst = I;
3866 TE = nullptr;
3869 /// Verify basic self consistency properties
3870 void verify() {
3871 if (hasValidDependencies()) {
3872 assert(UnscheduledDeps <= Dependencies && "invariant");
3873 } else {
3874 assert(UnscheduledDeps == Dependencies && "invariant");
3877 if (IsScheduled) {
3878 assert(isSchedulingEntity() &&
3879 "unexpected scheduled state");
3880 for (const ScheduleData *BundleMember = this; BundleMember;
3881 BundleMember = BundleMember->NextInBundle) {
3882 assert(BundleMember->hasValidDependencies() &&
3883 BundleMember->UnscheduledDeps == 0 &&
3884 "unexpected scheduled state");
3885 assert((BundleMember == this || !BundleMember->IsScheduled) &&
3886 "only bundle is marked scheduled");
3890 assert(Inst->getParent() == FirstInBundle->Inst->getParent() &&
3891 "all bundle members must be in same basic block");
3894 /// Returns true if the dependency information has been calculated.
3895 /// Note that dependency validity can vary between instructions within
3896 /// a single bundle.
3897 bool hasValidDependencies() const { return Dependencies != InvalidDeps; }
3899 /// Returns true for single instructions and for bundle representatives
3900 /// (= the head of a bundle).
3901 bool isSchedulingEntity() const { return FirstInBundle == this; }
3903 /// Returns true if it represents an instruction bundle and not only a
3904 /// single instruction.
3905 bool isPartOfBundle() const {
3906 return NextInBundle != nullptr || FirstInBundle != this || TE;
3909 /// Returns true if it is ready for scheduling, i.e. it has no more
3910 /// unscheduled depending instructions/bundles.
3911 bool isReady() const {
3912 assert(isSchedulingEntity() &&
3913 "can't consider non-scheduling entity for ready list");
3914 return unscheduledDepsInBundle() == 0 && !IsScheduled;
3917 /// Modifies the number of unscheduled dependencies for this instruction,
3918 /// and returns the number of remaining dependencies for the containing
3919 /// bundle.
3920 int incrementUnscheduledDeps(int Incr) {
3921 assert(hasValidDependencies() &&
3922 "increment of unscheduled deps would be meaningless");
3923 UnscheduledDeps += Incr;
3924 return FirstInBundle->unscheduledDepsInBundle();
3927 /// Sets the number of unscheduled dependencies to the number of
3928 /// dependencies.
3929 void resetUnscheduledDeps() {
3930 UnscheduledDeps = Dependencies;
3933 /// Clears all dependency information.
3934 void clearDependencies() {
3935 Dependencies = InvalidDeps;
3936 resetUnscheduledDeps();
3937 MemoryDependencies.clear();
3938 ControlDependencies.clear();
3941 int unscheduledDepsInBundle() const {
3942 assert(isSchedulingEntity() && "only meaningful on the bundle");
3943 int Sum = 0;
3944 for (const ScheduleData *BundleMember = this; BundleMember;
3945 BundleMember = BundleMember->NextInBundle) {
3946 if (BundleMember->UnscheduledDeps == InvalidDeps)
3947 return InvalidDeps;
3948 Sum += BundleMember->UnscheduledDeps;
3950 return Sum;
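      // E.g. (illustrative) a three-instruction bundle with per-member
      // UnscheduledDeps of 1, 0 and 2 reports 3; if any member still has
      // InvalidDeps, the whole bundle reports InvalidDeps.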
3953 void dump(raw_ostream &os) const {
3954 if (!isSchedulingEntity()) {
3955 os << "/ " << *Inst;
3956 } else if (NextInBundle) {
3957 os << '[' << *Inst;
3958 ScheduleData *SD = NextInBundle;
3959 while (SD) {
3960 os << ';' << *SD->Inst;
3961 SD = SD->NextInBundle;
3963 os << ']';
3964 } else {
3965 os << *Inst;
3969 Instruction *Inst = nullptr;
3971 /// The TreeEntry that this instruction corresponds to.
3972 TreeEntry *TE = nullptr;
3974 /// Points to the head in an instruction bundle (and always to this for
3975 /// single instructions).
3976 ScheduleData *FirstInBundle = nullptr;
3978 /// Single linked list of all instructions in a bundle. Null if it is a
3979 /// single instruction.
3980 ScheduleData *NextInBundle = nullptr;
3982 /// Single linked list of all memory instructions (e.g. load, store, call)
3983 /// in the block - until the end of the scheduling region.
3984 ScheduleData *NextLoadStore = nullptr;
3986 /// The dependent memory instructions.
3987 /// This list is derived on demand in calculateDependencies().
3988 SmallVector<ScheduleData *, 4> MemoryDependencies;
3990 /// List of instructions which this instruction could be control dependent
3991 /// on. Allowing such nodes to be scheduled below this one could introduce
3992 /// a runtime fault which didn't exist in the original program.
3993 /// e.g. this is a load or udiv following a readonly call which infinitely loops
3994 SmallVector<ScheduleData *, 4> ControlDependencies;
3996 /// This ScheduleData is in the current scheduling region if this matches
3997 /// the current SchedulingRegionID of BlockScheduling.
3998 int SchedulingRegionID = 0;
4000 /// Used for getting a "good" final ordering of instructions.
4001 int SchedulingPriority = 0;
4003 /// The number of dependencies. Consists of the number of users of the
4004 /// instruction plus the number of dependent memory instructions (if any).
4005 /// This value is calculated on demand.
4006 /// If InvalidDeps, the number of dependencies is not calculated yet.
4007 int Dependencies = InvalidDeps;
4009 /// The number of dependencies minus the number of dependencies of scheduled
4010 /// instructions. As soon as this is zero, the instruction/bundle gets ready
4011 /// for scheduling.
4012 /// Note that this is negative as long as Dependencies is not calculated.
4013 int UnscheduledDeps = InvalidDeps;
4015 /// True if this instruction is scheduled (or considered as scheduled in the
4016 /// dry-run).
4017 bool IsScheduled = false;
4020 #ifndef NDEBUG
4021 friend inline raw_ostream &operator<<(raw_ostream &os,
4022 const BoUpSLP::ScheduleData &SD) {
4023 SD.dump(os);
4024 return os;
4026 #endif
4028 friend struct GraphTraits<BoUpSLP *>;
4029 friend struct DOTGraphTraits<BoUpSLP *>;
4031 /// Contains all scheduling data for a basic block.
4032 /// It does not schedule instructions that are not memory read/write
4033 /// instructions and whose operands are either constants, arguments, phis, or
4034 /// instructions from other blocks, or whose users are phis or come from
4035 /// other blocks. The resulting vector instructions can be placed at the
4036 /// beginning of the basic block without scheduling (if the operands do not
4037 /// need to be scheduled) or at the end of the block (if the users are outside
4038 /// of the block). This saves some compile time and memory used by the
4039 /// compiler.
4040 /// ScheduleData is assigned to each instruction between the boundaries of
4041 /// the tree entry, even to those that are not part of the graph. This is
4042 /// required to correctly follow the dependencies between the instructions and
4043 /// to schedule them correctly. ScheduleData is not allocated for the
4044 /// instructions that do not require scheduling, like phis, nodes with only
4045 /// extractelements/insertelements, or nodes whose instructions have
4046 /// uses/operands outside of the block.
4047 struct BlockScheduling {
4048 BlockScheduling(BasicBlock *BB)
4049 : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize) {}
4051 void clear() {
4052 ReadyInsts.clear();
4053 ScheduleStart = nullptr;
4054 ScheduleEnd = nullptr;
4055 FirstLoadStoreInRegion = nullptr;
4056 LastLoadStoreInRegion = nullptr;
4057 RegionHasStackSave = false;
4059 // Reduce the maximum schedule region size by the size of the
4060 // previous scheduling run.
4061 ScheduleRegionSizeLimit -= ScheduleRegionSize;
4062 if (ScheduleRegionSizeLimit < MinScheduleRegionSize)
4063 ScheduleRegionSizeLimit = MinScheduleRegionSize;
4064 ScheduleRegionSize = 0;
4066 // Make a new scheduling region, i.e. all existing ScheduleData is not
4067 // in the new region yet.
4068 ++SchedulingRegionID;
4071 ScheduleData *getScheduleData(Instruction *I) {
4072 if (BB != I->getParent())
4073 // Avoid lookup if can't possibly be in map.
4074 return nullptr;
4075 ScheduleData *SD = ScheduleDataMap.lookup(I);
4076 if (SD && isInSchedulingRegion(SD))
4077 return SD;
4078 return nullptr;
4081 ScheduleData *getScheduleData(Value *V) {
4082 if (auto *I = dyn_cast<Instruction>(V))
4083 return getScheduleData(I);
4084 return nullptr;
4087 bool isInSchedulingRegion(ScheduleData *SD) const {
4088 return SD->SchedulingRegionID == SchedulingRegionID;
4091 /// Marks an instruction as scheduled and puts all dependent ready
4092 /// instructions into the ready-list.
4093 template <typename ReadyListType>
4094 void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
4095 SD->IsScheduled = true;
4096 LLVM_DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");
4098 for (ScheduleData *BundleMember = SD; BundleMember;
4099 BundleMember = BundleMember->NextInBundle) {
4101 // Handle the def-use chain dependencies.
4103 // Decrement the unscheduled counter and insert to ready list if ready.
4104 auto &&DecrUnsched = [this, &ReadyList](Instruction *I) {
4105 ScheduleData *OpDef = getScheduleData(I);
4106 if (OpDef && OpDef->hasValidDependencies() &&
4107 OpDef->incrementUnscheduledDeps(-1) == 0) {
4108 // There are no more unscheduled dependencies after
4109 // decrementing, so we can put the dependent instruction
4110 // into the ready list.
4111 ScheduleData *DepBundle = OpDef->FirstInBundle;
4112 assert(!DepBundle->IsScheduled &&
4113 "already scheduled bundle gets ready");
4114 ReadyList.insert(DepBundle);
4115 LLVM_DEBUG(dbgs()
4116 << "SLP: gets ready (def): " << *DepBundle << "\n");
4120 // If BundleMember is a vector bundle, its operands may have been
4121 // reordered during buildTree(). We therefore need to get its operands
4122 // through the TreeEntry.
4123 if (TreeEntry *TE = BundleMember->TE) {
4124 // Need to search for the lane since the tree entry can be reordered.
4125 int Lane = std::distance(TE->Scalars.begin(),
4126 find(TE->Scalars, BundleMember->Inst));
4127 assert(Lane >= 0 && "Lane not set");
4129 // Since vectorization tree is being built recursively this assertion
4130 // ensures that the tree entry has all operands set before reaching
4131 // this code. Couple of exceptions known at the moment are extracts
4132 // where their second (immediate) operand is not added. Since
4133 // immediates do not affect scheduler behavior this is considered
4134 // okay.
4135 auto *In = BundleMember->Inst;
4136 assert(
4137 In &&
4138 (isa<ExtractValueInst, ExtractElementInst, IntrinsicInst>(In) ||
4139 In->getNumOperands() == TE->getNumOperands()) &&
4140 "Missed TreeEntry operands?");
4141 (void)In; // fake use to avoid build failure when assertions disabled
4143 for (unsigned OpIdx = 0, NumOperands = TE->getNumOperands();
4144 OpIdx != NumOperands; ++OpIdx)
4145 if (auto *I = dyn_cast<Instruction>(TE->getOperand(OpIdx)[Lane]))
4146 DecrUnsched(I);
4147 } else {
4148 // If BundleMember is a stand-alone instruction, no operand reordering
4149 // has taken place, so we directly access its operands.
4150 for (Use &U : BundleMember->Inst->operands())
4151 if (auto *I = dyn_cast<Instruction>(U.get()))
4152 DecrUnsched(I);
4154 // Handle the memory dependencies.
4155 for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
4156 if (MemoryDepSD->hasValidDependencies() &&
4157 MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
4158 // There are no more unscheduled dependencies after decrementing,
4159 // so we can put the dependent instruction into the ready list.
4160 ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
4161 assert(!DepBundle->IsScheduled &&
4162 "already scheduled bundle gets ready");
4163 ReadyList.insert(DepBundle);
4164 LLVM_DEBUG(dbgs()
4165 << "SLP: gets ready (mem): " << *DepBundle << "\n");
4168 // Handle the control dependencies.
4169 for (ScheduleData *DepSD : BundleMember->ControlDependencies) {
4170 if (DepSD->incrementUnscheduledDeps(-1) == 0) {
4171 // There are no more unscheduled dependencies after decrementing,
4172 // so we can put the dependent instruction into the ready list.
4173 ScheduleData *DepBundle = DepSD->FirstInBundle;
4174 assert(!DepBundle->IsScheduled &&
4175 "already scheduled bundle gets ready");
4176 ReadyList.insert(DepBundle);
4177 LLVM_DEBUG(dbgs()
4178 << "SLP: gets ready (ctl): " << *DepBundle << "\n");
4184 /// Verify basic self consistency properties of the data structure.
4185 void verify() {
4186 if (!ScheduleStart)
4187 return;
4189 assert(ScheduleStart->getParent() == ScheduleEnd->getParent() &&
4190 ScheduleStart->comesBefore(ScheduleEnd) &&
4191 "Not a valid scheduling region?");
4193 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4194 auto *SD = getScheduleData(I);
4195 if (!SD)
4196 continue;
4197 assert(isInSchedulingRegion(SD) &&
4198 "primary schedule data not in window?");
4199 assert(isInSchedulingRegion(SD->FirstInBundle) &&
4200 "entire bundle in window!");
4201 SD->verify();
4204 for (auto *SD : ReadyInsts) {
4205 assert(SD->isSchedulingEntity() && SD->isReady() &&
4206 "item in ready list not ready?");
4207 (void)SD;
4211 /// Put all instructions into the ReadyList which are ready for scheduling.
4212 template <typename ReadyListType>
4213 void initialFillReadyList(ReadyListType &ReadyList) {
4214 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
4215 ScheduleData *SD = getScheduleData(I);
4216 if (SD && SD->isSchedulingEntity() && SD->hasValidDependencies() &&
4217 SD->isReady()) {
4218 ReadyList.insert(SD);
4219 LLVM_DEBUG(dbgs()
4220 << "SLP: initially in ready list: " << *SD << "\n");
4225 /// Build a bundle from the ScheduleData nodes corresponding to the
4226 /// scalar instruction for each lane.
4227 ScheduleData *buildBundle(ArrayRef<Value *> VL);
4229 /// Checks if a bundle of instructions can be scheduled, i.e. has no
4230 /// cyclic dependencies. This is only a dry-run, no instructions are
4231 /// actually moved at this stage.
4232 /// \returns the scheduling bundle. The returned Optional value is not
4233 /// std::nullopt if \p VL is allowed to be scheduled.
4234 std::optional<ScheduleData *>
4235 tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
4236 const InstructionsState &S);
4238 /// Un-bundles a group of instructions.
4239 void cancelScheduling(ArrayRef<Value *> VL, Value *OpValue);
4241 /// Allocates schedule data chunk.
4242 ScheduleData *allocateScheduleDataChunks();
4244 /// Extends the scheduling region so that V is inside the region.
4245 /// \returns true if the region size is within the limit.
4246 bool extendSchedulingRegion(Value *V, const InstructionsState &S);
4248 /// Initialize the ScheduleData structures for new instructions in the
4249 /// scheduling region.
4250 void initScheduleData(Instruction *FromI, Instruction *ToI,
4251 ScheduleData *PrevLoadStore,
4252 ScheduleData *NextLoadStore);
4254 /// Updates the dependency information of a bundle and of all instructions/
4255 /// bundles which depend on the original bundle.
4256 void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
4257 BoUpSLP *SLP);
4259 /// Sets all instructions in the scheduling region to un-scheduled.
4260 void resetSchedule();
4262 BasicBlock *BB;
4264 /// Simple memory allocation for ScheduleData.
4265 SmallVector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;
4267 /// The size of a ScheduleData array in ScheduleDataChunks.
4268 int ChunkSize;
4270 /// The allocator position in the current chunk, which is the last entry
4271 /// of ScheduleDataChunks.
4272 int ChunkPos;
4274 /// Attaches ScheduleData to Instruction.
4275 /// Note that the mapping survives during all vectorization iterations, i.e.
4276 /// ScheduleData structures are recycled.
4277 DenseMap<Instruction *, ScheduleData *> ScheduleDataMap;
4279 /// The ready-list for scheduling (only used for the dry-run).
4280 SetVector<ScheduleData *> ReadyInsts;
4282 /// The first instruction of the scheduling region.
4283 Instruction *ScheduleStart = nullptr;
4285 /// The first instruction _after_ the scheduling region.
4286 Instruction *ScheduleEnd = nullptr;
4288 /// The first memory accessing instruction in the scheduling region
4289 /// (can be null).
4290 ScheduleData *FirstLoadStoreInRegion = nullptr;
4292 /// The last memory accessing instruction in the scheduling region
4293 /// (can be null).
4294 ScheduleData *LastLoadStoreInRegion = nullptr;
4296 /// Is there an llvm.stacksave or llvm.stackrestore in the scheduling
4297 /// region? Used to optimize the dependence calculation for the
4298 /// common case where there isn't.
4299 bool RegionHasStackSave = false;
4301 /// The current size of the scheduling region.
4302 int ScheduleRegionSize = 0;
4304 /// The maximum size allowed for the scheduling region.
4305 int ScheduleRegionSizeLimit = ScheduleRegionSizeBudget;
4307 /// The ID of the scheduling region. For a new vectorization iteration this
4308 /// is incremented, which "removes" all ScheduleData from the region.
4309 /// Make sure that the initial SchedulingRegionID is greater than the
4310 /// initial SchedulingRegionID in ScheduleData (which is 0).
4311 int SchedulingRegionID = 1;
4314 /// Attaches the BlockScheduling structures to basic blocks.
4315 MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;
4317 /// Performs the "real" scheduling. Done before vectorization is actually
4318 /// performed in a basic block.
4319 void scheduleBlock(BlockScheduling *BS);
4321 /// List of users to ignore during scheduling and that don't need extracting.
4322 const SmallDenseSet<Value *> *UserIgnoreList = nullptr;
4324 /// A DenseMapInfo implementation for holding DenseMaps and DenseSets of
4325 /// sorted SmallVectors of unsigned.
4326 struct OrdersTypeDenseMapInfo {
4327 static OrdersType getEmptyKey() {
4328 OrdersType V;
4329 V.push_back(~1U);
4330 return V;
4333 static OrdersType getTombstoneKey() {
4334 OrdersType V;
4335 V.push_back(~2U);
4336 return V;
4339 static unsigned getHashValue(const OrdersType &V) {
4340 return static_cast<unsigned>(hash_combine_range(V.begin(), V.end()));
4343 static bool isEqual(const OrdersType &LHS, const OrdersType &RHS) {
4344 return LHS == RHS;
4348 // Analysis and block reference.
4349 Function *F;
4350 ScalarEvolution *SE;
4351 TargetTransformInfo *TTI;
4352 TargetLibraryInfo *TLI;
4353 LoopInfo *LI;
4354 DominatorTree *DT;
4355 AssumptionCache *AC;
4356 DemandedBits *DB;
4357 const DataLayout *DL;
4358 OptimizationRemarkEmitter *ORE;
4360 unsigned MaxVecRegSize; // This is set by TTI or overridden by cl::opt.
4361 unsigned MinVecRegSize; // Set by cl::opt (default: 128).
4363 /// Instruction builder to construct the vectorized tree.
4364 IRBuilder<TargetFolder> Builder;
4366 /// A map of scalar integer values to the smallest bit width with which they
4367 /// can legally be represented. The values map to (width, signed) pairs,
4368 /// where "width" indicates the minimum bit width and "signed" is True if the
4369 /// value must be signed-extended, rather than zero-extended, back to its
4370 /// original width.
4371 DenseMap<const TreeEntry *, std::pair<uint64_t, bool>> MinBWs;
4373 /// Final size of the reduced vector, if the current graph represents the
4374 /// input for the reduction and it was possible to narrow the size of the
4375 /// reduction.
4376 unsigned ReductionBitWidth = 0;
4378 /// Canonical graph size before the transformations.
4379 unsigned BaseGraphSize = 1;
4381 /// If the tree contains any zext/sext/trunc nodes, this contains the max-min
4382 /// pair of type sizes used in the tree.
4383 std::optional<std::pair<unsigned, unsigned>> CastMaxMinBWSizes;
4385 /// Indices of the vectorized nodes, which are supposed to be the roots of the
4386 /// new bitwidth analysis attempt, like trunc, IToFP or ICmp.
4387 DenseSet<unsigned> ExtraBitWidthNodes;
4390 } // end namespace slpvectorizer
4392 template <> struct GraphTraits<BoUpSLP *> {
4393 using TreeEntry = BoUpSLP::TreeEntry;
4395 /// NodeRef has to be a pointer per the GraphWriter.
4396 using NodeRef = TreeEntry *;
4398 using ContainerTy = BoUpSLP::TreeEntry::VecTreeTy;
4400 /// Add the VectorizableTree to the index iterator to be able to return
4401 /// TreeEntry pointers.
4402 struct ChildIteratorType
4403 : public iterator_adaptor_base<
4404 ChildIteratorType, SmallVector<BoUpSLP::EdgeInfo, 1>::iterator> {
4405 ContainerTy &VectorizableTree;
4407 ChildIteratorType(SmallVector<BoUpSLP::EdgeInfo, 1>::iterator W,
4408 ContainerTy &VT)
4409 : ChildIteratorType::iterator_adaptor_base(W), VectorizableTree(VT) {}
4411 NodeRef operator*() { return I->UserTE; }
4414 static NodeRef getEntryNode(BoUpSLP &R) {
4415 return R.VectorizableTree[0].get();
4418 static ChildIteratorType child_begin(NodeRef N) {
4419 return {N->UserTreeIndices.begin(), N->Container};
4422 static ChildIteratorType child_end(NodeRef N) {
4423 return {N->UserTreeIndices.end(), N->Container};
4426 /// For the node iterator we just need to turn the TreeEntry iterator into a
4427 /// TreeEntry* iterator so that it dereferences to NodeRef.
4428 class nodes_iterator {
4429 using ItTy = ContainerTy::iterator;
4430 ItTy It;
4432 public:
4433 nodes_iterator(const ItTy &It2) : It(It2) {}
4434 NodeRef operator*() { return It->get(); }
4435 nodes_iterator operator++() {
4436 ++It;
4437 return *this;
4439 bool operator!=(const nodes_iterator &N2) const { return N2.It != It; }
4442 static nodes_iterator nodes_begin(BoUpSLP *R) {
4443 return nodes_iterator(R->VectorizableTree.begin());
4446 static nodes_iterator nodes_end(BoUpSLP *R) {
4447 return nodes_iterator(R->VectorizableTree.end());
4450 static unsigned size(BoUpSLP *R) { return R->VectorizableTree.size(); }
4453 template <> struct DOTGraphTraits<BoUpSLP *> : public DefaultDOTGraphTraits {
4454 using TreeEntry = BoUpSLP::TreeEntry;
4456 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
4458 std::string getNodeLabel(const TreeEntry *Entry, const BoUpSLP *R) {
4459 std::string Str;
4460 raw_string_ostream OS(Str);
4461 OS << Entry->Idx << ".\n";
4462 if (isSplat(Entry->Scalars))
4463 OS << "<splat> ";
4464 for (auto *V : Entry->Scalars) {
4465 OS << *V;
4466 if (llvm::any_of(R->ExternalUses, [&](const BoUpSLP::ExternalUser &EU) {
4467 return EU.Scalar == V;
4469 OS << " <extract>";
4470 OS << "\n";
4472 return Str;
4475 static std::string getNodeAttributes(const TreeEntry *Entry,
4476 const BoUpSLP *) {
4477 if (Entry->isGather())
4478 return "color=red";
4479 if (Entry->State == TreeEntry::ScatterVectorize ||
4480 Entry->State == TreeEntry::StridedVectorize)
4481 return "color=blue";
4482 return "";
4486 } // end namespace llvm
4488 BoUpSLP::~BoUpSLP() {
4489 SmallVector<WeakTrackingVH> DeadInsts;
4490 for (auto *I : DeletedInstructions) {
4491 if (!I->getParent()) {
4492 // Temporarily insert the instruction back so it can be erased from its
4493 // parent and from memory later.
4494 if (isa<PHINode>(I))
4495 // Phi nodes must be the very first instructions in the block.
4496 I->insertBefore(F->getEntryBlock(),
4497 F->getEntryBlock().getFirstNonPHIIt());
4498 else
4499 I->insertBefore(F->getEntryBlock().getTerminator());
4500 continue;
4502 for (Use &U : I->operands()) {
4503 auto *Op = dyn_cast<Instruction>(U.get());
4504 if (Op && !DeletedInstructions.count(Op) && Op->hasOneUser() &&
4505 wouldInstructionBeTriviallyDead(Op, TLI))
4506 DeadInsts.emplace_back(Op);
4508 I->dropAllReferences();
4510 for (auto *I : DeletedInstructions) {
4511 assert(I->use_empty() &&
4512 "trying to erase instruction with users.");
4513 I->eraseFromParent();
4516 // Clean up any dead scalar code feeding the vectorized instructions.
4517 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI);
4519 #ifdef EXPENSIVE_CHECKS
4520 // If we could guarantee that this call is not extremely slow, we could
4521 // remove the ifdef limitation (see PR47712).
4522 assert(!verifyFunction(*F, &dbgs()));
4523 #endif
4526 /// Reorders the given \p Reuses mask according to the given \p Mask. \p Reuses
4527 /// contains the original mask for the scalars reused in the node. The
4528 /// procedure transforms this mask in accordance with the given \p Mask.
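/// Illustrative example: with Reuses = {1, 0, 3, 2} and Mask = {2, 3, 0, 1},
/// every element Prev[I] moves to position Mask[I], so Reuses becomes
/// {3, 2, 1, 0}.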
4529 static void reorderReuses(SmallVectorImpl<int> &Reuses, ArrayRef<int> Mask) {
4530 assert(!Mask.empty() && Reuses.size() == Mask.size() &&
4531 "Expected non-empty mask.");
4532 SmallVector<int> Prev(Reuses.begin(), Reuses.end());
4533 Prev.swap(Reuses);
4534 for (unsigned I = 0, E = Prev.size(); I < E; ++I)
4535 if (Mask[I] != PoisonMaskElem)
4536 Reuses[Mask[I]] = Prev[I];
4539 /// Reorders the given \p Order according to the given \p Mask. \p Order is
4540 /// the original order of the scalars. The procedure transforms the provided order
4541 /// in accordance with the given \p Mask. If the resulting \p Order is just an
4542 /// identity order, \p Order is cleared.
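/// Illustrative example: for a full (poison-free) permutation mask such as
/// Mask = {2, 0, 1, 3} and an initially empty \p Order (treated as identity),
/// the resulting \p Order is {2, 0, 1, 3}; an identity result is cleared instead.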
4543 static void reorderOrder(SmallVectorImpl<unsigned> &Order, ArrayRef<int> Mask,
4544 bool BottomOrder = false) {
4545 assert(!Mask.empty() && "Expected non-empty mask.");
4546 unsigned Sz = Mask.size();
4547 if (BottomOrder) {
4548 SmallVector<unsigned> PrevOrder;
4549 if (Order.empty()) {
4550 PrevOrder.resize(Sz);
4551 std::iota(PrevOrder.begin(), PrevOrder.end(), 0);
4552 } else {
4553 PrevOrder.swap(Order);
4555 Order.assign(Sz, Sz);
4556 for (unsigned I = 0; I < Sz; ++I)
4557 if (Mask[I] != PoisonMaskElem)
4558 Order[I] = PrevOrder[Mask[I]];
4559 if (all_of(enumerate(Order), [&](const auto &Data) {
4560 return Data.value() == Sz || Data.index() == Data.value();
4561 })) {
4562 Order.clear();
4563 return;
4565 fixupOrderingIndices(Order);
4566 return;
4568 SmallVector<int> MaskOrder;
4569 if (Order.empty()) {
4570 MaskOrder.resize(Sz);
4571 std::iota(MaskOrder.begin(), MaskOrder.end(), 0);
4572 } else {
4573 inversePermutation(Order, MaskOrder);
4575 reorderReuses(MaskOrder, Mask);
4576 if (ShuffleVectorInst::isIdentityMask(MaskOrder, Sz)) {
4577 Order.clear();
4578 return;
4580 Order.assign(Sz, Sz);
4581 for (unsigned I = 0; I < Sz; ++I)
4582 if (MaskOrder[I] != PoisonMaskElem)
4583 Order[MaskOrder[I]] = I;
4584 fixupOrderingIndices(Order);
4587 std::optional<BoUpSLP::OrdersType>
4588 BoUpSLP::findReusedOrderedScalars(const BoUpSLP::TreeEntry &TE) {
4589 assert(TE.isGather() && "Expected gather node only.");
4590 // Try to find subvector extract/insert patterns and reorder only such
4591 // patterns.
4592 SmallVector<Value *> GatheredScalars(TE.Scalars.begin(), TE.Scalars.end());
4593 Type *ScalarTy = GatheredScalars.front()->getType();
4594 int NumScalars = GatheredScalars.size();
4595 if (!isValidElementType(ScalarTy))
4596 return std::nullopt;
4597 auto *VecTy = getWidenedType(ScalarTy, NumScalars);
4598 int NumParts = TTI->getNumberOfParts(VecTy);
4599 if (NumParts == 0 || NumParts >= NumScalars ||
4600 VecTy->getNumElements() % NumParts != 0 ||
4601 !hasFullVectorsOrPowerOf2(*TTI, VecTy->getElementType(),
4602 VecTy->getNumElements() / NumParts))
4603 NumParts = 1;
4604 SmallVector<int> ExtractMask;
4605 SmallVector<int> Mask;
4606 SmallVector<SmallVector<const TreeEntry *>> Entries;
4607 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> ExtractShuffles =
4608 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts);
4609 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles =
4610 isGatherShuffledEntry(&TE, GatheredScalars, Mask, Entries, NumParts,
4611 /*ForOrder=*/true);
4612 // No shuffled operands - ignore.
4613 if (GatherShuffles.empty() && ExtractShuffles.empty())
4614 return std::nullopt;
4615 OrdersType CurrentOrder(NumScalars, NumScalars);
4616 if (GatherShuffles.size() == 1 &&
4617 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
4618 Entries.front().front()->isSame(TE.Scalars)) {
4619 // Perfect match in the graph, will reuse the previously vectorized
4620 // node. Cost is 0.
4621 std::iota(CurrentOrder.begin(), CurrentOrder.end(), 0);
4622 return CurrentOrder;
4624 auto IsSplatMask = [](ArrayRef<int> Mask) {
4625 int SingleElt = PoisonMaskElem;
4626 return all_of(Mask, [&](int I) {
4627 if (SingleElt == PoisonMaskElem && I != PoisonMaskElem)
4628 SingleElt = I;
4629 return I == PoisonMaskElem || I == SingleElt;
4632 // Exclusive broadcast mask - ignore.
4633 if ((ExtractShuffles.empty() && IsSplatMask(Mask) &&
4634 (Entries.size() != 1 ||
4635 Entries.front().front()->ReorderIndices.empty())) ||
4636 (GatherShuffles.empty() && IsSplatMask(ExtractMask)))
4637 return std::nullopt;
4638 SmallBitVector ShuffledSubMasks(NumParts);
4639 auto TransformMaskToOrder = [&](MutableArrayRef<unsigned> CurrentOrder,
4640 ArrayRef<int> Mask, int PartSz, int NumParts,
4641 function_ref<unsigned(unsigned)> GetVF) {
4642 for (int I : seq<int>(0, NumParts)) {
4643 if (ShuffledSubMasks.test(I))
4644 continue;
4645 const int VF = GetVF(I);
4646 if (VF == 0)
4647 continue;
4648 unsigned Limit = getNumElems(CurrentOrder.size(), PartSz, I);
4649 MutableArrayRef<unsigned> Slice = CurrentOrder.slice(I * PartSz, Limit);
4650 // Shuffle of at least 2 vectors - ignore.
4651 if (any_of(Slice, [&](int I) { return I != NumScalars; })) {
4652 std::fill(Slice.begin(), Slice.end(), NumScalars);
4653 ShuffledSubMasks.set(I);
4654 continue;
4656 // Try to include as many elements from the mask as possible.
4657 int FirstMin = INT_MAX;
4658 bool SecondVecFound = false;
4659 for (int K : seq<int>(Limit)) {
4660 int Idx = Mask[I * PartSz + K];
4661 if (Idx == PoisonMaskElem) {
4662 Value *V = GatheredScalars[I * PartSz + K];
4663 if (isConstant(V) && !isa<PoisonValue>(V)) {
4664 SecondVecFound = true;
4665 break;
4667 continue;
4669 if (Idx < VF) {
4670 if (FirstMin > Idx)
4671 FirstMin = Idx;
4672 } else {
4673 SecondVecFound = true;
4674 break;
4677 FirstMin = (FirstMin / PartSz) * PartSz;
4678 // Shuffle of at least 2 vectors - ignore.
4679 if (SecondVecFound) {
4680 std::fill(Slice.begin(), Slice.end(), NumScalars);
4681 ShuffledSubMasks.set(I);
4682 continue;
4684 for (int K : seq<int>(Limit)) {
4685 int Idx = Mask[I * PartSz + K];
4686 if (Idx == PoisonMaskElem)
4687 continue;
4688 Idx -= FirstMin;
4689 if (Idx >= PartSz) {
4690 SecondVecFound = true;
4691 break;
4693 if (CurrentOrder[I * PartSz + Idx] >
4694 static_cast<unsigned>(I * PartSz + K) &&
4695 CurrentOrder[I * PartSz + Idx] !=
4696 static_cast<unsigned>(I * PartSz + Idx))
4697 CurrentOrder[I * PartSz + Idx] = I * PartSz + K;
4699 // Shuffle of at least 2 vectors - ignore.
4700 if (SecondVecFound) {
4701 std::fill(Slice.begin(), Slice.end(), NumScalars);
4702 ShuffledSubMasks.set(I);
4703 continue;
4707 int PartSz = getPartNumElems(NumScalars, NumParts);
4708 if (!ExtractShuffles.empty())
4709 TransformMaskToOrder(
4710 CurrentOrder, ExtractMask, PartSz, NumParts, [&](unsigned I) {
4711 if (!ExtractShuffles[I])
4712 return 0U;
4713 unsigned VF = 0;
4714 unsigned Sz = getNumElems(TE.getVectorFactor(), PartSz, I);
4715 for (unsigned Idx : seq<unsigned>(Sz)) {
4716 int K = I * PartSz + Idx;
4717 if (ExtractMask[K] == PoisonMaskElem)
4718 continue;
4719 if (!TE.ReuseShuffleIndices.empty())
4720 K = TE.ReuseShuffleIndices[K];
4721 if (K == PoisonMaskElem)
4722 continue;
4723 if (!TE.ReorderIndices.empty())
4724 K = std::distance(TE.ReorderIndices.begin(),
4725 find(TE.ReorderIndices, K));
4726 auto *EI = dyn_cast<ExtractElementInst>(TE.Scalars[K]);
4727 if (!EI)
4728 continue;
4729 VF = std::max(VF, cast<VectorType>(EI->getVectorOperandType())
4730 ->getElementCount()
4731 .getKnownMinValue());
4733 return VF;
4735 // Check special corner case - single shuffle of the same entry.
4736 if (GatherShuffles.size() == 1 && NumParts != 1) {
4737 if (ShuffledSubMasks.any())
4738 return std::nullopt;
4739 PartSz = NumScalars;
4740 NumParts = 1;
4742 if (!Entries.empty())
4743 TransformMaskToOrder(CurrentOrder, Mask, PartSz, NumParts, [&](unsigned I) {
4744 if (!GatherShuffles[I])
4745 return 0U;
4746 return std::max(Entries[I].front()->getVectorFactor(),
4747 Entries[I].back()->getVectorFactor());
4749 int NumUndefs =
4750 count_if(CurrentOrder, [&](int Idx) { return Idx == NumScalars; });
4751 if (ShuffledSubMasks.all() || (NumScalars > 2 && NumUndefs >= NumScalars / 2))
4752 return std::nullopt;
4753 return std::move(CurrentOrder);
4756 static bool arePointersCompatible(Value *Ptr1, Value *Ptr2,
4757 const TargetLibraryInfo &TLI,
4758 bool CompareOpcodes = true) {
4759 if (getUnderlyingObject(Ptr1, RecursionMaxDepth) !=
4760 getUnderlyingObject(Ptr2, RecursionMaxDepth))
4761 return false;
4762 auto *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
4763 auto *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
4764 return (!GEP1 || GEP1->getNumOperands() == 2) &&
4765 (!GEP2 || GEP2->getNumOperands() == 2) &&
4766 (((!GEP1 || isConstant(GEP1->getOperand(1))) &&
4767 (!GEP2 || isConstant(GEP2->getOperand(1)))) ||
4768 !CompareOpcodes ||
4769 (GEP1 && GEP2 &&
4770 getSameOpcode({GEP1->getOperand(1), GEP2->getOperand(1)}, TLI)
4771 .getOpcode()));
4774 /// Calculates minimal alignment as a common alignment.
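/// For example, loads with alignments {8, 4, 16} yield a common alignment of 4.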
4775 template <typename T>
4776 static Align computeCommonAlignment(ArrayRef<Value *> VL) {
4777 Align CommonAlignment = cast<T>(VL.front())->getAlign();
4778 for (Value *V : VL.drop_front())
4779 CommonAlignment = std::min(CommonAlignment, cast<T>(V)->getAlign());
4780 return CommonAlignment;
4783 /// Check if \p Order represents reverse order.
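/// For example, {3, 2, 1, 0} is a reverse order for size 4; unused slots
/// (entries equal to the order size) are ignored.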
4784 static bool isReverseOrder(ArrayRef<unsigned> Order) {
4785 unsigned Sz = Order.size();
4786 return !Order.empty() && all_of(enumerate(Order), [&](const auto &Pair) {
4787 return Pair.value() == Sz || Sz - Pair.index() - 1 == Pair.value();
4791 /// Checks if the provided list of pointers \p PointerOps represents strided
4792 /// pointers for type \p ElemTy. If they do not, std::nullopt is returned.
4793 /// Otherwise, if \p Inst is not specified, an engaged optional holding nullptr
4794 /// is returned to show that the pointers are strided. If \p Inst is
4795 /// specified, the runtime stride is materialized before the given \p Inst.
4796 /// \returns std::nullopt if the pointers do not have a runtime stride,
4797 /// nullptr or the actual stride value otherwise.
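/// Illustrative example: for pointers {P, P + 3 * S, P + S, P + 2 * S}, where
/// S is a non-constant stride in units of \p ElemTy, the stride S is
/// recognized and \p SortedIndices is filled with {0, 2, 3, 1} (the positions
/// of the pointers in increasing address order).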
4798 static std::optional<Value *>
4799 calculateRtStride(ArrayRef<Value *> PointerOps, Type *ElemTy,
4800 const DataLayout &DL, ScalarEvolution &SE,
4801 SmallVectorImpl<unsigned> &SortedIndices,
4802 Instruction *Inst = nullptr) {
4803 SmallVector<const SCEV *> SCEVs;
4804 const SCEV *PtrSCEVLowest = nullptr;
4805 const SCEV *PtrSCEVHighest = nullptr;
4806 // Find lower/upper pointers from the PointerOps (i.e. with lowest and highest
4807 // addresses).
4808 for (Value *Ptr : PointerOps) {
4809 const SCEV *PtrSCEV = SE.getSCEV(Ptr);
4810 if (!PtrSCEV)
4811 return std::nullopt;
4812 SCEVs.push_back(PtrSCEV);
4813 if (!PtrSCEVLowest && !PtrSCEVHighest) {
4814 PtrSCEVLowest = PtrSCEVHighest = PtrSCEV;
4815 continue;
4817 const SCEV *Diff = SE.getMinusSCEV(PtrSCEV, PtrSCEVLowest);
4818 if (isa<SCEVCouldNotCompute>(Diff))
4819 return std::nullopt;
4820 if (Diff->isNonConstantNegative()) {
4821 PtrSCEVLowest = PtrSCEV;
4822 continue;
4824 const SCEV *Diff1 = SE.getMinusSCEV(PtrSCEVHighest, PtrSCEV);
4825 if (isa<SCEVCouldNotCompute>(Diff1))
4826 return std::nullopt;
4827 if (Diff1->isNonConstantNegative()) {
4828 PtrSCEVHighest = PtrSCEV;
4829 continue;
4832 // Dist = PtrSCEVHighest - PtrSCEVLowest;
4833 const SCEV *Dist = SE.getMinusSCEV(PtrSCEVHighest, PtrSCEVLowest);
4834 if (isa<SCEVCouldNotCompute>(Dist))
4835 return std::nullopt;
4836 int Size = DL.getTypeStoreSize(ElemTy);
4837 auto TryGetStride = [&](const SCEV *Dist,
4838 const SCEV *Multiplier) -> const SCEV * {
4839 if (const auto *M = dyn_cast<SCEVMulExpr>(Dist)) {
4840 if (M->getOperand(0) == Multiplier)
4841 return M->getOperand(1);
4842 if (M->getOperand(1) == Multiplier)
4843 return M->getOperand(0);
4844 return nullptr;
4846 if (Multiplier == Dist)
4847 return SE.getConstant(Dist->getType(), 1);
4848 return SE.getUDivExactExpr(Dist, Multiplier);
4850 // Stride_in_elements = Dist / (element_size * (num_elems - 1)).
4851 const SCEV *Stride = nullptr;
4852 if (Size != 1 || SCEVs.size() > 2) {
4853 const SCEV *Sz = SE.getConstant(Dist->getType(), Size * (SCEVs.size() - 1));
4854 Stride = TryGetStride(Dist, Sz);
4855 if (!Stride)
4856 return std::nullopt;
4858 if (!Stride || isa<SCEVConstant>(Stride))
4859 return std::nullopt;
4860 // Iterate through all pointers and check if all distances are
4861 // unique multiples of Stride.
4862 using DistOrdPair = std::pair<int64_t, int>;
4863 auto Compare = llvm::less_first();
4864 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
4865 int Cnt = 0;
4866 bool IsConsecutive = true;
4867 for (const SCEV *PtrSCEV : SCEVs) {
4868 unsigned Dist = 0;
4869 if (PtrSCEV != PtrSCEVLowest) {
4870 const SCEV *Diff = SE.getMinusSCEV(PtrSCEV, PtrSCEVLowest);
4871 const SCEV *Coeff = TryGetStride(Diff, Stride);
4872 if (!Coeff)
4873 return std::nullopt;
4874 const auto *SC = dyn_cast<SCEVConstant>(Coeff);
4875 if (!SC || isa<SCEVCouldNotCompute>(SC))
4876 return std::nullopt;
4877 if (!SE.getMinusSCEV(PtrSCEV, SE.getAddExpr(PtrSCEVLowest,
4878 SE.getMulExpr(Stride, SC)))
4879 ->isZero())
4880 return std::nullopt;
4881 Dist = SC->getAPInt().getZExtValue();
4883 // If the strides are not the same or repeated, we can't vectorize.
4884 if ((Dist / Size) * Size != Dist || (Dist / Size) >= SCEVs.size())
4885 return std::nullopt;
4886 auto Res = Offsets.emplace(Dist, Cnt);
4887 if (!Res.second)
4888 return std::nullopt;
4889 // Consecutive order if the inserted element is the last one.
4890 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
4891 ++Cnt;
4893 if (Offsets.size() != SCEVs.size())
4894 return std::nullopt;
4895 SortedIndices.clear();
4896 if (!IsConsecutive) {
4897 // Fill SortedIndices array only if it is non-consecutive.
4898 SortedIndices.resize(PointerOps.size());
4899 Cnt = 0;
4900 for (const std::pair<int64_t, int> &Pair : Offsets) {
4901 SortedIndices[Cnt] = Pair.second;
4902 ++Cnt;
4905 if (!Inst)
4906 return nullptr;
4907 SCEVExpander Expander(SE, DL, "strided-load-vec");
4908 return Expander.expandCodeFor(Stride, Stride->getType(), Inst);
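/// Returns the costs of computing the pointer operands \p Ptrs relative to
/// \p BasePtr for a scalar and for a vectorized access with opcode \p Opcode,
/// as a {scalar GEP cost, vector GEP cost} pair (declared here, defined later
/// in this file; see the uses below).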
4911 static std::pair<InstructionCost, InstructionCost>
4912 getGEPCosts(const TargetTransformInfo &TTI, ArrayRef<Value *> Ptrs,
4913 Value *BasePtr, unsigned Opcode, TTI::TargetCostKind CostKind,
4914 Type *ScalarTy, VectorType *VecTy);
4916 /// Returns the cost of the shuffle instructions with the given \p Kind, vector
4917 /// type \p Tp and optional \p Mask. Adds SLP-specific cost estimation for insert
4918 /// subvector pattern.
4919 static InstructionCost
4920 getShuffleCost(const TargetTransformInfo &TTI, TTI::ShuffleKind Kind,
4921 VectorType *Tp, ArrayRef<int> Mask = {},
4922 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
4923 int Index = 0, VectorType *SubTp = nullptr,
4924 ArrayRef<const Value *> Args = {}) {
4925 if (Kind != TTI::SK_PermuteTwoSrc)
4926 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
4927 int NumSrcElts = Tp->getElementCount().getKnownMinValue();
4928 int NumSubElts;
4929 if (Mask.size() > 2 && ShuffleVectorInst::isInsertSubvectorMask(
4930 Mask, NumSrcElts, NumSubElts, Index)) {
4931 if (Index + NumSubElts > NumSrcElts &&
4932 Index + NumSrcElts <= static_cast<int>(Mask.size()))
4933 return TTI.getShuffleCost(
4934 TTI::SK_InsertSubvector,
4935 getWidenedType(Tp->getElementType(), Mask.size()), Mask,
4936 TTI::TCK_RecipThroughput, Index, Tp);
4938 return TTI.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
4941 BoUpSLP::LoadsState
4942 BoUpSLP::canVectorizeLoads(ArrayRef<Value *> VL, const Value *VL0,
4943 SmallVectorImpl<unsigned> &Order,
4944 SmallVectorImpl<Value *> &PointerOps,
4945 unsigned *BestVF, bool TryRecursiveCheck) const {
4946 // Check that a vectorized load would load the same memory as a scalar
4947 // load. For example, we don't want to vectorize loads that are smaller
4948 // than 8 bits. Even though we have a packed struct {<i2, i2, i2, i2>}, LLVM
4949 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
4950 // from such a struct, we read/write packed bits disagreeing with the
4951 // unvectorized version.
4952 if (BestVF)
4953 *BestVF = 0;
4954 if (areKnownNonVectorizableLoads(VL))
4955 return LoadsState::Gather;
4956 Type *ScalarTy = VL0->getType();
4958 if (DL->getTypeSizeInBits(ScalarTy) != DL->getTypeAllocSizeInBits(ScalarTy))
4959 return LoadsState::Gather;
4961 // Make sure all loads in the bundle are simple - we can't vectorize
4962 // atomic or volatile loads.
4963 PointerOps.clear();
4964 const unsigned Sz = VL.size();
4965 PointerOps.resize(Sz);
4966 auto *POIter = PointerOps.begin();
4967 for (Value *V : VL) {
4968 auto *L = dyn_cast<LoadInst>(V);
4969 if (!L || !L->isSimple())
4970 return LoadsState::Gather;
4971 *POIter = L->getPointerOperand();
4972 ++POIter;
4975 Order.clear();
4976 // Check the order of pointer operands or that all pointers are the same.
4977 bool IsSorted = sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, Order);
4979 auto *VecTy = getWidenedType(ScalarTy, Sz);
4980 Align CommonAlignment = computeCommonAlignment<LoadInst>(VL);
4981 if (!IsSorted) {
4982 if (Sz > MinProfitableStridedLoads && TTI->isTypeLegal(VecTy)) {
4983 if (TTI->isLegalStridedLoadStore(VecTy, CommonAlignment) &&
4984 calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order))
4985 return LoadsState::StridedVectorize;
4988 if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
4989 TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
4990 return LoadsState::Gather;
4992 if (!all_of(PointerOps, [&](Value *P) {
4993 return arePointersCompatible(P, PointerOps.front(), *TLI);
4995 return LoadsState::Gather;
4997 } else {
4998 Value *Ptr0;
4999 Value *PtrN;
5000 if (Order.empty()) {
5001 Ptr0 = PointerOps.front();
5002 PtrN = PointerOps.back();
5003 } else {
5004 Ptr0 = PointerOps[Order.front()];
5005 PtrN = PointerOps[Order.back()];
5007 std::optional<int> Diff =
5008 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
5009 // Check that the sorted loads are consecutive.
5010 if (static_cast<unsigned>(*Diff) == Sz - 1)
5011 return LoadsState::Vectorize;
5012 if (!TTI->isLegalMaskedGather(VecTy, CommonAlignment) ||
5013 TTI->forceScalarizeMaskedGather(VecTy, CommonAlignment))
5014 return LoadsState::Gather;
5015 // Quick check whether this could be a strided access (distance divisible by Sz - 1).
5016 bool IsPossibleStrided = *Diff % (Sz - 1) == 0;
5017 // Try to generate strided load node if:
5018 // 1. Target with strided load support is detected.
5019 // 2. The number of loads is greater than MinProfitableStridedLoads,
5020 // or the potential stride is <= MaxProfitableLoadStride, the potential
5021 // stride is a power of 2 (to avoid perf regressions for a very small
5022 // number of loads) and the max distance > the number of loads, or the
5023 // potential stride is -1.
5024 // 3. The loads are ordered, or number of unordered loads <=
5025 // MaxProfitableUnorderedLoads, or loads are in reversed order.
5026 // (this check is to avoid extra costs for very expensive shuffles).
5027 // 4. Any pointer operand is an instruction with the users outside of the
5028 // current graph (for masked gathers extra extractelement instructions
5029 // might be required).
5030 auto IsAnyPointerUsedOutGraph =
5031 IsPossibleStrided && any_of(PointerOps, [&](Value *V) {
5032 return isa<Instruction>(V) && any_of(V->users(), [&](User *U) {
5033 return !getTreeEntry(U) && !MustGather.contains(U);
5036 const unsigned AbsoluteDiff = std::abs(*Diff);
5037 if (IsPossibleStrided && (IsAnyPointerUsedOutGraph ||
5038 ((Sz > MinProfitableStridedLoads ||
5039 (AbsoluteDiff <= MaxProfitableLoadStride * Sz &&
5040 has_single_bit(AbsoluteDiff))) &&
5041 AbsoluteDiff > Sz) ||
5042 *Diff == -(static_cast<int>(Sz) - 1))) {
5043 int Stride = *Diff / static_cast<int>(Sz - 1);
5044 if (*Diff == Stride * static_cast<int>(Sz - 1)) {
5045 Align Alignment =
5046 cast<LoadInst>(Order.empty() ? VL.front() : VL[Order.front()])
5047 ->getAlign();
5048 if (TTI->isLegalStridedLoadStore(VecTy, Alignment)) {
5049 // Iterate through all pointers and check if all distances are
5050 // unique multiple of Dist.
5051 SmallSet<int, 4> Dists;
5052 for (Value *Ptr : PointerOps) {
5053 int Dist = 0;
5054 if (Ptr == PtrN)
5055 Dist = *Diff;
5056 else if (Ptr != Ptr0)
5057 Dist = *getPointersDiff(ScalarTy, Ptr0, ScalarTy, Ptr, *DL, *SE);
5058 // If the strides are not the same or repeated, we can't
5059 // vectorize.
5060 if (((Dist / Stride) * Stride) != Dist ||
5061 !Dists.insert(Dist).second)
5062 break;
5064 if (Dists.size() == Sz)
5065 return LoadsState::StridedVectorize;
5070 // Compares the cost of loads + shuffles against strided/masked gather
5071 // loads. Returns true if the vectorized-loads + shuffles representation
5072 // is better than just gathering.
5073 auto CheckForShuffledLoads = [&, &TTI = *TTI](Align CommonAlignment,
5074 unsigned *BestVF,
5075 bool ProfitableGatherPointers) {
5076 if (BestVF)
5077 *BestVF = 0;
5078 // Compare masked gather cost and loads + insert subvector costs.
5079 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5080 auto [ScalarGEPCost, VectorGEPCost] =
5081 getGEPCosts(TTI, PointerOps, PointerOps.front(),
5082 Instruction::GetElementPtr, CostKind, ScalarTy, VecTy);
5083 // Estimate the cost of masked gather GEP. If not a splat, roughly
5084 // estimate as a buildvector, otherwise estimate as splat.
5085 APInt DemandedElts = APInt::getAllOnes(VecTy->getNumElements());
5086 VectorType *PtrVecTy =
5087 getWidenedType(PointerOps.front()->getType()->getScalarType(),
5088 VecTy->getNumElements());
5089 if (static_cast<unsigned>(count_if(
5090 PointerOps, IsaPred<GetElementPtrInst>)) < PointerOps.size() - 1 ||
5091 any_of(PointerOps, [&](Value *V) {
5092 return getUnderlyingObject(V) !=
5093 getUnderlyingObject(PointerOps.front());
5095 VectorGEPCost += TTI.getScalarizationOverhead(
5096 PtrVecTy, DemandedElts, /*Insert=*/true, /*Extract=*/false, CostKind);
5097 else
5098 VectorGEPCost +=
5099 TTI.getScalarizationOverhead(
5100 PtrVecTy, APInt::getOneBitSet(VecTy->getNumElements(), 0),
5101 /*Insert=*/true, /*Extract=*/false, CostKind) +
5102 ::getShuffleCost(TTI, TTI::SK_Broadcast, PtrVecTy, {}, CostKind);
5103 // The cost of scalar loads.
5104 InstructionCost ScalarLoadsCost =
5105 std::accumulate(VL.begin(), VL.end(), InstructionCost(),
5106 [&](InstructionCost C, Value *V) {
5107 return C + TTI.getInstructionCost(
5108 cast<Instruction>(V), CostKind);
5109 }) +
5110 ScalarGEPCost;
5111 // The cost of masked gather.
5112 InstructionCost MaskedGatherCost =
5113 TTI.getGatherScatterOpCost(
5114 Instruction::Load, VecTy, cast<LoadInst>(VL0)->getPointerOperand(),
5115 /*VariableMask=*/false, CommonAlignment, CostKind) +
5116 (ProfitableGatherPointers ? 0 : VectorGEPCost);
5117 InstructionCost GatherCost =
5118 TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
5119 /*Extract=*/false, CostKind) +
5120 ScalarLoadsCost;
5121 // The list of loads is small, or the partial check was already performed -
5122 // directly compare the masked gather cost and the gather cost.
5123 constexpr unsigned ListLimit = 4;
5124 if (!TryRecursiveCheck || VL.size() < ListLimit)
5125 return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
5127 // FIXME: The following code has not been updated for non-power-of-2
5128 // vectors. The splitting logic here does not cover the original
5129 // vector if the vector factor is not a power of two.
5130 if (!has_single_bit(VL.size()))
5131 return false;
5133 unsigned Sz = DL->getTypeSizeInBits(ScalarTy);
5134 unsigned MinVF = getMinVF(2 * Sz);
5135 DemandedElts.clearAllBits();
5136 // Iterate through possible vectorization factors and check if vectorized +
5137 // shuffles is better than just gather.
5138 for (unsigned VF = VL.size() / 2; VF >= MinVF; VF /= 2) {
5139 SmallVector<LoadsState> States;
5140 for (unsigned Cnt = 0, End = VL.size(); Cnt + VF <= End; Cnt += VF) {
5141 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
5142 SmallVector<unsigned> Order;
5143 SmallVector<Value *> PointerOps;
5144 LoadsState LS =
5145 canVectorizeLoads(Slice, Slice.front(), Order, PointerOps, BestVF,
5146 /*TryRecursiveCheck=*/false);
5147 // Check that the sorted loads are consecutive.
5148 if (LS == LoadsState::Gather) {
5149 if (BestVF) {
5150 DemandedElts.setAllBits();
5151 break;
5153 DemandedElts.setBits(Cnt, Cnt + VF);
5154 continue;
5156 // If reordering is needed - consider it as a high-cost masked gather for now.
5157 if ((LS == LoadsState::Vectorize ||
5158 LS == LoadsState::StridedVectorize) &&
5159 !Order.empty() && !isReverseOrder(Order))
5160 LS = LoadsState::ScatterVectorize;
5161 States.push_back(LS);
5163 if (DemandedElts.isAllOnes())
5164 // All loads gathered - try smaller VF.
5165 continue;
5166 // Can be vectorized later as a series of loads/insertelements.
5167 InstructionCost VecLdCost = 0;
5168 if (!DemandedElts.isZero()) {
5169 VecLdCost =
5170 TTI.getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
5171 /*Extract=*/false, CostKind) +
5172 ScalarGEPCost;
5173 for (unsigned Idx : seq<unsigned>(VL.size()))
5174 if (DemandedElts[Idx])
5175 VecLdCost +=
5176 TTI.getInstructionCost(cast<Instruction>(VL[Idx]), CostKind);
5178 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
5179 auto *SubVecTy = getWidenedType(ScalarTy, VF);
5180 for (auto [I, LS] : enumerate(States)) {
5181 auto *LI0 = cast<LoadInst>(VL[I * VF]);
5182 InstructionCost VectorGEPCost =
5183 (LS == LoadsState::ScatterVectorize && ProfitableGatherPointers)
5185 : getGEPCosts(TTI, ArrayRef(PointerOps).slice(I * VF, VF),
5186 LI0->getPointerOperand(),
5187 Instruction::GetElementPtr, CostKind, ScalarTy,
5188 SubVecTy)
5189 .second;
5190 if (LS == LoadsState::ScatterVectorize) {
5191 if (static_cast<unsigned>(
5192 count_if(PointerOps, IsaPred<GetElementPtrInst>)) <
5193 PointerOps.size() - 1 ||
5194 any_of(PointerOps, [&](Value *V) {
5195 return getUnderlyingObject(V) !=
5196 getUnderlyingObject(PointerOps.front());
5198 VectorGEPCost += TTI.getScalarizationOverhead(
5199 SubVecTy, APInt::getAllOnes(VF),
5200 /*Insert=*/true, /*Extract=*/false, CostKind);
5201 else
5202 VectorGEPCost +=
5203 TTI.getScalarizationOverhead(
5204 SubVecTy, APInt::getOneBitSet(ScalarTyNumElements * VF, 0),
5205 /*Insert=*/true, /*Extract=*/false, CostKind) +
5206 ::getShuffleCost(TTI, TTI::SK_Broadcast, SubVecTy, {},
5207 CostKind);
5209 switch (LS) {
5210 case LoadsState::Vectorize:
5211 VecLdCost +=
5212 TTI.getMemoryOpCost(Instruction::Load, SubVecTy, LI0->getAlign(),
5213 LI0->getPointerAddressSpace(), CostKind,
5214 TTI::OperandValueInfo()) +
5215 VectorGEPCost;
5216 break;
5217 case LoadsState::StridedVectorize:
5218 VecLdCost += TTI.getStridedMemoryOpCost(Instruction::Load, SubVecTy,
5219 LI0->getPointerOperand(),
5220 /*VariableMask=*/false,
5221 CommonAlignment, CostKind) +
5222 VectorGEPCost;
5223 break;
5224 case LoadsState::ScatterVectorize:
5225 VecLdCost += TTI.getGatherScatterOpCost(Instruction::Load, SubVecTy,
5226 LI0->getPointerOperand(),
5227 /*VariableMask=*/false,
5228 CommonAlignment, CostKind) +
5229 VectorGEPCost;
5230 break;
5231 case LoadsState::Gather:
5232 // Gathers are already calculated - ignore.
5233 continue;
5235 SmallVector<int> ShuffleMask(VL.size());
5236 for (int Idx : seq<int>(0, VL.size()))
5237 ShuffleMask[Idx] = Idx / VF == I ? VL.size() + Idx % VF : Idx;
5238 if (I > 0)
5239 VecLdCost +=
5240 ::getShuffleCost(TTI, TTI::SK_InsertSubvector, VecTy, ShuffleMask,
5241 CostKind, I * VF, SubVecTy);
5243 // If the masked gather cost is higher, it is better to vectorize, so
5244 // consider it as a gather node. It will be better estimated
5245 // later.
5246 if (MaskedGatherCost >= VecLdCost &&
5247 VecLdCost - GatherCost < -SLPCostThreshold) {
5248 if (BestVF)
5249 *BestVF = VF;
5250 return true;
5253 return MaskedGatherCost - GatherCost >= -SLPCostThreshold;
5255 // TODO: need to improve analysis of the pointers; if not all of them are
5256 // GEPs, or they have > 2 operands, we end up with a gather node, which
5257 // just increases the cost.
5258 Loop *L = LI->getLoopFor(cast<LoadInst>(VL0)->getParent());
5259 bool ProfitableGatherPointers =
5260 L && Sz > 2 && static_cast<unsigned>(count_if(PointerOps, [L](Value *V) {
5261 return L->isLoopInvariant(V);
5262 })) <= Sz / 2;
5263 if (ProfitableGatherPointers || all_of(PointerOps, [](Value *P) {
5264 auto *GEP = dyn_cast<GetElementPtrInst>(P);
5265 return (!GEP && doesNotNeedToBeScheduled(P)) ||
5266 (GEP && GEP->getNumOperands() == 2 &&
5267 isa<Constant, Instruction>(GEP->getOperand(1)));
5268 })) {
5269 // Check if potential masked gather can be represented as series
5270 // of loads + insertsubvectors.
5271 // If the masked gather cost is higher, it is better to vectorize, so
5272 // consider it as a gather node. It will be better estimated
5273 // later.
5274 if (!TryRecursiveCheck || !CheckForShuffledLoads(CommonAlignment, BestVF,
5275 ProfitableGatherPointers))
5276 return LoadsState::ScatterVectorize;
5279 return LoadsState::Gather;
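/// Groups the pointers in \p VL by their (basic block, underlying object)
/// base and by constant offset from the first pointer of that base, then
/// fills \p SortedIndices so that pointers with consecutive offsets end up
/// next to each other. Returns false if no useful clustering is found.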
5282 static bool clusterSortPtrAccesses(ArrayRef<Value *> VL,
5283 ArrayRef<BasicBlock *> BBs, Type *ElemTy,
5284 const DataLayout &DL, ScalarEvolution &SE,
5285 SmallVectorImpl<unsigned> &SortedIndices) {
5286 assert(
5287 all_of(VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
5288 "Expected list of pointer operands.");
5289 // Map from bases to a vector of (Ptr, Offset, OrigIdx). We insert each
5290 // Ptr into the vector of its base, sort each vector, and return the sorted
5291 // indices so that related pointers end up next to one another.
5292 SmallMapVector<std::pair<BasicBlock *, Value *>,
5293 SmallVector<SmallVector<std::tuple<Value *, int, unsigned>>>, 8>
5294 Bases;
5295 Bases
5296 .try_emplace(std::make_pair(
5297 BBs.front(), getUnderlyingObject(VL.front(), RecursionMaxDepth)))
5298 .first->second.emplace_back().emplace_back(VL.front(), 0U, 0U);
5300 SortedIndices.clear();
5301 for (auto [Cnt, Ptr] : enumerate(VL.drop_front())) {
5302 auto Key = std::make_pair(BBs[Cnt + 1],
5303 getUnderlyingObject(Ptr, RecursionMaxDepth));
5304 bool Found = any_of(Bases.try_emplace(Key).first->second,
5305 [&, &Cnt = Cnt, &Ptr = Ptr](auto &Base) {
5306 std::optional<int> Diff = getPointersDiff(
5307 ElemTy, std::get<0>(Base.front()), ElemTy,
5308 Ptr, DL, SE,
5309 /*StrictCheck=*/true);
5310 if (!Diff)
5311 return false;
5313 Base.emplace_back(Ptr, *Diff, Cnt + 1);
5314 return true;
5317 if (!Found) {
5318 // If we haven't found enough to usefully cluster, return early.
5319 if (Bases.size() > VL.size() / 2 - 1)
5320 return false;
5322 // Not found already - add a new Base
5323 Bases.find(Key)->second.emplace_back().emplace_back(Ptr, 0, Cnt + 1);
5327 if (Bases.size() == VL.size())
5328 return false;
5330 if (Bases.size() == 1 && (Bases.front().second.size() == 1 ||
5331 Bases.front().second.size() == VL.size()))
5332 return false;
5334 // For each of the bases, sort the pointers by Offset and check if any of
5335 // the bases become consecutive.
5336 auto ComparePointers = [](Value *Ptr1, Value *Ptr2) {
5337 SmallPtrSet<Value *, 13> FirstPointers;
5338 SmallPtrSet<Value *, 13> SecondPointers;
5339 Value *P1 = Ptr1;
5340 Value *P2 = Ptr2;
5341 if (P1 == P2)
5342 return false;
5343 unsigned Depth = 0;
5344 while (!FirstPointers.contains(P2) && !SecondPointers.contains(P1) &&
5345 Depth <= RecursionMaxDepth) {
5346 FirstPointers.insert(P1);
5347 SecondPointers.insert(P2);
5348 P1 = getUnderlyingObject(P1, /*MaxLookup=*/1);
5349 P2 = getUnderlyingObject(P2, /*MaxLookup=*/1);
5350 ++Depth;
5352 assert((FirstPointers.contains(P2) || SecondPointers.contains(P1)) &&
5353 "Unable to find matching root.");
5354 return FirstPointers.contains(P2) && !SecondPointers.contains(P1);
5356 for (auto &Base : Bases) {
5357 for (auto &Vec : Base.second) {
5358 if (Vec.size() > 1) {
5359 stable_sort(Vec, [](const std::tuple<Value *, int, unsigned> &X,
5360 const std::tuple<Value *, int, unsigned> &Y) {
5361 return std::get<1>(X) < std::get<1>(Y);
5363 int InitialOffset = std::get<1>(Vec[0]);
5364 bool AnyConsecutive =
5365 all_of(enumerate(Vec), [InitialOffset](const auto &P) {
5366 return std::get<1>(P.value()) == int(P.index()) + InitialOffset;
5368 // Fill SortedIndices array only if it looks worth-while to sort the
5369 // ptrs.
5370 if (!AnyConsecutive)
5371 return false;
5374 stable_sort(Base.second, [&](const auto &V1, const auto &V2) {
5375 return ComparePointers(std::get<0>(V1.front()), std::get<0>(V2.front()));
5379 for (auto &T : Bases)
5380 for (const auto &Vec : T.second)
5381 for (const auto &P : Vec)
5382 SortedIndices.push_back(std::get<2>(P));
5384 assert(SortedIndices.size() == VL.size() &&
5385 "Expected SortedIndices to be the size of VL");
5386 return true;
5389 std::optional<BoUpSLP::OrdersType>
5390 BoUpSLP::findPartiallyOrderedLoads(const BoUpSLP::TreeEntry &TE) {
5391 assert(TE.isGather() && "Expected gather node only.");
5392 Type *ScalarTy = TE.Scalars[0]->getType();
5394 SmallVector<Value *> Ptrs;
5395 Ptrs.reserve(TE.Scalars.size());
5396 SmallVector<BasicBlock *> BBs;
5397 BBs.reserve(TE.Scalars.size());
5398 for (Value *V : TE.Scalars) {
5399 auto *L = dyn_cast<LoadInst>(V);
5400 if (!L || !L->isSimple())
5401 return std::nullopt;
5402 Ptrs.push_back(L->getPointerOperand());
5403 BBs.push_back(L->getParent());
5406 BoUpSLP::OrdersType Order;
5407 if (!LoadEntriesToVectorize.contains(TE.Idx) &&
5408 clusterSortPtrAccesses(Ptrs, BBs, ScalarTy, *DL, *SE, Order))
5409 return std::move(Order);
5410 return std::nullopt;
5413 /// Check if two insertelement instructions are from the same buildvector.
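/// For example, in the chain
///   %v0 = insertelement <2 x i32> poison, i32 %x, i32 0
///   %v1 = insertelement <2 x i32> %v0, i32 %y, i32 1
/// both insertelement instructions belong to the same buildvector sequence.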
5414 static bool areTwoInsertFromSameBuildVector(
5415 InsertElementInst *VU, InsertElementInst *V,
5416 function_ref<Value *(InsertElementInst *)> GetBaseOperand) {
5417 // Instructions must be from the same basic blocks.
5418 if (VU->getParent() != V->getParent())
5419 return false;
5420 // Checks if 2 insertelements are from the same buildvector.
5421 if (VU->getType() != V->getType())
5422 return false;
5423 // Inserts with multiple uses are separate nodes.
5424 if (!VU->hasOneUse() && !V->hasOneUse())
5425 return false;
5426 auto *IE1 = VU;
5427 auto *IE2 = V;
5428 std::optional<unsigned> Idx1 = getElementIndex(IE1);
5429 std::optional<unsigned> Idx2 = getElementIndex(IE2);
5430 if (Idx1 == std::nullopt || Idx2 == std::nullopt)
5431 return false;
5432 // Go through the vector operand of insertelement instructions trying to find
5433 // either VU as the original vector for IE2 or V as the original vector for
5434 // IE1.
5435 SmallBitVector ReusedIdx(
5436 cast<VectorType>(VU->getType())->getElementCount().getKnownMinValue());
5437 bool IsReusedIdx = false;
5438 do {
5439 if (IE2 == VU && !IE1)
5440 return VU->hasOneUse();
5441 if (IE1 == V && !IE2)
5442 return V->hasOneUse();
5443 if (IE1 && IE1 != V) {
5444 unsigned Idx1 = getElementIndex(IE1).value_or(*Idx2);
5445 IsReusedIdx |= ReusedIdx.test(Idx1);
5446 ReusedIdx.set(Idx1);
5447 if ((IE1 != VU && !IE1->hasOneUse()) || IsReusedIdx)
5448 IE1 = nullptr;
5449 else
5450 IE1 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE1));
5452 if (IE2 && IE2 != VU) {
5453 unsigned Idx2 = getElementIndex(IE2).value_or(*Idx1);
5454 IsReusedIdx |= ReusedIdx.test(Idx2);
5455 ReusedIdx.set(Idx2);
5456 if ((IE2 != V && !IE2->hasOneUse()) || IsReusedIdx)
5457 IE2 = nullptr;
5458 else
5459 IE2 = dyn_cast_or_null<InsertElementInst>(GetBaseOperand(IE2));
5461 } while (!IsReusedIdx && (IE1 || IE2));
5462 return false;
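/// Computes the preferred reordering (if any) for the tree entry \p TE.
/// \p TopToBottom is true when this is queried by the top-to-bottom reordering
/// pass and false for the bottom-to-top pass. Returns std::nullopt if no
/// profitable (non-identity) order is found.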
5465 std::optional<BoUpSLP::OrdersType>
5466 BoUpSLP::getReorderingData(const TreeEntry &TE, bool TopToBottom) {
5467 // No need to reorder if we need to shuffle reuses - the node still needs to
5468 // be shuffled anyway.
5469 if (!TE.ReuseShuffleIndices.empty()) {
5470 // FIXME: Support ReuseShuffleIndices for non-power-of-two vectors.
5471 assert(!TE.hasNonWholeRegisterOrNonPowerOf2Vec(*TTI) &&
5472 "Reshuffling scalars not yet supported for nodes with padding");
5474 if (isSplat(TE.Scalars))
5475 return std::nullopt;
5476 // Check if reuse shuffle indices can be improved by reordering.
5477 // For this, check that the reuse mask is "clustered", i.e. each scalar value
5478 // is used once in each submask of size <number_of_scalars>.
5479 // Example: 4 scalar values.
5480 // ReuseShuffleIndices mask: 0, 1, 2, 3, 3, 2, 0, 1 - clustered.
5481 // 0, 1, 2, 3, 3, 3, 1, 0 - not clustered, because
5482 // element 3 is used twice in the second submask.
5483 unsigned Sz = TE.Scalars.size();
5484 if (TE.isGather()) {
5485 if (std::optional<OrdersType> CurrentOrder =
5486 findReusedOrderedScalars(TE)) {
5487 SmallVector<int> Mask;
5488 fixupOrderingIndices(*CurrentOrder);
5489 inversePermutation(*CurrentOrder, Mask);
5490 ::addMask(Mask, TE.ReuseShuffleIndices);
5491 OrdersType Res(TE.getVectorFactor(), TE.getVectorFactor());
5492 unsigned Sz = TE.Scalars.size();
5493 for (int K = 0, E = TE.getVectorFactor() / Sz; K < E; ++K) {
5494 for (auto [I, Idx] : enumerate(ArrayRef(Mask).slice(K * Sz, Sz)))
5495 if (Idx != PoisonMaskElem)
5496 Res[Idx + K * Sz] = I + K * Sz;
5498 return std::move(Res);
5501 if (Sz == 2 && TE.getVectorFactor() == 4 &&
5502 TTI->getNumberOfParts(getWidenedType(TE.Scalars.front()->getType(),
5503 2 * TE.getVectorFactor())) == 1)
5504 return std::nullopt;
5505 if (!ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
5506 Sz)) {
5507 SmallVector<int> ReorderMask(Sz, PoisonMaskElem);
5508 if (TE.ReorderIndices.empty())
5509 std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
5510 else
5511 inversePermutation(TE.ReorderIndices, ReorderMask);
5512 ::addMask(ReorderMask, TE.ReuseShuffleIndices);
5513 unsigned VF = ReorderMask.size();
5514 OrdersType ResOrder(VF, VF);
5515 unsigned NumParts = divideCeil(VF, Sz);
5516 SmallBitVector UsedVals(NumParts);
5517 for (unsigned I = 0; I < VF; I += Sz) {
5518 int Val = PoisonMaskElem;
5519 unsigned UndefCnt = 0;
5520 unsigned Limit = std::min(Sz, VF - I);
5521 if (any_of(ArrayRef(ReorderMask).slice(I, Limit),
5522 [&](int Idx) {
5523 if (Val == PoisonMaskElem && Idx != PoisonMaskElem)
5524 Val = Idx;
5525 if (Idx == PoisonMaskElem)
5526 ++UndefCnt;
5527 return Idx != PoisonMaskElem && Idx != Val;
5528 }) ||
5529 Val >= static_cast<int>(NumParts) || UsedVals.test(Val) ||
5530 UndefCnt > Sz / 2)
5531 return std::nullopt;
5532 UsedVals.set(Val);
5533 for (unsigned K = 0; K < NumParts; ++K) {
5534 unsigned Idx = Val + Sz * K;
5535 if (Idx < VF)
5536 ResOrder[Idx] = I + K;
5539 return std::move(ResOrder);
5541 unsigned VF = TE.getVectorFactor();
5542 // Try to build the correct order for extractelement instructions.
5543 SmallVector<int> ReusedMask(TE.ReuseShuffleIndices.begin(),
5544 TE.ReuseShuffleIndices.end());
5545 if (TE.getOpcode() == Instruction::ExtractElement &&
5546 all_of(TE.Scalars, [Sz](Value *V) {
5547 if (isa<PoisonValue>(V))
5548 return true;
5549 std::optional<unsigned> Idx = getExtractIndex(cast<Instruction>(V));
5550 return Idx && *Idx < Sz;
5551 })) {
5552 assert(!TE.isAltShuffle() && "Alternate instructions are only supported "
5553 "by BinaryOperator and CastInst.");
5554 SmallVector<int> ReorderMask(Sz, PoisonMaskElem);
5555 if (TE.ReorderIndices.empty())
5556 std::iota(ReorderMask.begin(), ReorderMask.end(), 0);
5557 else
5558 inversePermutation(TE.ReorderIndices, ReorderMask);
5559 for (unsigned I = 0; I < VF; ++I) {
5560 int &Idx = ReusedMask[I];
5561 if (Idx == PoisonMaskElem)
5562 continue;
5563 Value *V = TE.Scalars[ReorderMask[Idx]];
5564 std::optional<unsigned> EI = getExtractIndex(cast<Instruction>(V));
5565 Idx = std::distance(ReorderMask.begin(), find(ReorderMask, *EI));
5568 // Build the order of VF size; the reuses shuffles need reordering, as they
5569 // are always of VF size.
5570 OrdersType ResOrder(VF);
5571 std::iota(ResOrder.begin(), ResOrder.end(), 0);
5572 auto *It = ResOrder.begin();
5573 for (unsigned K = 0; K < VF; K += Sz) {
5574 OrdersType CurrentOrder(TE.ReorderIndices);
5575 SmallVector<int> SubMask{ArrayRef(ReusedMask).slice(K, Sz)};
5576 if (SubMask.front() == PoisonMaskElem)
5577 std::iota(SubMask.begin(), SubMask.end(), 0);
5578 reorderOrder(CurrentOrder, SubMask);
5579 transform(CurrentOrder, It, [K](unsigned Pos) { return Pos + K; });
5580 std::advance(It, Sz);
5582 if (TE.isGather() && all_of(enumerate(ResOrder), [](const auto &Data) {
5583 return Data.index() == Data.value();
5585 return std::nullopt; // No need to reorder.
5586 return std::move(ResOrder);
5588 if (TE.State == TreeEntry::StridedVectorize && !TopToBottom &&
5589 any_of(TE.UserTreeIndices,
5590 [](const EdgeInfo &EI) {
5591 return !Instruction::isBinaryOp(EI.UserTE->getOpcode());
5592 }) &&
5593 (TE.ReorderIndices.empty() || isReverseOrder(TE.ReorderIndices)))
5594 return std::nullopt;
5595 if ((TE.State == TreeEntry::Vectorize ||
5596 TE.State == TreeEntry::StridedVectorize) &&
5597 (isa<LoadInst, ExtractElementInst, ExtractValueInst>(TE.getMainOp()) ||
5598 (TopToBottom && isa<StoreInst, InsertElementInst>(TE.getMainOp())))) {
5599 assert(!TE.isAltShuffle() && "Alternate instructions are only supported by "
5600 "BinaryOperator and CastInst.");
5601 return TE.ReorderIndices;
5603 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::PHI) {
5604 if (!TE.ReorderIndices.empty())
5605 return TE.ReorderIndices;
5607 SmallVector<Instruction *> UserBVHead(TE.Scalars.size());
5608 for (auto [I, V] : zip(UserBVHead, TE.Scalars)) {
5609 if (!V->hasNUsesOrMore(1))
5610 continue;
5611 auto *II = dyn_cast<InsertElementInst>(*V->user_begin());
5612 if (!II)
5613 continue;
5614 Instruction *BVHead = nullptr;
5615 BasicBlock *BB = II->getParent();
5616 while (II && II->hasOneUse() && II->getParent() == BB) {
5617 BVHead = II;
5618 II = dyn_cast<InsertElementInst>(II->getOperand(0));
5620 I = BVHead;
5623 auto CompareByBasicBlocks = [&](BasicBlock *BB1, BasicBlock *BB2) {
5624 assert(BB1 != BB2 && "Expected different basic blocks.");
5625 auto *NodeA = DT->getNode(BB1);
5626 auto *NodeB = DT->getNode(BB2);
5627 assert(NodeA && "Should only process reachable instructions");
5628 assert(NodeB && "Should only process reachable instructions");
5629 assert((NodeA == NodeB) ==
5630 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
5631 "Different nodes should have different DFS numbers");
5632 return NodeA->getDFSNumIn() < NodeB->getDFSNumIn();
5634 auto PHICompare = [&](unsigned I1, unsigned I2) {
5635 Value *V1 = TE.Scalars[I1];
5636 Value *V2 = TE.Scalars[I2];
5637 if (V1 == V2 || (V1->getNumUses() == 0 && V2->getNumUses() == 0) ||
5638 isa<PoisonValue>(V1) || isa<PoisonValue>(V2))
5639 return false;
5640 if (V1->getNumUses() < V2->getNumUses())
5641 return true;
5642 if (V1->getNumUses() > V2->getNumUses())
5643 return false;
5644 auto *FirstUserOfPhi1 = cast<Instruction>(*V1->user_begin());
5645 auto *FirstUserOfPhi2 = cast<Instruction>(*V2->user_begin());
5646 if (FirstUserOfPhi1->getParent() != FirstUserOfPhi2->getParent())
5647 return CompareByBasicBlocks(FirstUserOfPhi1->getParent(),
5648 FirstUserOfPhi2->getParent());
5649 auto *IE1 = dyn_cast<InsertElementInst>(FirstUserOfPhi1);
5650 auto *IE2 = dyn_cast<InsertElementInst>(FirstUserOfPhi2);
5651 auto *EE1 = dyn_cast<ExtractElementInst>(FirstUserOfPhi1);
5652 auto *EE2 = dyn_cast<ExtractElementInst>(FirstUserOfPhi2);
5653 if (IE1 && !IE2)
5654 return true;
5655 if (!IE1 && IE2)
5656 return false;
5657 if (IE1 && IE2) {
5658 if (UserBVHead[I1] && !UserBVHead[I2])
5659 return true;
5660 if (!UserBVHead[I1])
5661 return false;
5662 if (UserBVHead[I1] == UserBVHead[I2])
5663 return getElementIndex(IE1) < getElementIndex(IE2);
5664 if (UserBVHead[I1]->getParent() != UserBVHead[I2]->getParent())
5665 return CompareByBasicBlocks(UserBVHead[I1]->getParent(),
5666 UserBVHead[I2]->getParent());
5667 return UserBVHead[I1]->comesBefore(UserBVHead[I2]);
5669 if (EE1 && !EE2)
5670 return true;
5671 if (!EE1 && EE2)
5672 return false;
5673 if (EE1 && EE2) {
5674 auto *Inst1 = dyn_cast<Instruction>(EE1->getOperand(0));
5675 auto *Inst2 = dyn_cast<Instruction>(EE2->getOperand(0));
5676 auto *P1 = dyn_cast<Argument>(EE1->getOperand(0));
5677 auto *P2 = dyn_cast<Argument>(EE2->getOperand(0));
5678 if (!Inst2 && !P2)
5679 return Inst1 || P1;
5680 if (EE1->getOperand(0) == EE2->getOperand(0))
5681 return getElementIndex(EE1) < getElementIndex(EE2);
5682 if (!Inst1 && Inst2)
5683 return false;
5684 if (Inst1 && Inst2) {
5685 if (Inst1->getParent() != Inst2->getParent())
5686 return CompareByBasicBlocks(Inst1->getParent(), Inst2->getParent());
5687 return Inst1->comesBefore(Inst2);
5689 if (!P1 && P2)
5690 return false;
5691 assert(P1 && P2 &&
5692 "Expected either instructions or arguments vector operands.");
5693 return P1->getArgNo() < P2->getArgNo();
5695 return false;
5697 OrdersType Phis(TE.Scalars.size());
5698 std::iota(Phis.begin(), Phis.end(), 0);
5699 stable_sort(Phis, PHICompare);
5700 if (isIdentityOrder(Phis))
5701 return std::nullopt; // No need to reorder.
5702 return std::move(Phis);
5704 if (TE.isGather() && !TE.isAltShuffle() && allSameType(TE.Scalars)) {
5705 // TODO: add analysis of other gather nodes with extractelement
5706 // instructions and other values/instructions, not only undefs.
5707 if ((TE.getOpcode() == Instruction::ExtractElement ||
5708 (all_of(TE.Scalars, IsaPred<UndefValue, ExtractElementInst>) &&
5709 any_of(TE.Scalars, IsaPred<ExtractElementInst>))) &&
5710 all_of(TE.Scalars, [](Value *V) {
5711 auto *EE = dyn_cast<ExtractElementInst>(V);
5712 return !EE || isa<FixedVectorType>(EE->getVectorOperandType());
5713 })) {
5714 // Check that gather of extractelements can be represented as
5715 // just a shuffle of a single vector.
5716 OrdersType CurrentOrder;
5717 bool Reuse = canReuseExtract(TE.Scalars, TE.getMainOp(), CurrentOrder,
5718 /*ResizeAllowed=*/true);
5719 if (Reuse || !CurrentOrder.empty())
5720 return std::move(CurrentOrder);
5722 // If the gather node is <undef, v, .., poison> and
5723 // insertelement poison, v, 0 [+ permute]
5724 // is cheaper than
5725 // insertelement poison, v, n - try to reorder.
5726 // If rotating the whole graph, exclude the permute cost, since the whole
5727 // graph might be transformed.
5728 int Sz = TE.Scalars.size();
5729 if (isSplat(TE.Scalars) && !allConstant(TE.Scalars) &&
5730 count_if(TE.Scalars, IsaPred<UndefValue>) == Sz - 1) {
5731 const auto *It =
5732 find_if(TE.Scalars, [](Value *V) { return !isConstant(V); });
5733 if (It == TE.Scalars.begin())
5734 return OrdersType();
5735 auto *Ty = getWidenedType(TE.Scalars.front()->getType(), Sz);
5736 if (It != TE.Scalars.end()) {
5737 OrdersType Order(Sz, Sz);
5738 unsigned Idx = std::distance(TE.Scalars.begin(), It);
5739 Order[Idx] = 0;
5740 fixupOrderingIndices(Order);
5741 SmallVector<int> Mask;
5742 inversePermutation(Order, Mask);
5743 InstructionCost PermuteCost =
5744 TopToBottom
5746 : ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, Ty, Mask);
5747 InstructionCost InsertFirstCost = TTI->getVectorInstrCost(
5748 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, 0,
5749 PoisonValue::get(Ty), *It);
5750 InstructionCost InsertIdxCost = TTI->getVectorInstrCost(
5751 Instruction::InsertElement, Ty, TTI::TCK_RecipThroughput, Idx,
5752 PoisonValue::get(Ty), *It);
5753 if (InsertFirstCost + PermuteCost < InsertIdxCost) {
5754 OrdersType Order(Sz, Sz);
5755 Order[Idx] = 0;
5756 return std::move(Order);
5760 if (isSplat(TE.Scalars))
5761 return std::nullopt;
5762 if (TE.Scalars.size() >= 3)
5763 if (std::optional<OrdersType> Order = findPartiallyOrderedLoads(TE))
5764 return Order;
5765 // Check if we can include the order of vectorized loads. For masked gathers,
5766 // do extra analysis later, so include such nodes in a special list.
5767 if (TE.isGather() && TE.getOpcode() == Instruction::Load) {
5768 SmallVector<Value *> PointerOps;
5769 OrdersType CurrentOrder;
5770 LoadsState Res = canVectorizeLoads(TE.Scalars, TE.Scalars.front(),
5771 CurrentOrder, PointerOps);
5772 if (Res == LoadsState::Vectorize || Res == LoadsState::StridedVectorize)
5773 return std::move(CurrentOrder);
5775 // FIXME: Remove the non-power-of-two check once findReusedOrderedScalars
5776 // has been audited for correctness with non-power-of-two vectors.
5777 if (!VectorizeNonPowerOf2 || !TE.hasNonWholeRegisterOrNonPowerOf2Vec(*TTI))
5778 if (std::optional<OrdersType> CurrentOrder = findReusedOrderedScalars(TE))
5779 return CurrentOrder;
5781 return std::nullopt;
5784 /// Checks if the given mask is a "clustered" mask with the same clusters of
5785 /// size \p Sz, which are not identity submasks.
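/// For example, with \p Sz == 4 the mask {1, 0, 3, 2, 1, 0, 3, 2} qualifies,
/// while {0, 1, 2, 3, 0, 1, 2, 3} does not (its clusters are identity
/// submasks).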
5786 static bool isRepeatedNonIdentityClusteredMask(ArrayRef<int> Mask,
5787 unsigned Sz) {
5788 ArrayRef<int> FirstCluster = Mask.slice(0, Sz);
5789 if (ShuffleVectorInst::isIdentityMask(FirstCluster, Sz))
5790 return false;
5791 for (unsigned I = Sz, E = Mask.size(); I < E; I += Sz) {
5792 ArrayRef<int> Cluster = Mask.slice(I, Sz);
5793 if (Cluster != FirstCluster)
5794 return false;
5796 return true;
5799 void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef<int> Mask) const {
5800 // Reorder reuses mask.
5801 reorderReuses(TE.ReuseShuffleIndices, Mask);
5802 const unsigned Sz = TE.Scalars.size();
5803 // For vectorized nodes and non-clustered reuses, no need to do anything else.
5804 if (!TE.isGather() ||
5805 !ShuffleVectorInst::isOneUseSingleSourceMask(TE.ReuseShuffleIndices,
5806 Sz) ||
5807 !isRepeatedNonIdentityClusteredMask(TE.ReuseShuffleIndices, Sz))
5808 return;
5809 SmallVector<int> NewMask;
5810 inversePermutation(TE.ReorderIndices, NewMask);
5811 addMask(NewMask, TE.ReuseShuffleIndices);
5812 // Clear reorder since it is going to be applied to the new mask.
5813 TE.ReorderIndices.clear();
5814 // Try to improve gathered nodes with clustered reuses, if possible.
5815 ArrayRef<int> Slice = ArrayRef(NewMask).slice(0, Sz);
5816 SmallVector<unsigned> NewOrder(Slice);
5817 inversePermutation(NewOrder, NewMask);
5818 reorderScalars(TE.Scalars, NewMask);
5819 // Fill the reuses mask with the identity submasks.
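// E.g., with Sz == 4 the reuse mask becomes <0, 1, 2, 3, 0, 1, 2, 3, ...>.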
5820 for (auto *It = TE.ReuseShuffleIndices.begin(),
5821 *End = TE.ReuseShuffleIndices.end();
5822 It != End; std::advance(It, Sz))
5823 std::iota(It, std::next(It, Sz), 0);
5826 static void combineOrders(MutableArrayRef<unsigned> Order,
5827 ArrayRef<unsigned> SecondaryOrder) {
5828 assert((SecondaryOrder.empty() || Order.size() == SecondaryOrder.size()) &&
5829 "Expected same size of orders");
5830 unsigned Sz = Order.size();
5831 SmallBitVector UsedIndices(Sz);
5832 for (unsigned Idx : seq<unsigned>(0, Sz)) {
5833 if (Order[Idx] != Sz)
5834 UsedIndices.set(Order[Idx]);
5836 if (SecondaryOrder.empty()) {
5837 for (unsigned Idx : seq<unsigned>(0, Sz))
5838 if (Order[Idx] == Sz && !UsedIndices.test(Idx))
5839 Order[Idx] = Idx;
5840 } else {
5841 for (unsigned Idx : seq<unsigned>(0, Sz))
5842 if (SecondaryOrder[Idx] != Sz && Order[Idx] == Sz &&
5843 !UsedIndices.test(SecondaryOrder[Idx]))
5844 Order[Idx] = SecondaryOrder[Idx];
5848 void BoUpSLP::reorderTopToBottom() {
5849 // Maps VF to the graph nodes.
5850 DenseMap<unsigned, SetVector<TreeEntry *>> VFToOrderedEntries;
5851 // ExtractElement gather nodes which can be vectorized and need to handle
5852 // their ordering.
5853 DenseMap<const TreeEntry *, OrdersType> GathersToOrders;
5855 // Phi nodes can have preferred ordering based on their result users
5856 DenseMap<const TreeEntry *, OrdersType> PhisToOrders;
5858 // AltShuffles can also have a preferred ordering that leads to fewer
5859 // instructions, e.g., the addsub instruction in x86.
5860 DenseMap<const TreeEntry *, OrdersType> AltShufflesToOrders;
5862 // Maps a TreeEntry to the reorder indices of external users.
5863 DenseMap<const TreeEntry *, SmallVector<OrdersType, 1>>
5864 ExternalUserReorderMap;
5865 // Find all reorderable nodes with the given VF.
5866 // Currently these are vectorized stores, loads, extracts + some gathering of
5867 // extracts.
5868 for_each(VectorizableTree, [&, &TTIRef = *TTI](
5869 const std::unique_ptr<TreeEntry> &TE) {
5870 // Look for external users that will probably be vectorized.
5871 SmallVector<OrdersType, 1> ExternalUserReorderIndices =
5872 findExternalStoreUsersReorderIndices(TE.get());
5873 if (!ExternalUserReorderIndices.empty()) {
5874 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5875 ExternalUserReorderMap.try_emplace(TE.get(),
5876 std::move(ExternalUserReorderIndices));
5879 // Patterns like [fadd,fsub] can be combined into a single instruction in
5880 // x86. Reordering them into [fsub,fadd] blocks this pattern. So we need
5881 // to take into account their order when looking for the most used order.
5882 if (TE->isAltShuffle()) {
5883 VectorType *VecTy =
5884 getWidenedType(TE->Scalars[0]->getType(), TE->Scalars.size());
5885 unsigned Opcode0 = TE->getOpcode();
5886 unsigned Opcode1 = TE->getAltOpcode();
5887 SmallBitVector OpcodeMask(getAltInstrMask(TE->Scalars, Opcode0, Opcode1));
5888 // If this pattern is supported by the target then we consider the order.
5889 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
5890 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5891 AltShufflesToOrders.try_emplace(TE.get(), OrdersType());
5893 // TODO: Check the reverse order too.
5896 if (std::optional<OrdersType> CurrentOrder =
5897 getReorderingData(*TE, /*TopToBottom=*/true)) {
5898 // Do not include ordering for nodes used in the alt opcode vectorization,
5899 // better to reorder them during the bottom-to-top stage. If we follow the order
5900 // here, it causes reordering of the whole graph though actually it is
5901 // profitable just to reorder the subgraph that starts from the alternate
5902 // opcode vectorization node. Such nodes already end up with the shuffle
5903 // instruction and it is just enough to change this shuffle rather than
5904 // rotate the scalars for the whole graph.
5905 unsigned Cnt = 0;
5906 const TreeEntry *UserTE = TE.get();
5907 while (UserTE && Cnt < RecursionMaxDepth) {
5908 if (UserTE->UserTreeIndices.size() != 1)
5909 break;
5910 if (all_of(UserTE->UserTreeIndices, [](const EdgeInfo &EI) {
5911 return EI.UserTE->State == TreeEntry::Vectorize &&
5912 EI.UserTE->isAltShuffle() && EI.UserTE->Idx != 0;
5914 return;
5915 UserTE = UserTE->UserTreeIndices.back().UserTE;
5916 ++Cnt;
5918 VFToOrderedEntries[TE->getVectorFactor()].insert(TE.get());
5919 if (!(TE->State == TreeEntry::Vectorize ||
5920 TE->State == TreeEntry::StridedVectorize) ||
5921 !TE->ReuseShuffleIndices.empty())
5922 GathersToOrders.try_emplace(TE.get(), *CurrentOrder);
5923 if (TE->State == TreeEntry::Vectorize &&
5924 TE->getOpcode() == Instruction::PHI)
5925 PhisToOrders.try_emplace(TE.get(), *CurrentOrder);
5929 // Reorder the graph nodes according to their vectorization factor.
5930 for (unsigned VF = VectorizableTree.front()->getVectorFactor();
5931 !VFToOrderedEntries.empty() && VF > 1; VF -= 2 - (VF & 1U)) {
5932 auto It = VFToOrderedEntries.find(VF);
5933 if (It == VFToOrderedEntries.end())
5934 continue;
5935 // Try to find the most profitable order. We are just looking for the most
5936 // used order and reorder the scalar elements in the nodes according to this
5937 // most used order.
5938 ArrayRef<TreeEntry *> OrderedEntries = It->second.getArrayRef();
5939 // Delete VF entry upon exit.
5940 auto Cleanup = make_scope_exit([&]() { VFToOrderedEntries.erase(It); });
5942 // All operands are reordered and used only in this node - propagate the
5943 // most used order to the user node.
5944 MapVector<OrdersType, unsigned,
5945 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
5946 OrdersUses;
5947 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
5948 for (const TreeEntry *OpTE : OrderedEntries) {
5949 // No need to reorder these nodes; we still need to extend and use a shuffle,
5950 // just merge the reordering shuffle and the reuse shuffle.
5951 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
5952 continue;
5953 // Count the number of order uses.
5954 const auto &Order = [OpTE, &GathersToOrders, &AltShufflesToOrders,
5955 &PhisToOrders]() -> const OrdersType & {
5956 if (OpTE->isGather() || !OpTE->ReuseShuffleIndices.empty()) {
5957 auto It = GathersToOrders.find(OpTE);
5958 if (It != GathersToOrders.end())
5959 return It->second;
5961 if (OpTE->isAltShuffle()) {
5962 auto It = AltShufflesToOrders.find(OpTE);
5963 if (It != AltShufflesToOrders.end())
5964 return It->second;
5966 if (OpTE->State == TreeEntry::Vectorize &&
5967 OpTE->getOpcode() == Instruction::PHI) {
5968 auto It = PhisToOrders.find(OpTE);
5969 if (It != PhisToOrders.end())
5970 return It->second;
5972 return OpTE->ReorderIndices;
5973 }();
5974 // First consider the order of the external scalar users.
5975 auto It = ExternalUserReorderMap.find(OpTE);
5976 if (It != ExternalUserReorderMap.end()) {
5977 const auto &ExternalUserReorderIndices = It->second;
5978 // If the OpTE vector factor != number of scalars, use the natural order;
5979 // this is an attempt to reorder a node with reused scalars but with
5980 // external uses.
5981 if (OpTE->getVectorFactor() != OpTE->Scalars.size()) {
5982 OrdersUses.insert(std::make_pair(OrdersType(), 0)).first->second +=
5983 ExternalUserReorderIndices.size();
5984 } else {
5985 for (const OrdersType &ExtOrder : ExternalUserReorderIndices)
5986 ++OrdersUses.insert(std::make_pair(ExtOrder, 0)).first->second;
5988 // No other useful reorder data in this entry.
5989 if (Order.empty())
5990 continue;
5992 // Stores actually store the mask, not the order, need to invert.
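// E.g., a stored mask <2, 0, 1> corresponds to the order <1, 2, 0>, which is
// its inverse permutation.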
5993 if (OpTE->State == TreeEntry::Vectorize &&
5994 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
5995 assert(!OpTE->isAltShuffle() &&
5996 "Alternate instructions are only supported by BinaryOperator "
5997 "and CastInst.");
5998 SmallVector<int> Mask;
5999 inversePermutation(Order, Mask);
6000 unsigned E = Order.size();
6001 OrdersType CurrentOrder(E, E);
6002 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
6003 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx);
6005 fixupOrderingIndices(CurrentOrder);
6006 ++OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second;
6007 } else {
6008 ++OrdersUses.insert(std::make_pair(Order, 0)).first->second;
6011 if (OrdersUses.empty())
6012 continue;
6013 // Choose the most used order.
6014 unsigned IdentityCnt = 0;
6015 unsigned FilledIdentityCnt = 0;
6016 OrdersType IdentityOrder(VF, VF);
6017 for (auto &Pair : OrdersUses) {
6018 if (Pair.first.empty() || isIdentityOrder(Pair.first)) {
6019 if (!Pair.first.empty())
6020 FilledIdentityCnt += Pair.second;
6021 IdentityCnt += Pair.second;
6022 combineOrders(IdentityOrder, Pair.first);
6025 MutableArrayRef<unsigned> BestOrder = IdentityOrder;
6026 unsigned Cnt = IdentityCnt;
6027 for (auto &Pair : OrdersUses) {
6028 // Prefer the identity order. But if a filled identity (non-empty order) is
6029 // found with the same number of uses as the new candidate order, we can
6030 // choose the candidate order instead.
6031 if (Cnt < Pair.second ||
6032 (Cnt == IdentityCnt && IdentityCnt == FilledIdentityCnt &&
6033 Cnt == Pair.second && !BestOrder.empty() &&
6034 isIdentityOrder(BestOrder))) {
6035 combineOrders(Pair.first, BestOrder);
6036 BestOrder = Pair.first;
6037 Cnt = Pair.second;
6038 } else {
6039 combineOrders(BestOrder, Pair.first);
6042 // Set order of the user node.
6043 if (isIdentityOrder(BestOrder))
6044 continue;
6045 fixupOrderingIndices(BestOrder);
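// Build the shuffle mask (the inverse of BestOrder) and the mask form of the
// order itself; e.g., for BestOrder <2, 0, 1> the mask is <1, 2, 0> and
// MaskOrder is <2, 0, 1>.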
6046 SmallVector<int> Mask;
6047 inversePermutation(BestOrder, Mask);
6048 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem);
6049 unsigned E = BestOrder.size();
6050 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
6051 return I < E ? static_cast<int>(I) : PoisonMaskElem;
6053 // Do an actual reordering, if profitable.
6054 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
6055 // Just do the reordering for the nodes with the given VF.
6056 if (TE->Scalars.size() != VF) {
6057 if (TE->ReuseShuffleIndices.size() == VF) {
6058 // Need to reorder the reuses masks of the operands with smaller VF to
6059 // be able to find the match between the graph nodes and scalar
6060 // operands of the given node during vectorization/cost estimation.
6061 assert(all_of(TE->UserTreeIndices,
6062 [VF, &TE](const EdgeInfo &EI) {
6063 return EI.UserTE->Scalars.size() == VF ||
6064 EI.UserTE->Scalars.size() ==
6065 TE->Scalars.size();
6066 }) &&
6067 "All users must be of VF size.");
6068 if (SLPReVec) {
6069 assert(SLPReVec && "Only supported by REVEC.");
6070 // ShuffleVectorInst does not do reorderOperands (and it should not
6071 // because ShuffleVectorInst supports only a limited set of
6072 // patterns). Only do reorderNodeWithReuses if none of the users is a
6073 // ShuffleVectorInst.
6074 if (all_of(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
6075 return isa<ShuffleVectorInst>(EI.UserTE->getMainOp());
6077 continue;
6078 assert(none_of(TE->UserTreeIndices,
6079 [&](const EdgeInfo &EI) {
6080 return isa<ShuffleVectorInst>(
6081 EI.UserTE->getMainOp());
6082 }) &&
6083 "Does not know how to reorder.");
6085 // Update ordering of the operands with the smaller VF than the given
6086 // one.
6087 reorderNodeWithReuses(*TE, Mask);
6089 continue;
6091 if ((TE->State == TreeEntry::Vectorize ||
6092 TE->State == TreeEntry::StridedVectorize) &&
6093 (isa<ExtractElementInst, ExtractValueInst, LoadInst, StoreInst,
6094 InsertElementInst>(TE->getMainOp()) ||
6095 (SLPReVec && isa<ShuffleVectorInst>(TE->getMainOp())))) {
6096 assert(!TE->isAltShuffle() &&
6097 "Alternate instructions are only supported by BinaryOperator "
6098 "and CastInst.");
6099 // Build correct orders for extract{element,value}, loads and
6100 // stores.
6101 reorderOrder(TE->ReorderIndices, Mask);
6102 if (isa<InsertElementInst, StoreInst>(TE->getMainOp()))
6103 TE->reorderOperands(Mask);
6104 } else {
6105 // Reorder the node and its operands.
6106 TE->reorderOperands(Mask);
6107 assert(TE->ReorderIndices.empty() &&
6108 "Expected empty reorder sequence.");
6109 reorderScalars(TE->Scalars, Mask);
6111 if (!TE->ReuseShuffleIndices.empty()) {
6112 // Apply the reversed order to keep the original ordering of the reused
6113 // elements and avoid extra shuffling of the reorder indices.
6114 OrdersType CurrentOrder;
6115 reorderOrder(CurrentOrder, MaskOrder);
6116 SmallVector<int> NewReuses;
6117 inversePermutation(CurrentOrder, NewReuses);
6118 addMask(NewReuses, TE->ReuseShuffleIndices);
6119 TE->ReuseShuffleIndices.swap(NewReuses);
6125 bool BoUpSLP::canReorderOperands(
6126 TreeEntry *UserTE, SmallVectorImpl<std::pair<unsigned, TreeEntry *>> &Edges,
6127 ArrayRef<TreeEntry *> ReorderableGathers,
6128 SmallVectorImpl<TreeEntry *> &GatherOps) {
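// Each operand of the user node must be either an already collected
// vectorized edge, a vectorized node used only by this user, or a single
// matching reorderable gather (unless the operand is all-constant);
// otherwise the operands of the user cannot be reordered.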
6129 for (unsigned I = 0, E = UserTE->getNumOperands(); I < E; ++I) {
6130 if (any_of(Edges, [I](const std::pair<unsigned, TreeEntry *> &OpData) {
6131 return OpData.first == I &&
6132 (OpData.second->State == TreeEntry::Vectorize ||
6133 OpData.second->State == TreeEntry::StridedVectorize);
6135 continue;
6136 if (TreeEntry *TE = getVectorizedOperand(UserTE, I)) {
6137 // Do not reorder if operand node is used by many user nodes.
6138 if (any_of(TE->UserTreeIndices,
6139 [UserTE](const EdgeInfo &EI) { return EI.UserTE != UserTE; }))
6140 return false;
6141 // Add the node to the list of the ordered nodes with the identity
6142 // order.
6143 Edges.emplace_back(I, TE);
6144 // Add ScatterVectorize nodes to the list of operands, where just
6145 // reordering of the scalars is required. Similar to the gathers, so
6146 // simply add to the list of gathered ops.
6147 // If there are reused scalars, process this node as a regular vectorize
6148 // node, just reorder reuses mask.
6149 if (TE->State != TreeEntry::Vectorize &&
6150 TE->State != TreeEntry::StridedVectorize &&
6151 TE->ReuseShuffleIndices.empty() && TE->ReorderIndices.empty())
6152 GatherOps.push_back(TE);
6153 continue;
6155 TreeEntry *Gather = nullptr;
6156 if (count_if(ReorderableGathers,
6157 [&Gather, UserTE, I](TreeEntry *TE) {
6158 assert(TE->State != TreeEntry::Vectorize &&
6159 TE->State != TreeEntry::StridedVectorize &&
6160 "Only non-vectorized nodes are expected.");
6161 if (any_of(TE->UserTreeIndices,
6162 [UserTE, I](const EdgeInfo &EI) {
6163 return EI.UserTE == UserTE && EI.EdgeIdx == I;
6164 })) {
6165 assert(TE->isSame(UserTE->getOperand(I)) &&
6166 "Operand entry does not match operands.");
6167 Gather = TE;
6168 return true;
6170 return false;
6171 }) > 1 &&
6172 !allConstant(UserTE->getOperand(I)))
6173 return false;
6174 if (Gather)
6175 GatherOps.push_back(Gather);
6177 return true;
6180 void BoUpSLP::reorderBottomToTop(bool IgnoreReorder) {
6181 SetVector<TreeEntry *> OrderedEntries;
6182 DenseSet<const TreeEntry *> GathersToOrders;
6183 // Find all reorderable leaf nodes with the given VF.
6184 // Currently these are vectorized loads, extracts without alternate operands +
6185 // some gathering of extracts.
6186 SmallVector<TreeEntry *> NonVectorized;
6187 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
6188 if (TE->State != TreeEntry::Vectorize &&
6189 TE->State != TreeEntry::StridedVectorize)
6190 NonVectorized.push_back(TE.get());
6191 if (std::optional<OrdersType> CurrentOrder =
6192 getReorderingData(*TE, /*TopToBottom=*/false)) {
6193 OrderedEntries.insert(TE.get());
6194 if (!(TE->State == TreeEntry::Vectorize ||
6195 TE->State == TreeEntry::StridedVectorize) ||
6196 !TE->ReuseShuffleIndices.empty())
6197 GathersToOrders.insert(TE.get());
6201 // 1. Propagate order to the graph nodes, which use only reordered nodes.
6202 // I.e., if the node has operands that are reordered, try to make at least
6203 // one operand order in the natural order and reorder others + reorder the
6204 // user node itself.
6205 SmallPtrSet<const TreeEntry *, 4> Visited;
6206 while (!OrderedEntries.empty()) {
6207 // 1. Filter out only reordered nodes.
6208 // 2. If the entry has multiple uses - skip it and jump to the next node.
6209 DenseMap<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>> Users;
6210 SmallVector<TreeEntry *> Filtered;
6211 for (TreeEntry *TE : OrderedEntries) {
6212 if (!(TE->State == TreeEntry::Vectorize ||
6213 TE->State == TreeEntry::StridedVectorize ||
6214 (TE->isGather() && GathersToOrders.contains(TE))) ||
6215 TE->UserTreeIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
6216 !all_of(drop_begin(TE->UserTreeIndices),
6217 [TE](const EdgeInfo &EI) {
6218 return EI.UserTE == TE->UserTreeIndices.front().UserTE;
6219 }) ||
6220 !Visited.insert(TE).second) {
6221 Filtered.push_back(TE);
6222 continue;
6224 // Build a map between user nodes and the order of their operands to speed up
6225 // the search. The graph currently does not provide this dependency directly.
6226 for (EdgeInfo &EI : TE->UserTreeIndices)
6227 Users[EI.UserTE].emplace_back(EI.EdgeIdx, TE);
6229 // Erase filtered entries.
6230 for (TreeEntry *TE : Filtered)
6231 OrderedEntries.remove(TE);
6232 SmallVector<
6233 std::pair<TreeEntry *, SmallVector<std::pair<unsigned, TreeEntry *>>>>
6234 UsersVec(Users.begin(), Users.end());
6235 sort(UsersVec, [](const auto &Data1, const auto &Data2) {
6236 return Data1.first->Idx > Data2.first->Idx;
6238 for (auto &Data : UsersVec) {
6239 // Check that operands are used only in the User node.
6240 SmallVector<TreeEntry *> GatherOps;
6241 if (!canReorderOperands(Data.first, Data.second, NonVectorized,
6242 GatherOps)) {
6243 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6244 OrderedEntries.remove(Op.second);
6245 continue;
6247 // All operands are reordered and used only in this node - propagate the
6248 // most used order to the user node.
6249 MapVector<OrdersType, unsigned,
6250 DenseMap<OrdersType, unsigned, OrdersTypeDenseMapInfo>>
6251 OrdersUses;
6252 // Do the analysis for each tree entry only once, otherwise the order of
6253 // the same node may be considered several times, though it might not be
6254 // profitable.
6255 SmallPtrSet<const TreeEntry *, 4> VisitedOps;
6256 SmallPtrSet<const TreeEntry *, 4> VisitedUsers;
6257 for (const auto &Op : Data.second) {
6258 TreeEntry *OpTE = Op.second;
6259 if (!VisitedOps.insert(OpTE).second)
6260 continue;
6261 if (!OpTE->ReuseShuffleIndices.empty() && !GathersToOrders.count(OpTE))
6262 continue;
6263 const auto Order = [&]() -> const OrdersType {
6264 if (OpTE->isGather() || !OpTE->ReuseShuffleIndices.empty())
6265 return getReorderingData(*OpTE, /*TopToBottom=*/false)
6266 .value_or(OrdersType(1));
6267 return OpTE->ReorderIndices;
6268 }();
6269 // The order is partially ordered, skip it in favor of fully non-ordered
6270 // orders.
6271 if (Order.size() == 1)
6272 continue;
6273 unsigned NumOps = count_if(
6274 Data.second, [OpTE](const std::pair<unsigned, TreeEntry *> &P) {
6275 return P.second == OpTE;
6277 // Stores actually store the mask, not the order, need to invert.
6278 if (OpTE->State == TreeEntry::Vectorize &&
6279 OpTE->getOpcode() == Instruction::Store && !Order.empty()) {
6280 assert(!OpTE->isAltShuffle() &&
6281 "Alternate instructions are only supported by BinaryOperator "
6282 "and CastInst.");
6283 SmallVector<int> Mask;
6284 inversePermutation(Order, Mask);
6285 unsigned E = Order.size();
6286 OrdersType CurrentOrder(E, E);
6287 transform(Mask, CurrentOrder.begin(), [E](int Idx) {
6288 return Idx == PoisonMaskElem ? E : static_cast<unsigned>(Idx);
6290 fixupOrderingIndices(CurrentOrder);
6291 OrdersUses.insert(std::make_pair(CurrentOrder, 0)).first->second +=
6292 NumOps;
6293 } else {
6294 OrdersUses.insert(std::make_pair(Order, 0)).first->second += NumOps;
6296 auto Res = OrdersUses.insert(std::make_pair(OrdersType(), 0));
6297 const auto AllowsReordering = [&](const TreeEntry *TE) {
6298 if (!TE->ReorderIndices.empty() || !TE->ReuseShuffleIndices.empty() ||
6299 (TE->State == TreeEntry::Vectorize && TE->isAltShuffle()) ||
6300 (IgnoreReorder && TE->Idx == 0))
6301 return true;
6302 if (TE->isGather()) {
6303 if (GathersToOrders.contains(TE))
6304 return !getReorderingData(*TE, /*TopToBottom=*/false)
6305 .value_or(OrdersType(1))
6306 .empty();
6307 return true;
6309 return false;
6311 for (const EdgeInfo &EI : OpTE->UserTreeIndices) {
6312 TreeEntry *UserTE = EI.UserTE;
6313 if (!VisitedUsers.insert(UserTE).second)
6314 continue;
6315 // May reorder user node if it requires reordering, has reused
6316 // scalars, is an alternate op vectorize node or its op nodes require
6317 // reordering.
6318 if (AllowsReordering(UserTE))
6319 continue;
6320 // Check if users allow reordering.
6321 // Currently look up just 1 level of operands to avoid an increase in
6322 // compile time.
6323 // It is profitable to reorder if definitely more operands allow
6324 // reordering than those that keep the natural order.
6325 ArrayRef<std::pair<unsigned, TreeEntry *>> Ops = Users[UserTE];
6326 if (static_cast<unsigned>(count_if(
6327 Ops, [UserTE, &AllowsReordering](
6328 const std::pair<unsigned, TreeEntry *> &Op) {
6329 return AllowsReordering(Op.second) &&
6330 all_of(Op.second->UserTreeIndices,
6331 [UserTE](const EdgeInfo &EI) {
6332 return EI.UserTE == UserTE;
6334 })) <= Ops.size() / 2)
6335 ++Res.first->second;
6338 if (OrdersUses.empty()) {
6339 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6340 OrderedEntries.remove(Op.second);
6341 continue;
6343 // Choose the most used order.
6344 unsigned IdentityCnt = 0;
6345 unsigned VF = Data.second.front().second->getVectorFactor();
6346 OrdersType IdentityOrder(VF, VF);
6347 for (auto &Pair : OrdersUses) {
6348 if (Pair.first.empty() || isIdentityOrder(Pair.first)) {
6349 IdentityCnt += Pair.second;
6350 combineOrders(IdentityOrder, Pair.first);
6353 MutableArrayRef<unsigned> BestOrder = IdentityOrder;
6354 unsigned Cnt = IdentityCnt;
6355 for (auto &Pair : OrdersUses) {
6356 // Prefer the identity order. But if a filled identity (non-empty
6357 // order) is found with the same number of uses as the new candidate
6358 // order, we can choose the candidate order instead.
6359 if (Cnt < Pair.second) {
6360 combineOrders(Pair.first, BestOrder);
6361 BestOrder = Pair.first;
6362 Cnt = Pair.second;
6363 } else {
6364 combineOrders(BestOrder, Pair.first);
6367 // Set order of the user node.
6368 if (isIdentityOrder(BestOrder)) {
6369 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second)
6370 OrderedEntries.remove(Op.second);
6371 continue;
6373 fixupOrderingIndices(BestOrder);
6374 // Erase operands from OrderedEntries list and adjust their orders.
6375 VisitedOps.clear();
6376 SmallVector<int> Mask;
6377 inversePermutation(BestOrder, Mask);
6378 SmallVector<int> MaskOrder(BestOrder.size(), PoisonMaskElem);
6379 unsigned E = BestOrder.size();
6380 transform(BestOrder, MaskOrder.begin(), [E](unsigned I) {
6381 return I < E ? static_cast<int>(I) : PoisonMaskElem;
6383 for (const std::pair<unsigned, TreeEntry *> &Op : Data.second) {
6384 TreeEntry *TE = Op.second;
6385 OrderedEntries.remove(TE);
6386 if (!VisitedOps.insert(TE).second)
6387 continue;
6388 if (TE->ReuseShuffleIndices.size() == BestOrder.size()) {
6389 reorderNodeWithReuses(*TE, Mask);
6390 continue;
6392 // Gathers are processed separately.
6393 if (TE->State != TreeEntry::Vectorize &&
6394 TE->State != TreeEntry::StridedVectorize &&
6395 (TE->State != TreeEntry::ScatterVectorize ||
6396 TE->ReorderIndices.empty()))
6397 continue;
6398 assert((BestOrder.size() == TE->ReorderIndices.size() ||
6399 TE->ReorderIndices.empty()) &&
6400 "Non-matching sizes of user/operand entries.");
6401 reorderOrder(TE->ReorderIndices, Mask);
6402 if (IgnoreReorder && TE == VectorizableTree.front().get())
6403 IgnoreReorder = false;
6405 // For gathers we just need to reorder their scalars.
6406 for (TreeEntry *Gather : GatherOps) {
6407 assert(Gather->ReorderIndices.empty() &&
6408 "Unexpected reordering of gathers.");
6409 if (!Gather->ReuseShuffleIndices.empty()) {
6410 // Just reorder reuses indices.
6411 reorderReuses(Gather->ReuseShuffleIndices, Mask);
6412 continue;
6414 reorderScalars(Gather->Scalars, Mask);
6415 OrderedEntries.remove(Gather);
6417 // Reorder operands of the user node and set the ordering for the user
6418 // node itself.
6419 if (Data.first->State != TreeEntry::Vectorize ||
6420 !isa<ExtractElementInst, ExtractValueInst, LoadInst>(
6421 Data.first->getMainOp()) ||
6422 Data.first->isAltShuffle())
6423 Data.first->reorderOperands(Mask);
6424 if (!isa<InsertElementInst, StoreInst>(Data.first->getMainOp()) ||
6425 Data.first->isAltShuffle() ||
6426 Data.first->State == TreeEntry::StridedVectorize) {
6427 reorderScalars(Data.first->Scalars, Mask);
6428 reorderOrder(Data.first->ReorderIndices, MaskOrder,
6429 /*BottomOrder=*/true);
6430 if (Data.first->ReuseShuffleIndices.empty() &&
6431 !Data.first->ReorderIndices.empty() &&
6432 !Data.first->isAltShuffle()) {
6433 // Insert user node to the list to try to sink reordering deeper in
6434 // the graph.
6435 OrderedEntries.insert(Data.first);
6437 } else {
6438 reorderOrder(Data.first->ReorderIndices, Mask);
6442 // If the reordering is unnecessary, just remove the reorder.
6443 if (IgnoreReorder && !VectorizableTree.front()->ReorderIndices.empty() &&
6444 VectorizableTree.front()->ReuseShuffleIndices.empty())
6445 VectorizableTree.front()->ReorderIndices.clear();
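/// Returns the root instruction of \p Entry: for reversed strided
/// loads/stores this is the scalar at the first reorder index, otherwise the
/// first scalar of the entry.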
6448 Instruction *BoUpSLP::getRootEntryInstruction(const TreeEntry &Entry) const {
6449 if ((Entry.getOpcode() == Instruction::Store ||
6450 Entry.getOpcode() == Instruction::Load) &&
6451 Entry.State == TreeEntry::StridedVectorize &&
6452 !Entry.ReorderIndices.empty() && isReverseOrder(Entry.ReorderIndices))
6453 return dyn_cast<Instruction>(Entry.Scalars[Entry.ReorderIndices.front()]);
6454 return dyn_cast<Instruction>(Entry.Scalars.front());
6457 void BoUpSLP::buildExternalUses(
6458 const ExtraValueToDebugLocsMap &ExternallyUsedValues) {
6459 DenseMap<Value *, unsigned> ScalarToExtUses;
6460 // Collect the values that we need to extract from the tree.
6461 for (auto &TEPtr : VectorizableTree) {
6462 TreeEntry *Entry = TEPtr.get();
6464 // No need to handle users of gathered values.
6465 if (Entry->isGather())
6466 continue;
6468 // For each lane:
6469 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
6470 Value *Scalar = Entry->Scalars[Lane];
6471 if (!isa<Instruction>(Scalar))
6472 continue;
6473 // All uses must be replaced already? No need to do it again.
6474 auto It = ScalarToExtUses.find(Scalar);
6475 if (It != ScalarToExtUses.end() && !ExternalUses[It->second].User)
6476 continue;
6478 // Check if the scalar is externally used as an extra arg.
6479 const auto ExtI = ExternallyUsedValues.find(Scalar);
6480 if (ExtI != ExternallyUsedValues.end()) {
6481 int FoundLane = Entry->findLaneForValue(Scalar);
6482 LLVM_DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane "
6483 << FoundLane << " from " << *Scalar << ".\n");
6484 ScalarToExtUses.try_emplace(Scalar, ExternalUses.size());
6485 ExternalUses.emplace_back(Scalar, nullptr, FoundLane);
6486 continue;
6488 for (User *U : Scalar->users()) {
6489 LLVM_DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
6491 Instruction *UserInst = dyn_cast<Instruction>(U);
6492 if (!UserInst || isDeleted(UserInst))
6493 continue;
6495 // Ignore users in the user ignore list.
6496 if (UserIgnoreList && UserIgnoreList->contains(UserInst))
6497 continue;
6499 // Skip in-tree scalars that become vectors
6500 if (TreeEntry *UseEntry = getTreeEntry(U)) {
6501 // Some in-tree scalars will remain as scalar in vectorized
6502 // instructions. If that is the case, the one in FoundLane will
6503 // be used.
6504 if (UseEntry->State == TreeEntry::ScatterVectorize ||
6505 !doesInTreeUserNeedToExtract(
6506 Scalar, getRootEntryInstruction(*UseEntry), TLI)) {
6507 LLVM_DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
6508 << ".\n");
6509 assert(!UseEntry->isGather() && "Bad state");
6510 continue;
6512 U = nullptr;
6513 if (It != ScalarToExtUses.end()) {
6514 ExternalUses[It->second].User = nullptr;
6515 break;
6519 if (U && Scalar->hasNUsesOrMore(UsesLimit))
6520 U = nullptr;
6521 int FoundLane = Entry->findLaneForValue(Scalar);
6522 LLVM_DEBUG(dbgs() << "SLP: Need to extract:" << *UserInst
6523 << " from lane " << FoundLane << " from " << *Scalar
6524 << ".\n");
6525 It = ScalarToExtUses.try_emplace(Scalar, ExternalUses.size()).first;
6526 ExternalUses.emplace_back(Scalar, U, FoundLane);
6527 if (!U)
6528 break;
6534 SmallVector<SmallVector<StoreInst *>>
6535 BoUpSLP::collectUserStores(const BoUpSLP::TreeEntry *TE) const {
6536 SmallDenseMap<std::tuple<BasicBlock *, Type *, Value *>,
6537 SmallVector<StoreInst *>, 8>
6538 PtrToStoresMap;
6539 for (unsigned Lane : seq<unsigned>(0, TE->Scalars.size())) {
6540 Value *V = TE->Scalars[Lane];
6541 // Don't iterate over the users of constant data.
6542 if (!isa<Instruction>(V))
6543 continue;
6544 // To save compilation time we don't visit if we have too many users.
6545 if (V->hasNUsesOrMore(UsesLimit))
6546 break;
6548 // Collect stores per pointer object.
6549 for (User *U : V->users()) {
6550 auto *SI = dyn_cast<StoreInst>(U);
6551 // Test whether we can handle the store. V might be a global, which could
6552 // be used in a different function.
6553 if (SI == nullptr || !SI->isSimple() || SI->getFunction() != F ||
6554 !isValidElementType(SI->getValueOperand()->getType()))
6555 continue;
6556 // Skip the store if it is already in the tree.
6557 if (getTreeEntry(U))
6558 continue;
6560 Value *Ptr =
6561 getUnderlyingObject(SI->getPointerOperand(), RecursionMaxDepth);
6562 auto &StoresVec = PtrToStoresMap[{SI->getParent(),
6563 SI->getValueOperand()->getType(), Ptr}];
6564 // For now just keep one store per pointer object per lane.
6565 // TODO: Extend this to support multiple stores per pointer per lane
6566 if (StoresVec.size() > Lane)
6567 continue;
6568 if (!StoresVec.empty()) {
6569 std::optional<int> Diff = getPointersDiff(
6570 SI->getValueOperand()->getType(), SI->getPointerOperand(),
6571 SI->getValueOperand()->getType(),
6572 StoresVec.front()->getPointerOperand(), *DL, *SE,
6573 /*StrictCheck=*/true);
6574 // We failed to compare the pointers so just abandon this store.
6575 if (!Diff)
6576 continue;
6578 StoresVec.push_back(SI);
6581 SmallVector<SmallVector<StoreInst *>> Res(PtrToStoresMap.size());
6582 unsigned I = 0;
6583 for (auto &P : PtrToStoresMap) {
6584 Res[I].swap(P.second);
6585 ++I;
6587 return Res;
6590 bool BoUpSLP::canFormVector(ArrayRef<StoreInst *> StoresVec,
6591 OrdersType &ReorderIndices) const {
6592 // We check whether the stores in StoresVec can form a vector by sorting them
6593 // and checking whether they are consecutive.
6595 // To avoid calling getPointersDiff() while sorting we create a vector of
6596 // pairs {store, offset from first} and sort this instead.
6597 SmallVector<std::pair<int, unsigned>> StoreOffsetVec;
6598 StoreInst *S0 = StoresVec[0];
6599 StoreOffsetVec.emplace_back(0, 0);
6600 Type *S0Ty = S0->getValueOperand()->getType();
6601 Value *S0Ptr = S0->getPointerOperand();
6602 for (unsigned Idx : seq<unsigned>(1, StoresVec.size())) {
6603 StoreInst *SI = StoresVec[Idx];
6604 std::optional<int> Diff =
6605 getPointersDiff(S0Ty, S0Ptr, SI->getValueOperand()->getType(),
6606 SI->getPointerOperand(), *DL, *SE,
6607 /*StrictCheck=*/true);
6608 StoreOffsetVec.emplace_back(*Diff, Idx);
6611 // Check if the stores are consecutive by checking if their difference is 1.
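// E.g., sorted offsets {0, 1, 2, 3} are consecutive, while {0, 2, 3, 4} are
// not because of the gap between 0 and 2.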
6612 if (StoreOffsetVec.size() != StoresVec.size())
6613 return false;
6614 sort(StoreOffsetVec,
6615 [](const std::pair<int, unsigned> &L,
6616 const std::pair<int, unsigned> &R) { return L.first < R.first; });
6617 unsigned Idx = 0;
6618 int PrevDist = 0;
6619 for (const auto &P : StoreOffsetVec) {
6620 if (Idx > 0 && P.first != PrevDist + 1)
6621 return false;
6622 PrevDist = P.first;
6623 ++Idx;
6626 // Calculate the shuffle indices according to their offset against the sorted
6627 // StoreOffsetVec.
6628 ReorderIndices.assign(StoresVec.size(), 0);
6629 bool IsIdentity = true;
6630 for (auto [I, P] : enumerate(StoreOffsetVec)) {
6631 ReorderIndices[P.second] = I;
6632 IsIdentity &= P.second == I;
6634 // Identity order (e.g., {0,1,2,3}) is modeled as an empty OrdersType in
6635 // reorderTopToBottom() and reorderBottomToTop(), so we are following the
6636 // same convention here.
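// E.g., if the stores are already in offset order, ReorderIndices would be
// {0, 1, 2, 3} and is cleared to denote the identity order.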
6637 if (IsIdentity)
6638 ReorderIndices.clear();
6640 return true;
6643 #ifndef NDEBUG
6644 LLVM_DUMP_METHOD static void dumpOrder(const BoUpSLP::OrdersType &Order) {
6645 for (unsigned Idx : Order)
6646 dbgs() << Idx << ", ";
6647 dbgs() << "\n";
6649 #endif
6651 SmallVector<BoUpSLP::OrdersType, 1>
6652 BoUpSLP::findExternalStoreUsersReorderIndices(TreeEntry *TE) const {
6653 unsigned NumLanes = TE->Scalars.size();
6655 SmallVector<SmallVector<StoreInst *>> Stores = collectUserStores(TE);
6657 // Holds the reorder indices for each candidate store vector that is a user of
6658 // the current TreeEntry.
6659 SmallVector<OrdersType, 1> ExternalReorderIndices;
6661 // Now inspect the stores collected per pointer and look for vectorization
6662 // candidates. For each candidate calculate the reorder index vector and push
6663 // it into `ExternalReorderIndices`
6664 for (ArrayRef<StoreInst *> StoresVec : Stores) {
6665 // If we have fewer than NumLanes stores, then we can't form a vector.
6666 if (StoresVec.size() != NumLanes)
6667 continue;
6669 // If the stores are not consecutive then abandon this StoresVec.
6670 OrdersType ReorderIndices;
6671 if (!canFormVector(StoresVec, ReorderIndices))
6672 continue;
6674 // We now know that the scalars in StoresVec can form a vector instruction,
6675 // so set the reorder indices.
6676 ExternalReorderIndices.push_back(ReorderIndices);
6678 return ExternalReorderIndices;
6681 void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
6682 const SmallDenseSet<Value *> &UserIgnoreLst) {
6683 deleteTree();
6684 UserIgnoreList = &UserIgnoreLst;
6685 if (!allSameType(Roots))
6686 return;
6687 buildTree_rec(Roots, 0, EdgeInfo());
6690 void BoUpSLP::buildTree(ArrayRef<Value *> Roots) {
6691 deleteTree();
6692 if (!allSameType(Roots))
6693 return;
6694 buildTree_rec(Roots, 0, EdgeInfo());
6697 /// Tries to find a subvector of loads and builds a new vector of only loads, if
6698 /// it can be profitable.
6699 static void gatherPossiblyVectorizableLoads(
6700 const BoUpSLP &R, ArrayRef<Value *> VL, const DataLayout &DL,
6701 ScalarEvolution &SE, const TargetTransformInfo &TTI,
6702 SmallVectorImpl<SmallVector<std::pair<LoadInst *, int>>> &GatheredLoads,
6703 bool AddNew = true) {
6704 if (VL.empty())
6705 return;
6706 Type *ScalarTy = getValueType(VL.front());
6707 if (!isValidElementType(ScalarTy))
6708 return;
6709 SmallVector<SmallVector<std::pair<LoadInst *, int>>> ClusteredLoads;
6710 SmallVector<DenseMap<int, LoadInst *>> ClusteredDistToLoad;
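// Group the loads into clusters: a load joins an existing cluster if its
// constant pointer distance from the cluster's first load can be computed and
// that distance is not already taken; otherwise it starts a new cluster.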
6711 for (Value *V : VL) {
6712 auto *LI = dyn_cast<LoadInst>(V);
6713 if (!LI)
6714 continue;
6715 if (R.isDeleted(LI) || R.isVectorized(LI) || !LI->isSimple())
6716 continue;
6717 bool IsFound = false;
6718 for (auto [Map, Data] : zip(ClusteredDistToLoad, ClusteredLoads)) {
6719 assert(LI->getParent() == Data.front().first->getParent() &&
6720 LI->getType() == Data.front().first->getType() &&
6721 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth) ==
6722 getUnderlyingObject(Data.front().first->getPointerOperand(),
6723 RecursionMaxDepth) &&
6724 "Expected loads with the same type, same parent and same "
6725 "underlying pointer.");
6726 std::optional<int> Dist = getPointersDiff(
6727 LI->getType(), LI->getPointerOperand(), Data.front().first->getType(),
6728 Data.front().first->getPointerOperand(), DL, SE,
6729 /*StrictCheck=*/true);
6730 if (!Dist)
6731 continue;
6732 auto It = Map.find(*Dist);
6733 if (It != Map.end() && It->second != LI)
6734 continue;
6735 if (It == Map.end()) {
6736 Data.emplace_back(LI, *Dist);
6737 Map.try_emplace(*Dist, LI);
6739 IsFound = true;
6740 break;
6742 if (!IsFound) {
6743 ClusteredLoads.emplace_back().emplace_back(LI, 0);
6744 ClusteredDistToLoad.emplace_back().try_emplace(0, LI);
6747 auto FindMatchingLoads =
6748 [&](ArrayRef<std::pair<LoadInst *, int>> Loads,
6749 SmallVectorImpl<SmallVector<std::pair<LoadInst *, int>>>
6750 &GatheredLoads,
6751 SetVector<unsigned> &ToAdd, SetVector<unsigned> &Repeated,
6752 int &Offset, unsigned &Start) {
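// Looks for an existing group in GatheredLoads that the given Loads can be
// merged into. On success returns an iterator to that group and the distance
// Offset used to rebase the new loads, recording in ToAdd/Repeated which
// loads are new or already present.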
6753 if (Loads.empty())
6754 return GatheredLoads.end();
6755 SmallVector<std::pair<int, int>> Res;
6756 LoadInst *LI = Loads.front().first;
6757 for (auto [Idx, Data] : enumerate(GatheredLoads)) {
6758 if (Idx < Start)
6759 continue;
6760 ToAdd.clear();
6761 if (LI->getParent() != Data.front().first->getParent() ||
6762 LI->getType() != Data.front().first->getType())
6763 continue;
6764 std::optional<int> Dist =
6765 getPointersDiff(LI->getType(), LI->getPointerOperand(),
6766 Data.front().first->getType(),
6767 Data.front().first->getPointerOperand(), DL, SE,
6768 /*StrictCheck=*/true);
6769 if (!Dist)
6770 continue;
6771 SmallSet<int, 4> DataDists;
6772 SmallPtrSet<LoadInst *, 4> DataLoads;
6773 for (std::pair<LoadInst *, int> P : Data) {
6774 DataDists.insert(P.second);
6775 DataLoads.insert(P.first);
6777 // Found matching gathered loads - check if all loads are unique or
6778 // can be effectively vectorized.
6779 unsigned NumUniques = 0;
6780 for (auto [Cnt, Pair] : enumerate(Loads)) {
6781 bool Used = DataLoads.contains(Pair.first);
6782 if (!Used && !DataDists.contains(*Dist + Pair.second)) {
6783 ++NumUniques;
6784 ToAdd.insert(Cnt);
6785 } else if (Used) {
6786 Repeated.insert(Cnt);
6789 if (NumUniques > 0 &&
6790 (Loads.size() == NumUniques ||
6791 (Loads.size() - NumUniques >= 2 &&
6792 Loads.size() - NumUniques >= Loads.size() / 2 &&
6793 (has_single_bit(Data.size() + NumUniques) ||
6794 bit_ceil(Data.size()) <
6795 bit_ceil(Data.size() + NumUniques))))) {
6796 Offset = *Dist;
6797 Start = Idx + 1;
6798 return std::next(GatheredLoads.begin(), Idx);
6801 ToAdd.clear();
6802 return GatheredLoads.end();
6804 for (ArrayRef<std::pair<LoadInst *, int>> Data : ClusteredLoads) {
6805 unsigned Start = 0;
6806 SetVector<unsigned> ToAdd, LocalToAdd, Repeated;
6807 int Offset = 0;
6808 auto *It = FindMatchingLoads(Data, GatheredLoads, LocalToAdd, Repeated,
6809 Offset, Start);
6810 while (It != GatheredLoads.end()) {
6811 assert(!LocalToAdd.empty() && "Expected some elements to add.");
6812 for (unsigned Idx : LocalToAdd)
6813 It->emplace_back(Data[Idx].first, Data[Idx].second + Offset);
6814 ToAdd.insert(LocalToAdd.begin(), LocalToAdd.end());
6815 It = FindMatchingLoads(Data, GatheredLoads, LocalToAdd, Repeated, Offset,
6816 Start);
6818 if (any_of(seq<unsigned>(Data.size()), [&](unsigned Idx) {
6819 return !ToAdd.contains(Idx) && !Repeated.contains(Idx);
6820 })) {
6821 auto AddNewLoads =
6822 [&](SmallVectorImpl<std::pair<LoadInst *, int>> &Loads) {
6823 for (unsigned Idx : seq<unsigned>(Data.size())) {
6824 if (ToAdd.contains(Idx) || Repeated.contains(Idx))
6825 continue;
6826 Loads.push_back(Data[Idx]);
6829 if (!AddNew) {
6830 LoadInst *LI = Data.front().first;
6831 It = find_if(
6832 GatheredLoads, [&](ArrayRef<std::pair<LoadInst *, int>> PD) {
6833 return PD.front().first->getParent() == LI->getParent() &&
6834 PD.front().first->getType() == LI->getType();
6836 while (It != GatheredLoads.end()) {
6837 AddNewLoads(*It);
6838 It = std::find_if(
6839 std::next(It), GatheredLoads.end(),
6840 [&](ArrayRef<std::pair<LoadInst *, int>> PD) {
6841 return PD.front().first->getParent() == LI->getParent() &&
6842 PD.front().first->getType() == LI->getType();
6846 GatheredLoads.emplace_back().append(Data.begin(), Data.end());
6847 AddNewLoads(GatheredLoads.emplace_back());
6852 void BoUpSLP::tryToVectorizeGatheredLoads(
6853 const SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
6854 SmallVector<SmallVector<std::pair<LoadInst *, int>>>,
6855 8> &GatheredLoads) {
6856 GatheredLoadsEntriesFirst = VectorizableTree.size();
6858 SmallVector<SmallPtrSet<const Value *, 4>> LoadSetsToVectorize(
6859 LoadEntriesToVectorize.size());
6860 for (auto [Idx, Set] : zip(LoadEntriesToVectorize, LoadSetsToVectorize))
6861 Set.insert(VectorizableTree[Idx]->Scalars.begin(),
6862 VectorizableTree[Idx]->Scalars.end());
6864 // Sort loads by distance.
6865 auto LoadSorter = [](const std::pair<LoadInst *, int> &L1,
6866 const std::pair<LoadInst *, int> &L2) {
6867 return L1.second > L2.second;
6870 auto IsMaskedGatherSupported = [&](ArrayRef<LoadInst *> Loads) {
6871 ArrayRef<Value *> Values(reinterpret_cast<Value *const *>(Loads.begin()),
6872 Loads.size());
6873 Align Alignment = computeCommonAlignment<LoadInst>(Values);
6874 auto *Ty = getWidenedType(Loads.front()->getType(), Loads.size());
6875 return TTI->isLegalMaskedGather(Ty, Alignment) &&
6876 !TTI->forceScalarizeMaskedGather(Ty, Alignment);
6879 auto GetVectorizedRanges = [this](ArrayRef<LoadInst *> Loads,
6880 BoUpSLP::ValueSet &VectorizedLoads,
6881 SmallVectorImpl<LoadInst *> &NonVectorized,
6882 bool Final, unsigned MaxVF) {
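// Greedily tries candidate vector factors from the largest full-register
// width down, slicing Loads into contiguous chunks and recording which chunks
// can be vectorized (or be turned into masked gathers).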
6883 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>> Results;
6884 unsigned StartIdx = 0;
6885 SmallVector<int> CandidateVFs;
6886 if (VectorizeNonPowerOf2 && has_single_bit(MaxVF + 1))
6887 CandidateVFs.push_back(MaxVF);
6888 for (int NumElts = getFloorFullVectorNumberOfElements(
6889 *TTI, Loads.front()->getType(), MaxVF);
6890 NumElts > 1; NumElts = getFloorFullVectorNumberOfElements(
6891 *TTI, Loads.front()->getType(), NumElts - 1)) {
6892 CandidateVFs.push_back(NumElts);
6893 if (VectorizeNonPowerOf2 && NumElts > 2)
6894 CandidateVFs.push_back(NumElts - 1);
6897 if (Final && CandidateVFs.empty())
6898 return Results;
6900 unsigned BestVF = Final ? CandidateVFs.back() : 0;
6901 for (unsigned NumElts : CandidateVFs) {
6902 if (Final && NumElts > BestVF)
6903 continue;
6904 SmallVector<unsigned> MaskedGatherVectorized;
6905 for (unsigned Cnt = StartIdx, E = Loads.size(); Cnt < E;
6906 ++Cnt) {
6907 ArrayRef<LoadInst *> Slice =
6908 ArrayRef(Loads).slice(Cnt, std::min(NumElts, E - Cnt));
6909 if (VectorizedLoads.count(Slice.front()) ||
6910 VectorizedLoads.count(Slice.back()) ||
6911 areKnownNonVectorizableLoads(Slice))
6912 continue;
6913 // Check if it is profitable to try vectorizing gathered loads. It is
6914 // profitable if we have at least 3 consecutive loads or if we have
6915 // fewer but all their users are vectorized or deleted.
6916 bool AllowToVectorize = false;
6917 // Check if it is profitable to vectorize 2-elements loads.
6918 if (NumElts == 2) {
6919 bool IsLegalBroadcastLoad = TTI->isLegalBroadcastLoad(
6920 Slice.front()->getType(), ElementCount::getFixed(NumElts));
6921 auto CheckIfAllowed = [=](ArrayRef<LoadInst *> Slice) {
6922 for (LoadInst *LI : Slice) {
6923 // If single use/user - allow to vectorize.
6924 if (LI->hasOneUse())
6925 continue;
6926 // 1. Check if number of uses equals number of users.
6927 // 2. All users are deleted.
6928 // 3. The load broadcasts are not allowed or the load is not
6929 // broadcasted.
6930 if (static_cast<unsigned int>(std::distance(
6931 LI->user_begin(), LI->user_end())) != LI->getNumUses())
6932 return false;
6933 if (!IsLegalBroadcastLoad)
6934 continue;
6935 if (LI->hasNUsesOrMore(UsesLimit))
6936 return false;
6937 for (User *U : LI->users()) {
6938 if (auto *UI = dyn_cast<Instruction>(U); UI && isDeleted(UI))
6939 continue;
6940 if (const TreeEntry *UTE = getTreeEntry(U)) {
6941 for (int I : seq<int>(UTE->getNumOperands())) {
6942 if (all_of(UTE->getOperand(I),
6943 [LI](Value *V) { return V == LI; }))
6944 // Found legal broadcast - do not vectorize.
6945 return false;
6950 return true;
6952 AllowToVectorize = CheckIfAllowed(Slice);
6953 } else {
6954 AllowToVectorize =
6955 (NumElts >= 3 ||
6956 any_of(ValueToGatherNodes.at(Slice.front()),
6957 [=](const TreeEntry *TE) {
6958 return TE->Scalars.size() == 2 &&
6959 ((TE->Scalars.front() == Slice.front() &&
6960 TE->Scalars.back() == Slice.back()) ||
6961 (TE->Scalars.front() == Slice.back() &&
6962 TE->Scalars.back() == Slice.front()));
6963 })) &&
6964 hasFullVectorsOrPowerOf2(*TTI, Slice.front()->getType(),
6965 Slice.size());
6967 if (AllowToVectorize) {
6968 SmallVector<Value *> PointerOps;
6969 OrdersType CurrentOrder;
6970 // Try to build vector load.
6971 ArrayRef<Value *> Values(
6972 reinterpret_cast<Value *const *>(Slice.begin()), Slice.size());
6973 LoadsState LS = canVectorizeLoads(Values, Slice.front(), CurrentOrder,
6974 PointerOps, &BestVF);
6975 if (LS != LoadsState::Gather ||
6976 (BestVF > 1 && static_cast<unsigned>(NumElts) == 2 * BestVF)) {
6977 if (LS == LoadsState::ScatterVectorize) {
6978 if (MaskedGatherVectorized.empty() ||
6979 Cnt >= MaskedGatherVectorized.back() + NumElts)
6980 MaskedGatherVectorized.push_back(Cnt);
6981 continue;
6983 if (LS != LoadsState::Gather) {
6984 Results.emplace_back(Values, LS);
6985 VectorizedLoads.insert(Slice.begin(), Slice.end());
6986 // If we vectorized initial block, no need to try to vectorize it
6987 // again.
6988 if (Cnt == StartIdx)
6989 StartIdx += NumElts;
6991 // Check if the whole array was vectorized already - exit.
6992 if (StartIdx >= Loads.size())
6993 break;
6994 // Erase last masked gather candidate, if another candidate within
6995 // the range is found to be better.
6996 if (!MaskedGatherVectorized.empty() &&
6997 Cnt < MaskedGatherVectorized.back() + NumElts)
6998 MaskedGatherVectorized.pop_back();
6999 Cnt += NumElts - 1;
7000 continue;
7003 if (!AllowToVectorize || BestVF == 0)
7004 registerNonVectorizableLoads(Slice);
7006 // Mark masked gather candidates as vectorized, if any.
7007 for (unsigned Cnt : MaskedGatherVectorized) {
7008 ArrayRef<LoadInst *> Slice = ArrayRef(Loads).slice(
7009 Cnt, std::min<unsigned>(NumElts, Loads.size() - Cnt));
7010 ArrayRef<Value *> Values(
7011 reinterpret_cast<Value *const *>(Slice.begin()), Slice.size());
7012 Results.emplace_back(Values, LoadsState::ScatterVectorize);
7013 VectorizedLoads.insert(Slice.begin(), Slice.end());
7014 // If we vectorized initial block, no need to try to vectorize it again.
7015 if (Cnt == StartIdx)
7016 StartIdx += NumElts;
7019 for (LoadInst *LI : Loads) {
7020 if (!VectorizedLoads.contains(LI))
7021 NonVectorized.push_back(LI);
7023 return Results;
7025 auto ProcessGatheredLoads =
7026 [&, &TTI = *TTI](
7027 ArrayRef<SmallVector<std::pair<LoadInst *, int>>> GatheredLoads,
7028 bool Final = false) {
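// For each group: sort the loads by distance, split them into maximal runs of
// consecutive addresses and try to vectorize every run, returning the loads
// that could not be vectorized.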
7029 SmallVector<LoadInst *> NonVectorized;
7030 for (ArrayRef<std::pair<LoadInst *, int>> LoadsDists : GatheredLoads) {
7031 if (LoadsDists.size() <= 1) {
7032 NonVectorized.push_back(LoadsDists.back().first);
7033 continue;
7035 SmallVector<std::pair<LoadInst *, int>> LocalLoadsDists(LoadsDists);
7036 SmallVector<LoadInst *> OriginalLoads(LocalLoadsDists.size());
7037 transform(
7038 LoadsDists, OriginalLoads.begin(),
7039 [](const std::pair<LoadInst *, int> &L) { return L.first; });
7040 stable_sort(LocalLoadsDists, LoadSorter);
7041 SmallVector<LoadInst *> Loads;
7042 unsigned MaxConsecutiveDistance = 0;
7043 unsigned CurrentConsecutiveDist = 1;
7044 int LastDist = LocalLoadsDists.front().second;
7045 bool AllowMaskedGather = IsMaskedGatherSupported(OriginalLoads);
7046 for (const std::pair<LoadInst *, int> &L : LocalLoadsDists) {
7047 if (getTreeEntry(L.first))
7048 continue;
7049 assert(LastDist >= L.second &&
7050 "Expected first distance always not less than second");
7051 if (static_cast<unsigned>(LastDist - L.second) ==
7052 CurrentConsecutiveDist) {
7053 ++CurrentConsecutiveDist;
7054 MaxConsecutiveDistance =
7055 std::max(MaxConsecutiveDistance, CurrentConsecutiveDist);
7056 Loads.push_back(L.first);
7057 continue;
7059 if (!AllowMaskedGather && CurrentConsecutiveDist == 1 &&
7060 !Loads.empty())
7061 Loads.pop_back();
7062 CurrentConsecutiveDist = 1;
7063 LastDist = L.second;
7064 Loads.push_back(L.first);
7066 if (Loads.size() <= 1)
7067 continue;
7068 if (AllowMaskedGather)
7069 MaxConsecutiveDistance = Loads.size();
7070 else if (MaxConsecutiveDistance < 2)
7071 continue;
7072 BoUpSLP::ValueSet VectorizedLoads;
7073 SmallVector<LoadInst *> SortedNonVectorized;
7074 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>> Results =
7075 GetVectorizedRanges(Loads, VectorizedLoads, SortedNonVectorized,
7076 Final, MaxConsecutiveDistance);
7077 if (!Results.empty() && !SortedNonVectorized.empty() &&
7078 OriginalLoads.size() == Loads.size() &&
7079 MaxConsecutiveDistance == Loads.size() &&
7080 all_of(Results,
7081 [](const std::pair<ArrayRef<Value *>, LoadsState> &P) {
7082 return P.second == LoadsState::ScatterVectorize;
7083 })) {
7084 VectorizedLoads.clear();
7085 SmallVector<LoadInst *> UnsortedNonVectorized;
7086 SmallVector<std::pair<ArrayRef<Value *>, LoadsState>>
7087 UnsortedResults =
7088 GetVectorizedRanges(OriginalLoads, VectorizedLoads,
7089 UnsortedNonVectorized, Final,
7090 OriginalLoads.size());
7091 if (SortedNonVectorized.size() >= UnsortedNonVectorized.size()) {
7092 SortedNonVectorized.swap(UnsortedNonVectorized);
7093 Results.swap(UnsortedResults);
7096 for (auto [Slice, _] : Results) {
7097 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize gathered loads ("
7098 << Slice.size() << ")\n");
7099 if (any_of(Slice, [&](Value *V) { return getTreeEntry(V); })) {
7100 for (Value *L : Slice)
7101 if (!getTreeEntry(L))
7102 SortedNonVectorized.push_back(cast<LoadInst>(L));
7103 continue;
7106 // Select maximum VF as a maximum of user gathered nodes and
7107 // distance between scalar loads in these nodes.
7108 unsigned MaxVF = Slice.size();
7109 unsigned UserMaxVF = 0;
7110 unsigned InterleaveFactor = 0;
7111 if (MaxVF == 2) {
7112 UserMaxVF = MaxVF;
7113 } else {
7114 // Find the distance between segments of the interleaved loads.
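// E.g., if every gather node using these loads references every second
// element of the slice, the distance between its members is 2 and the slice
// is treated as a candidate 2-way interleaved (segmented) load.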
7115 std::optional<unsigned> InterleavedLoadsDistance = 0;
7116 unsigned Order = 0;
7117 std::optional<unsigned> CommonVF = 0;
7118 DenseMap<const TreeEntry *, unsigned> EntryToPosition;
7119 SmallPtrSet<const TreeEntry *, 8> DeinterleavedNodes;
7120 for (auto [Idx, V] : enumerate(Slice)) {
7121 for (const TreeEntry *E : ValueToGatherNodes.at(V)) {
7122 UserMaxVF = std::max<unsigned>(UserMaxVF, E->Scalars.size());
7123 unsigned Pos =
7124 EntryToPosition.try_emplace(E, Idx).first->second;
7125 UserMaxVF = std::max<unsigned>(UserMaxVF, Idx - Pos + 1);
7126 if (CommonVF) {
7127 if (*CommonVF == 0) {
7128 CommonVF = E->Scalars.size();
7129 continue;
7131 if (*CommonVF != E->Scalars.size())
7132 CommonVF.reset();
7134 // Check if the load is the part of the interleaved load.
7135 if (Pos != Idx && InterleavedLoadsDistance) {
7136 if (!DeinterleavedNodes.contains(E) &&
7137 any_of(E->Scalars, [&, Slice = Slice](Value *V) {
7138 if (isa<Constant>(V))
7139 return false;
7140 if (getTreeEntry(V))
7141 return true;
7142 const auto &Nodes = ValueToGatherNodes.at(V);
7143 return (Nodes.size() != 1 || !Nodes.contains(E)) &&
7144 !is_contained(Slice, V);
7145 })) {
7146 InterleavedLoadsDistance.reset();
7147 continue;
7149 DeinterleavedNodes.insert(E);
7150 if (*InterleavedLoadsDistance == 0) {
7151 InterleavedLoadsDistance = Idx - Pos;
7152 continue;
7154 if ((Idx - Pos) % *InterleavedLoadsDistance != 0 ||
7155 (Idx - Pos) / *InterleavedLoadsDistance < Order)
7156 InterleavedLoadsDistance.reset();
7157 Order = (Idx - Pos) / InterleavedLoadsDistance.value_or(1);
7161 DeinterleavedNodes.clear();
7162 // Check if the large load represents an interleaved load operation.
7163 if (InterleavedLoadsDistance.value_or(0) > 1 &&
7164 CommonVF.value_or(0) != 0) {
7165 InterleaveFactor = bit_ceil(*InterleavedLoadsDistance);
7166 unsigned VF = *CommonVF;
7167 OrdersType Order;
7168 SmallVector<Value *> PointerOps;
7169 // Segmented load detected - vectorize at maximum vector factor.
7170 if (InterleaveFactor <= Slice.size() &&
7171 TTI.isLegalInterleavedAccessType(
7172 getWidenedType(Slice.front()->getType(), VF),
7173 InterleaveFactor,
7174 cast<LoadInst>(Slice.front())->getAlign(),
7175 cast<LoadInst>(Slice.front())
7176 ->getPointerAddressSpace()) &&
7177 canVectorizeLoads(Slice, Slice.front(), Order,
7178 PointerOps) == LoadsState::Vectorize) {
7179 UserMaxVF = InterleaveFactor * VF;
7180 } else {
7181 InterleaveFactor = 0;
7184 // Cannot represent the loads as consecutive vectorizable nodes -
7185 // just exit.
7186 unsigned ConsecutiveNodesSize = 0;
7187 if (!LoadEntriesToVectorize.empty() && InterleaveFactor == 0 &&
7188 any_of(zip(LoadEntriesToVectorize, LoadSetsToVectorize),
7189 [&, Slice = Slice](const auto &P) {
7190 const auto *It = find_if(Slice, [&](Value *V) {
7191 return std::get<1>(P).contains(V);
7193 if (It == Slice.end())
7194 return false;
7195 ArrayRef<Value *> VL =
7196 VectorizableTree[std::get<0>(P)]->Scalars;
7197 ConsecutiveNodesSize += VL.size();
7198 unsigned Start = std::distance(Slice.begin(), It);
7199 unsigned Sz = Slice.size() - Start;
7200 return Sz < VL.size() ||
7201 Slice.slice(std::distance(Slice.begin(), It),
7202 VL.size()) != VL;
7204 continue;
7205 // Try to build long masked gather loads.
7206 UserMaxVF = bit_ceil(UserMaxVF);
7207 if (InterleaveFactor == 0 &&
7208 any_of(seq<unsigned>(Slice.size() / UserMaxVF),
7209 [&, Slice = Slice](unsigned Idx) {
7210 OrdersType Order;
7211 SmallVector<Value *> PointerOps;
7212 return canVectorizeLoads(
7213 Slice.slice(Idx * UserMaxVF, UserMaxVF),
7214 Slice[Idx * UserMaxVF], Order,
7215 PointerOps) ==
7216 LoadsState::ScatterVectorize;
7218 UserMaxVF = MaxVF;
7219 if (Slice.size() != ConsecutiveNodesSize)
7220 MaxVF = std::min<unsigned>(MaxVF, UserMaxVF);
7222 for (unsigned VF = MaxVF; VF >= 2; VF /= 2) {
7223 bool IsVectorized = true;
7224 for (unsigned I = 0, E = Slice.size(); I < E; I += VF) {
7225 ArrayRef<Value *> SubSlice =
7226 Slice.slice(I, std::min(VF, E - I));
7227 if (getTreeEntry(SubSlice.front()))
7228 continue;
7229 // Check if the subslice is a to-be-vectorized entry that is not
7230 // equal to this entry.
7231 if (any_of(zip(LoadEntriesToVectorize, LoadSetsToVectorize),
7232 [&](const auto &P) {
7233 return !SubSlice.equals(
7234 VectorizableTree[std::get<0>(P)]
7235 ->Scalars) &&
7236 set_is_subset(SubSlice, std::get<1>(P));
7238 continue;
7239 unsigned Sz = VectorizableTree.size();
7240 buildTree_rec(SubSlice, 0, EdgeInfo(), InterleaveFactor);
7241 if (Sz == VectorizableTree.size()) {
7242 IsVectorized = false;
7243 // Try non-interleaved vectorization with smaller vector
7244 // factor.
7245 if (InterleaveFactor > 0) {
7246 VF = 2 * (MaxVF / InterleaveFactor);
7247 InterleaveFactor = 0;
7249 continue;
7252 if (IsVectorized)
7253 break;
7256 NonVectorized.append(SortedNonVectorized);
7258 return NonVectorized;
7260 for (const auto &GLs : GatheredLoads) {
7261 const auto &Ref = GLs.second;
7262 SmallVector<LoadInst *> NonVectorized = ProcessGatheredLoads(Ref);
7263 if (!Ref.empty() && !NonVectorized.empty() &&
7264 std::accumulate(
7265 Ref.begin(), Ref.end(), 0u,
7266 [](unsigned S, ArrayRef<std::pair<LoadInst *, int>> LoadsDists) {
7267 return S + LoadsDists.size();
7268 }) != NonVectorized.size() &&
7269 IsMaskedGatherSupported(NonVectorized)) {
7270 SmallVector<SmallVector<std::pair<LoadInst *, int>>> FinalGatheredLoads;
7271 for (LoadInst *LI : NonVectorized) {
7272 // Reinsert non-vectorized loads into another list of loads with the
7273 // same base pointers.
7274 gatherPossiblyVectorizableLoads(*this, LI, *DL, *SE, *TTI,
7275 FinalGatheredLoads,
7276 /*AddNew=*/false);
7278 // Final attempt to vectorize non-vectorized loads.
7279 (void)ProcessGatheredLoads(FinalGatheredLoads, /*Final=*/true);
7282 // Try to vectorize postponed load entries, previously marked as gathered.
7283 for (unsigned Idx : LoadEntriesToVectorize) {
7284 const TreeEntry &E = *VectorizableTree[Idx];
7285 SmallVector<Value *> GatheredScalars(E.Scalars.begin(), E.Scalars.end());
7286 // Avoid reordering, if possible.
7287 if (!E.ReorderIndices.empty()) {
7288 // Build a mask out of the reorder indices and reorder scalars per this
7289 // mask.
7290 SmallVector<int> ReorderMask;
7291 inversePermutation(E.ReorderIndices, ReorderMask);
7292 reorderScalars(GatheredScalars, ReorderMask);
7294 buildTree_rec(GatheredScalars, 0, EdgeInfo());
7296 // If no new entries were created, there are no gathered-load entries that
7297 // must be handled.
7298 if (static_cast<unsigned>(*GatheredLoadsEntriesFirst) ==
7299 VectorizableTree.size())
7300 GatheredLoadsEntriesFirst.reset();
7303 /// \return true if the specified list of values has only one instruction that
7304 /// requires scheduling, false otherwise.
7305 #ifndef NDEBUG
7306 static bool needToScheduleSingleInstruction(ArrayRef<Value *> VL) {
7307 Value *NeedsScheduling = nullptr;
7308 for (Value *V : VL) {
7309 if (doesNotNeedToBeScheduled(V))
7310 continue;
7311 if (!NeedsScheduling) {
7312 NeedsScheduling = V;
7313 continue;
7315 return false;
7317 return NeedsScheduling;
7319 #endif
7321 /// Generates a key/subkey pair for the given value to provide effective sorting
7322 /// of the values and better detection of vectorizable value sequences. The
7323 /// keys/subkeys can be used for better sorting of the values themselves (keys)
7324 /// and within value subgroups (subkeys).
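/// For example, simple loads of the same type share a key (opcode + type) and
/// get a subkey from \p LoadsSubkeyGenerator, so related loads sort next to
/// each other, while a volatile/atomic load is hashed on its own and kept
/// apart. (Illustrative summary of the hashing below, not an exhaustive
/// contract.)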
7325 static std::pair<size_t, size_t> generateKeySubkey(
7326 Value *V, const TargetLibraryInfo *TLI,
7327 function_ref<hash_code(size_t, LoadInst *)> LoadsSubkeyGenerator,
7328 bool AllowAlternate) {
7329 hash_code Key = hash_value(V->getValueID() + 2);
7330 hash_code SubKey = hash_value(0);
7331 // Sort the loads by the distance between the pointers.
7332 if (auto *LI = dyn_cast<LoadInst>(V)) {
7333 Key = hash_combine(LI->getType(), hash_value(Instruction::Load), Key);
7334 if (LI->isSimple())
7335 SubKey = hash_value(LoadsSubkeyGenerator(Key, LI));
7336 else
7337 Key = SubKey = hash_value(LI);
7338 } else if (isVectorLikeInstWithConstOps(V)) {
7339 // Sort extracts by the vector operands.
7340 if (isa<ExtractElementInst, UndefValue>(V))
7341 Key = hash_value(Value::UndefValueVal + 1);
7342 if (auto *EI = dyn_cast<ExtractElementInst>(V)) {
7343 if (!isUndefVector(EI->getVectorOperand()).all() &&
7344 !isa<UndefValue>(EI->getIndexOperand()))
7345 SubKey = hash_value(EI->getVectorOperand());
7347 } else if (auto *I = dyn_cast<Instruction>(V)) {
7348 // Sort other instructions just by the opcode, except for CmpInst.
7349 // For CmpInst also sort by the predicate kind.
7350 if ((isa<BinaryOperator, CastInst>(I)) &&
7351 isValidForAlternation(I->getOpcode())) {
7352 if (AllowAlternate)
7353 Key = hash_value(isa<BinaryOperator>(I) ? 1 : 0);
7354 else
7355 Key = hash_combine(hash_value(I->getOpcode()), Key);
7356 SubKey = hash_combine(
7357 hash_value(I->getOpcode()), hash_value(I->getType()),
7358 hash_value(isa<BinaryOperator>(I)
7359 ? I->getType()
7360 : cast<CastInst>(I)->getOperand(0)->getType()));
7361 // For casts, look through the only operand to improve compile time.
7362 if (isa<CastInst>(I)) {
7363 std::pair<size_t, size_t> OpVals =
7364 generateKeySubkey(I->getOperand(0), TLI, LoadsSubkeyGenerator,
7365 /*AllowAlternate=*/true);
7366 Key = hash_combine(OpVals.first, Key);
7367 SubKey = hash_combine(OpVals.first, SubKey);
7369 } else if (auto *CI = dyn_cast<CmpInst>(I)) {
7370 CmpInst::Predicate Pred = CI->getPredicate();
7371 if (CI->isCommutative())
7372 Pred = std::min(Pred, CmpInst::getInversePredicate(Pred));
7373 CmpInst::Predicate SwapPred = CmpInst::getSwappedPredicate(Pred);
7374 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Pred),
7375 hash_value(SwapPred),
7376 hash_value(CI->getOperand(0)->getType()));
7377 } else if (auto *Call = dyn_cast<CallInst>(I)) {
7378 Intrinsic::ID ID = getVectorIntrinsicIDForCall(Call, TLI);
7379 if (isTriviallyVectorizable(ID)) {
7380 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(ID));
7381 } else if (!VFDatabase(*Call).getMappings(*Call).empty()) {
7382 SubKey = hash_combine(hash_value(I->getOpcode()),
7383 hash_value(Call->getCalledFunction()));
7384 } else {
7385 Key = hash_combine(hash_value(Call), Key);
7386 SubKey = hash_combine(hash_value(I->getOpcode()), hash_value(Call));
7388 for (const CallBase::BundleOpInfo &Op : Call->bundle_op_infos())
7389 SubKey = hash_combine(hash_value(Op.Begin), hash_value(Op.End),
7390 hash_value(Op.Tag), SubKey);
7391 } else if (auto *Gep = dyn_cast<GetElementPtrInst>(I)) {
7392 if (Gep->getNumOperands() == 2 && isa<ConstantInt>(Gep->getOperand(1)))
7393 SubKey = hash_value(Gep->getPointerOperand());
7394 else
7395 SubKey = hash_value(Gep);
7396 } else if (BinaryOperator::isIntDivRem(I->getOpcode()) &&
7397 !isa<ConstantInt>(I->getOperand(1))) {
7398 // Do not try to vectorize instructions with potentially high cost.
7399 SubKey = hash_value(I);
7400 } else {
7401 SubKey = hash_value(I->getOpcode());
7403 Key = hash_combine(hash_value(I->getParent()), Key);
7405 return std::make_pair(Key, SubKey);
7408 /// Checks if the specified instruction \p I is an alternate operation for
7409 /// the given \p MainOp and \p AltOp instructions.
7410 static bool isAlternateInstruction(const Instruction *I,
7411 const Instruction *MainOp,
7412 const Instruction *AltOp,
7413 const TargetLibraryInfo &TLI);
7415 bool BoUpSLP::areAltOperandsProfitable(const InstructionsState &S,
7416 ArrayRef<Value *> VL) const {
7417 unsigned Opcode0 = S.getOpcode();
7418 unsigned Opcode1 = S.getAltOpcode();
7419 SmallBitVector OpcodeMask(getAltInstrMask(VL, Opcode0, Opcode1));
7420 // If this pattern is supported by the target then consider it profitable.
7421 if (TTI->isLegalAltInstr(getWidenedType(S.getMainOp()->getType(), VL.size()),
7422 Opcode0, Opcode1, OpcodeMask))
7423 return true;
7424 SmallVector<ValueList> Operands;
7425 for (unsigned I : seq<unsigned>(S.getMainOp()->getNumOperands())) {
7426 Operands.emplace_back();
7427 // Prepare the operand vector.
7428 for (Value *V : VL) {
7429 if (isa<PoisonValue>(V)) {
7430 Operands.back().push_back(
7431 PoisonValue::get(S.getMainOp()->getOperand(I)->getType()));
7432 continue;
7434 Operands.back().push_back(cast<Instruction>(V)->getOperand(I));
7437 if (Operands.size() == 2) {
7438 // Try to find the best operand candidates.
7439 for (unsigned I : seq<unsigned>(0, VL.size() - 1)) {
7440 SmallVector<std::pair<Value *, Value *>> Candidates(3);
7441 Candidates[0] = std::make_pair(Operands[0][I], Operands[0][I + 1]);
7442 Candidates[1] = std::make_pair(Operands[0][I], Operands[1][I + 1]);
7443 Candidates[2] = std::make_pair(Operands[1][I], Operands[0][I + 1]);
7444 std::optional<int> Res = findBestRootPair(Candidates);
7445 switch (Res.value_or(0)) {
7446 case 0:
7447 break;
7448 case 1:
7449 std::swap(Operands[0][I + 1], Operands[1][I + 1]);
7450 break;
7451 case 2:
7452 std::swap(Operands[0][I], Operands[1][I]);
7453 break;
7454 default:
7455 llvm_unreachable("Unexpected index.");
7459 DenseSet<unsigned> UniqueOpcodes;
7460 constexpr unsigned NumAltInsts = 3; // main + alt + shuffle.
7461 unsigned NonInstCnt = 0;
7462 // Estimate the number of instructions required for the vectorized node and
7463 // for the buildvector node.
7464 unsigned UndefCnt = 0;
7465 // Count the number of extra shuffles, required for vector nodes.
7466 unsigned ExtraShuffleInsts = 0;
7467 // Check that the operands do not contain the same values and form either a
7468 // perfect diamond match or a shuffled match.
7469 if (Operands.size() == 2) {
7470 // Do not count same operands twice.
7471 if (Operands.front() == Operands.back()) {
7472 Operands.erase(Operands.begin());
7473 } else if (!allConstant(Operands.front()) &&
7474 all_of(Operands.front(), [&](Value *V) {
7475 return is_contained(Operands.back(), V);
7476 })) {
7477 Operands.erase(Operands.begin());
7478 ++ExtraShuffleInsts;
7481 const Loop *L = LI->getLoopFor(S.getMainOp()->getParent());
7482 // Vectorize the node if:
7483 // 1. At least a single operand is constant or splat.
7484 // 2. Operands have many loop invariants (the instructions are not loop
7485 // invariants).
7486 // 3. At least a single unique operand is supposed to be vectorized.
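// Rough cost model used below: the vectorized form costs the main + alternate
// instructions plus a shuffle (NumAltInsts) together with the unique operand
// instructions (UniqueOpcodes/NonInstCnt/ExtraShuffleInsts), while the
// buildvector form costs roughly number_of_operands * number_of_scalars
// insertions; the alternate node is also accepted when the vector estimate
// stays below the buildvector estimate.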
7487 return none_of(Operands,
7488 [&](ArrayRef<Value *> Op) {
7489 if (allConstant(Op) ||
7490 (!isSplat(Op) && allSameBlock(Op) && allSameType(Op) &&
7491 getSameOpcode(Op, *TLI).getMainOp()))
7492 return false;
7493 DenseMap<Value *, unsigned> Uniques;
7494 for (Value *V : Op) {
7495 if (isa<Constant, ExtractElementInst>(V) ||
7496 getTreeEntry(V) || (L && L->isLoopInvariant(V))) {
7497 if (isa<UndefValue>(V))
7498 ++UndefCnt;
7499 continue;
7501 auto Res = Uniques.try_emplace(V, 0);
7502 // Found first duplicate - need to add shuffle.
7503 if (!Res.second && Res.first->second == 1)
7504 ++ExtraShuffleInsts;
7505 ++Res.first->getSecond();
7506 if (auto *I = dyn_cast<Instruction>(V))
7507 UniqueOpcodes.insert(I->getOpcode());
7508 else if (Res.second)
7509 ++NonInstCnt;
7511 return none_of(Uniques, [&](const auto &P) {
7512 return P.first->hasNUsesOrMore(P.second + 1) &&
7513 none_of(P.first->users(), [&](User *U) {
7514 return getTreeEntry(U) || Uniques.contains(U);
7517 }) ||
7518 // Do not vectorize node, if estimated number of vector instructions is
7519 // more than estimated number of buildvector instructions. Number of
7520 // vector operands is number of vector instructions + number of vector
7521 // instructions for operands (buildvectors). Number of buildvector
7522 // instructions is just number_of_operands * number_of_scalars.
7523 (UndefCnt < (VL.size() - 1) * S.getMainOp()->getNumOperands() &&
7524 (UniqueOpcodes.size() + NonInstCnt + ExtraShuffleInsts +
7525 NumAltInsts) < S.getMainOp()->getNumOperands() * VL.size());
7528 BoUpSLP::TreeEntry::EntryState BoUpSLP::getScalarsVectorizationState(
7529 const InstructionsState &S, ArrayRef<Value *> VL,
7530 bool IsScatterVectorizeUserTE, OrdersType &CurrentOrder,
7531 SmallVectorImpl<Value *> &PointerOps) {
7532 assert(S.getMainOp() &&
7533 "Expected instructions with same/alternate opcodes only.");
7535 unsigned ShuffleOrOp =
7536 S.isAltShuffle() ? (unsigned)Instruction::ShuffleVector : S.getOpcode();
7537 Instruction *VL0 = S.getMainOp();
7538 switch (ShuffleOrOp) {
7539 case Instruction::PHI: {
7540 // Too many operands - gather, most probably won't be vectorized.
7541 if (VL0->getNumOperands() > MaxPHINumOperands)
7542 return TreeEntry::NeedToGather;
7543 // Check for terminator values (e.g. invoke).
7544 for (Value *V : VL) {
7545 auto *PHI = dyn_cast<PHINode>(V);
7546 if (!PHI)
7547 continue;
7548 for (Value *Incoming : PHI->incoming_values()) {
7549 Instruction *Term = dyn_cast<Instruction>(Incoming);
7550 if (Term && Term->isTerminator()) {
7551 LLVM_DEBUG(dbgs()
7552 << "SLP: Need to swizzle PHINodes (terminator use).\n");
7553 return TreeEntry::NeedToGather;
7558 return TreeEntry::Vectorize;
7560 case Instruction::ExtractValue:
7561 case Instruction::ExtractElement: {
7562 bool Reuse = canReuseExtract(VL, VL0, CurrentOrder);
7563 // FIXME: Vectorizing is not supported yet for non-power-of-2 ops.
7564 if (!has_single_bit(VL.size()))
7565 return TreeEntry::NeedToGather;
7566 if (Reuse || !CurrentOrder.empty())
7567 return TreeEntry::Vectorize;
7568 LLVM_DEBUG(dbgs() << "SLP: Gather extract sequence.\n");
7569 return TreeEntry::NeedToGather;
7571 case Instruction::InsertElement: {
7572 // Check that we have a buildvector and not a shuffle of 2 or more
7573 // different vectors.
7574 ValueSet SourceVectors;
7575 for (Value *V : VL) {
7576 SourceVectors.insert(cast<Instruction>(V)->getOperand(0));
7577 assert(getElementIndex(V) != std::nullopt &&
7578 "Non-constant or undef index?");
7581 if (count_if(VL, [&SourceVectors](Value *V) {
7582 return !SourceVectors.contains(V);
7583 }) >= 2) {
7584 // Found 2nd source vector - cancel.
7585 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
7586 "different source vectors.\n");
7587 return TreeEntry::NeedToGather;
7590 if (any_of(VL, [&SourceVectors](Value *V) {
7591 // The last InsertElement can have multiple uses.
7592 return SourceVectors.contains(V) && !V->hasOneUse();
7593 })) {
7594 assert(SLPReVec && "Only supported by REVEC.");
7595 LLVM_DEBUG(dbgs() << "SLP: Gather of insertelement vectors with "
7596 "multiple uses.\n");
7597 return TreeEntry::NeedToGather;
7600 return TreeEntry::Vectorize;
7602 case Instruction::Load: {
7603 // Check that a vectorized load would load the same memory as a scalar
7604 // load. For example, we don't want to vectorize loads that are smaller
7605 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
7606 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
7607 // from such a struct, we read/write packed bits disagreeing with the
7608 // unvectorized version.
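// canVectorizeLoads() classifies the bundle: consecutive loads are vectorized
// directly, strided and masked-gather loads are usually postponed via
// LoadEntriesToVectorize for a later attempt, and everything else is gathered.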
7609 switch (canVectorizeLoads(VL, VL0, CurrentOrder, PointerOps)) {
7610 case LoadsState::Vectorize:
7611 return TreeEntry::Vectorize;
7612 case LoadsState::ScatterVectorize:
7613 if (!IsGraphTransformMode && !VectorizableTree.empty()) {
7614 // Delay slow vectorized nodes for better vectorization attempts.
7615 LoadEntriesToVectorize.insert(VectorizableTree.size());
7616 return TreeEntry::NeedToGather;
7618 return TreeEntry::ScatterVectorize;
7619 case LoadsState::StridedVectorize:
7620 if (!IsGraphTransformMode && VectorizableTree.size() > 1) {
7621 // Delay slow vectorized nodes for better vectorization attempts.
7622 LoadEntriesToVectorize.insert(VectorizableTree.size());
7623 return TreeEntry::NeedToGather;
7625 return TreeEntry::StridedVectorize;
7626 case LoadsState::Gather:
7627 #ifndef NDEBUG
7628 Type *ScalarTy = VL0->getType();
7629 if (DL->getTypeSizeInBits(ScalarTy) !=
7630 DL->getTypeAllocSizeInBits(ScalarTy))
7631 LLVM_DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
7632 else if (any_of(VL, [](Value *V) {
7633 auto *LI = dyn_cast<LoadInst>(V);
7634 return !LI || !LI->isSimple();
7636 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
7637 else
7638 LLVM_DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
7639 #endif // NDEBUG
7640 registerNonVectorizableLoads(VL);
7641 return TreeEntry::NeedToGather;
7643 llvm_unreachable("Unexpected state of loads");
7645 case Instruction::ZExt:
7646 case Instruction::SExt:
7647 case Instruction::FPToUI:
7648 case Instruction::FPToSI:
7649 case Instruction::FPExt:
7650 case Instruction::PtrToInt:
7651 case Instruction::IntToPtr:
7652 case Instruction::SIToFP:
7653 case Instruction::UIToFP:
7654 case Instruction::Trunc:
7655 case Instruction::FPTrunc:
7656 case Instruction::BitCast: {
7657 Type *SrcTy = VL0->getOperand(0)->getType();
7658 for (Value *V : VL) {
7659 if (isa<PoisonValue>(V))
7660 continue;
7661 Type *Ty = cast<Instruction>(V)->getOperand(0)->getType();
7662 if (Ty != SrcTy || !isValidElementType(Ty)) {
7663 LLVM_DEBUG(
7664 dbgs() << "SLP: Gathering casts with different src types.\n");
7665 return TreeEntry::NeedToGather;
7668 return TreeEntry::Vectorize;
7670 case Instruction::ICmp:
7671 case Instruction::FCmp: {
7672 // Check that all of the compares have the same predicate.
7673 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
7674 CmpInst::Predicate SwapP0 = CmpInst::getSwappedPredicate(P0);
7675 Type *ComparedTy = VL0->getOperand(0)->getType();
7676 for (Value *V : VL) {
7677 if (isa<PoisonValue>(V))
7678 continue;
7679 auto *Cmp = cast<CmpInst>(V);
7680 if ((Cmp->getPredicate() != P0 && Cmp->getPredicate() != SwapP0) ||
7681 Cmp->getOperand(0)->getType() != ComparedTy) {
7682 LLVM_DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
7683 return TreeEntry::NeedToGather;
7686 return TreeEntry::Vectorize;
7688 case Instruction::Select:
7689 case Instruction::FNeg:
7690 case Instruction::Add:
7691 case Instruction::FAdd:
7692 case Instruction::Sub:
7693 case Instruction::FSub:
7694 case Instruction::Mul:
7695 case Instruction::FMul:
7696 case Instruction::UDiv:
7697 case Instruction::SDiv:
7698 case Instruction::FDiv:
7699 case Instruction::URem:
7700 case Instruction::SRem:
7701 case Instruction::FRem:
7702 case Instruction::Shl:
7703 case Instruction::LShr:
7704 case Instruction::AShr:
7705 case Instruction::And:
7706 case Instruction::Or:
7707 case Instruction::Xor:
7708 case Instruction::Freeze:
7709 if (S.getMainOp()->getType()->isFloatingPointTy() &&
7710 TTI->isFPVectorizationPotentiallyUnsafe() && any_of(VL, [](Value *V) {
7711 auto *I = dyn_cast<Instruction>(V);
7712 return I && I->isBinaryOp() && !I->isFast();
7714 return TreeEntry::NeedToGather;
7715 return TreeEntry::Vectorize;
7716 case Instruction::GetElementPtr: {
7717 // We don't combine GEPs with complicated (nested) indexing.
7718 for (Value *V : VL) {
7719 auto *I = dyn_cast<GetElementPtrInst>(V);
7720 if (!I)
7721 continue;
7722 if (I->getNumOperands() != 2) {
7723 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
7724 return TreeEntry::NeedToGather;
7728 // We can't combine several GEPs into one vector if they operate on
7729 // different types.
7730 Type *Ty0 = cast<GEPOperator>(VL0)->getSourceElementType();
7731 for (Value *V : VL) {
7732 auto *GEP = dyn_cast<GEPOperator>(V);
7733 if (!GEP)
7734 continue;
7735 Type *CurTy = GEP->getSourceElementType();
7736 if (Ty0 != CurTy) {
7737 LLVM_DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
7738 return TreeEntry::NeedToGather;
7742 // We don't combine GEPs with non-constant indexes.
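// (For a ScatterVectorize user the index may stay non-constant as long as its
// type matches the first GEP's index type; otherwise it must be a constant
// that fits the pointer index width.)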
7743 Type *Ty1 = VL0->getOperand(1)->getType();
7744 for (Value *V : VL) {
7745 auto *I = dyn_cast<GetElementPtrInst>(V);
7746 if (!I)
7747 continue;
7748 auto *Op = I->getOperand(1);
7749 if ((!IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
7750 (Op->getType() != Ty1 &&
7751 ((IsScatterVectorizeUserTE && !isa<ConstantInt>(Op)) ||
7752 Op->getType()->getScalarSizeInBits() >
7753 DL->getIndexSizeInBits(
7754 V->getType()->getPointerAddressSpace())))) {
7755 LLVM_DEBUG(
7756 dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
7757 return TreeEntry::NeedToGather;
7761 return TreeEntry::Vectorize;
7763 case Instruction::Store: {
7764 // Check if the stores are consecutive or if we need to swizzle them.
7765 llvm::Type *ScalarTy = cast<StoreInst>(VL0)->getValueOperand()->getType();
7766 // Avoid types that are padded when being allocated as scalars, while
7767 // being packed together in a vector (such as i1).
7768 if (DL->getTypeSizeInBits(ScalarTy) !=
7769 DL->getTypeAllocSizeInBits(ScalarTy)) {
7770 LLVM_DEBUG(dbgs() << "SLP: Gathering stores of non-packed type.\n");
7771 return TreeEntry::NeedToGather;
7773 // Make sure all stores in the bundle are simple - we can't vectorize
7774 // atomic or volatile stores.
7775 for (Value *V : VL) {
7776 auto *SI = cast<StoreInst>(V);
7777 if (!SI->isSimple()) {
7778 LLVM_DEBUG(dbgs() << "SLP: Gathering non-simple stores.\n");
7779 return TreeEntry::NeedToGather;
7781 PointerOps.push_back(SI->getPointerOperand());
7784 // Check the order of pointer operands.
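// sortPtrAccesses() fills CurrentOrder with a permutation that sorts the
// pointers by their offsets; an empty CurrentOrder means the stores are
// already in sorted order.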
7785 if (llvm::sortPtrAccesses(PointerOps, ScalarTy, *DL, *SE, CurrentOrder)) {
7786 Value *Ptr0;
7787 Value *PtrN;
7788 if (CurrentOrder.empty()) {
7789 Ptr0 = PointerOps.front();
7790 PtrN = PointerOps.back();
7791 } else {
7792 Ptr0 = PointerOps[CurrentOrder.front()];
7793 PtrN = PointerOps[CurrentOrder.back()];
7795 std::optional<int> Dist =
7796 getPointersDiff(ScalarTy, Ptr0, ScalarTy, PtrN, *DL, *SE);
7797 // Check that the sorted pointer operands are consecutive.
7798 if (static_cast<unsigned>(*Dist) == VL.size() - 1)
7799 return TreeEntry::Vectorize;
7802 LLVM_DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
7803 return TreeEntry::NeedToGather;
7805 case Instruction::Call: {
7806 if (S.getMainOp()->getType()->isFloatingPointTy() &&
7807 TTI->isFPVectorizationPotentiallyUnsafe() && any_of(VL, [](Value *V) {
7808 auto *I = dyn_cast<Instruction>(V);
7809 return I && !I->isFast();
7811 return TreeEntry::NeedToGather;
7812 // Check if the calls are all to the same vectorizable intrinsic or
7813 // library function.
7814 CallInst *CI = cast<CallInst>(VL0);
7815 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
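// The bundle is vectorizable only if every call maps to the same vectorizable
// intrinsic ID or the same vector-library function, with matching scalar
// arguments (for intrinsics that keep scalar operands) and matching operand
// bundles; any mismatch below falls back to gathering.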
7817 VFShape Shape = VFShape::get(
7818 CI->getFunctionType(),
7819 ElementCount::getFixed(static_cast<unsigned int>(VL.size())),
7820 false /*HasGlobalPred*/);
7821 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
7823 if (!VecFunc && !isTriviallyVectorizable(ID)) {
7824 LLVM_DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
7825 return TreeEntry::NeedToGather;
7827 Function *F = CI->getCalledFunction();
7828 unsigned NumArgs = CI->arg_size();
7829 SmallVector<Value *, 4> ScalarArgs(NumArgs, nullptr);
7830 for (unsigned J = 0; J != NumArgs; ++J)
7831 if (isVectorIntrinsicWithScalarOpAtArg(ID, J))
7832 ScalarArgs[J] = CI->getArgOperand(J);
7833 for (Value *V : VL) {
7834 CallInst *CI2 = dyn_cast<CallInst>(V);
7835 if (!CI2 || CI2->getCalledFunction() != F ||
7836 getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
7837 (VecFunc &&
7838 VecFunc != VFDatabase(*CI2).getVectorizedFunction(Shape)) ||
7839 !CI->hasIdenticalOperandBundleSchema(*CI2)) {
7840 LLVM_DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *V
7841 << "\n");
7842 return TreeEntry::NeedToGather;
7844 // Some intrinsics have scalar arguments, which must be the same across
7845 // the bundle for it to be vectorized.
7846 for (unsigned J = 0; J != NumArgs; ++J) {
7847 if (isVectorIntrinsicWithScalarOpAtArg(ID, J)) {
7848 Value *A1J = CI2->getArgOperand(J);
7849 if (ScalarArgs[J] != A1J) {
7850 LLVM_DEBUG(dbgs()
7851 << "SLP: mismatched arguments in call:" << *CI
7852 << " argument " << ScalarArgs[J] << "!=" << A1J << "\n");
7853 return TreeEntry::NeedToGather;
7857 // Verify that the bundle operands are identical between the two calls.
7858 if (CI->hasOperandBundles() &&
7859 !std::equal(CI->op_begin() + CI->getBundleOperandsStartIndex(),
7860 CI->op_begin() + CI->getBundleOperandsEndIndex(),
7861 CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
7862 LLVM_DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
7863 << "!=" << *V << '\n');
7864 return TreeEntry::NeedToGather;
7868 return TreeEntry::Vectorize;
7870 case Instruction::ShuffleVector: {
7871 if (!S.isAltShuffle()) {
7872 // REVEC can support non-alternate shuffles.
7873 if (SLPReVec && getShufflevectorNumGroups(VL))
7874 return TreeEntry::Vectorize;
7875 // If this is not an alternating sequence of opcodes (like add-sub),
7876 // do not vectorize this instruction.
7877 LLVM_DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
7878 return TreeEntry::NeedToGather;
7880 if (!SLPSkipEarlyProfitabilityCheck && !areAltOperandsProfitable(S, VL)) {
7881 LLVM_DEBUG(
7882 dbgs()
7883 << "SLP: ShuffleVector not vectorized, operands are buildvector and "
7884 "the whole alt sequence is not profitable.\n");
7885 return TreeEntry::NeedToGather;
7888 return TreeEntry::Vectorize;
7890 default:
7891 LLVM_DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
7892 return TreeEntry::NeedToGather;
7896 namespace {
7897 /// Correctly handles the operands of a bundle of phi nodes, based on the
7898 /// \p Main PHINode's order of incoming basic blocks/values.
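/// With at most FastLimit incoming blocks the operands are collected directly
/// per incoming value; for larger PHIs a block-to-index map is built first so
/// incoming values are resolved per block instead of via repeated
/// getIncomingValueForBlock() lookups.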
7899 class PHIHandler {
7900 DominatorTree &DT;
7901 PHINode *Main = nullptr;
7902 SmallVector<Value *> Phis;
7903 SmallVector<SmallVector<Value *>> Operands;
7905 public:
7906 PHIHandler() = delete;
7907 PHIHandler(DominatorTree &DT, PHINode *Main, ArrayRef<Value *> Phis)
7908 : DT(DT), Main(Main), Phis(Phis),
7909 Operands(Main->getNumIncomingValues(),
7910 SmallVector<Value *>(Phis.size(), nullptr)) {}
7911 void buildOperands() {
7912 constexpr unsigned FastLimit = 4;
7913 if (Main->getNumIncomingValues() <= FastLimit) {
7914 for (unsigned I : seq<unsigned>(0, Main->getNumIncomingValues())) {
7915 BasicBlock *InBB = Main->getIncomingBlock(I);
7916 if (!DT.isReachableFromEntry(InBB)) {
7917 Operands[I].assign(Phis.size(), PoisonValue::get(Main->getType()));
7918 continue;
7920 // Prepare the operand vector.
7921 for (auto [Idx, V] : enumerate(Phis)) {
7922 auto *P = dyn_cast<PHINode>(V);
7923 if (!P) {
7924 assert(isa<PoisonValue>(V) &&
7925 "Expected isa instruction or poison value.");
7926 Operands[I][Idx] = V;
7927 continue;
7929 if (P->getIncomingBlock(I) == InBB)
7930 Operands[I][Idx] = P->getIncomingValue(I);
7931 else
7932 Operands[I][Idx] = P->getIncomingValueForBlock(InBB);
7935 return;
7937 SmallDenseMap<BasicBlock *, SmallVector<unsigned>, 4> Blocks;
7938 for (unsigned I : seq<unsigned>(0, Main->getNumIncomingValues())) {
7939 BasicBlock *InBB = Main->getIncomingBlock(I);
7940 if (!DT.isReachableFromEntry(InBB)) {
7941 Operands[I].assign(Phis.size(), PoisonValue::get(Main->getType()));
7942 continue;
7944 Blocks.try_emplace(InBB).first->second.push_back(I);
7946 for (auto [Idx, V] : enumerate(Phis)) {
7947 if (isa<PoisonValue>(V)) {
7948 for (unsigned I : seq<unsigned>(Main->getNumIncomingValues()))
7949 Operands[I][Idx] = V;
7950 continue;
7952 auto *P = cast<PHINode>(V);
7953 for (unsigned I : seq<unsigned>(0, P->getNumIncomingValues())) {
7954 BasicBlock *InBB = P->getIncomingBlock(I);
7955 if (InBB == Main->getIncomingBlock(I)) {
7956 if (isa_and_nonnull<PoisonValue>(Operands[I][Idx]))
7957 continue;
7958 Operands[I][Idx] = P->getIncomingValue(I);
7959 continue;
7961 auto It = Blocks.find(InBB);
7962 if (It == Blocks.end())
7963 continue;
7964 Operands[It->second.front()][Idx] = P->getIncomingValue(I);
7967 for (const auto &P : Blocks) {
7968 if (P.getSecond().size() <= 1)
7969 continue;
7970 unsigned BasicI = P.getSecond().front();
7971 for (unsigned I : ArrayRef(P.getSecond()).drop_front()) {
7972 assert(all_of(enumerate(Operands[I]),
7973 [&](const auto &Data) {
7974 return !Data.value() ||
7975 Data.value() == Operands[BasicI][Data.index()];
7976 }) &&
7977 "Expected empty operands list.");
7978 Operands[I] = Operands[BasicI];
7982 ArrayRef<Value *> getOperands(unsigned I) const { return Operands[I]; }
7984 } // namespace
7986 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
7987 const EdgeInfo &UserTreeIdx,
7988 unsigned InterleaveFactor) {
7989 assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
7991 SmallVector<int> ReuseShuffleIndices;
7992 SmallVector<Value *> UniqueValues;
7993 SmallVector<Value *> NonUniqueValueVL;
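// TryToFindDuplicates deduplicates the bundle: repeated scalars are collapsed
// into UniqueValues with ReuseShuffleIndices recording how to rebuild the
// original order, and the bundle may be padded with poison up to a full
// vector width; it returns false when the node has to be gathered instead.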
7994 auto TryToFindDuplicates = [&](const InstructionsState &S,
7995 bool DoNotFail = false) {
7996 // Check that every instruction appears once in this bundle.
7997 SmallDenseMap<Value *, unsigned, 16> UniquePositions(VL.size());
7998 for (Value *V : VL) {
7999 if (isConstant(V)) {
8000 ReuseShuffleIndices.emplace_back(
8001 isa<PoisonValue>(V) ? PoisonMaskElem : UniqueValues.size());
8002 UniqueValues.emplace_back(V);
8003 continue;
8005 auto Res = UniquePositions.try_emplace(V, UniqueValues.size());
8006 ReuseShuffleIndices.emplace_back(Res.first->second);
8007 if (Res.second)
8008 UniqueValues.emplace_back(V);
8010 size_t NumUniqueScalarValues = UniqueValues.size();
8011 bool IsFullVectors = hasFullVectorsOrPowerOf2(
8012 *TTI, getValueType(UniqueValues.front()), NumUniqueScalarValues);
8013 if (NumUniqueScalarValues == VL.size() &&
8014 (VectorizeNonPowerOf2 || IsFullVectors)) {
8015 ReuseShuffleIndices.clear();
8016 } else {
8017 // FIXME: Reshuffling scalars is not supported yet for non-power-of-2 ops.
8018 if ((UserTreeIdx.UserTE &&
8019 UserTreeIdx.UserTE->hasNonWholeRegisterOrNonPowerOf2Vec(*TTI)) ||
8020 !has_single_bit(VL.size())) {
8021 LLVM_DEBUG(dbgs() << "SLP: Reshuffling scalars not yet supported "
8022 "for nodes with padding.\n");
8023 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8024 return false;
8026 LLVM_DEBUG(dbgs() << "SLP: Shuffle for reused scalars.\n");
8027 if (NumUniqueScalarValues <= 1 || !IsFullVectors ||
8028 (UniquePositions.size() == 1 && all_of(UniqueValues, [](Value *V) {
8029 return isa<UndefValue>(V) || !isConstant(V);
8030 }))) {
8031 if (DoNotFail && UniquePositions.size() > 1 &&
8032 NumUniqueScalarValues > 1 && S.getMainOp()->isSafeToRemove() &&
8033 all_of(UniqueValues, IsaPred<Instruction, PoisonValue>)) {
8034 // Find the number of elements that forms full vectors.
8035 unsigned PWSz = getFullVectorNumberOfElements(
8036 *TTI, UniqueValues.front()->getType(), UniqueValues.size());
8037 if (PWSz == VL.size()) {
8038 ReuseShuffleIndices.clear();
8039 } else {
8040 NonUniqueValueVL.assign(UniqueValues.begin(), UniqueValues.end());
8041 NonUniqueValueVL.append(
8042 PWSz - UniqueValues.size(),
8043 PoisonValue::get(UniqueValues.front()->getType()));
8044 VL = NonUniqueValueVL;
8046 return true;
8048 LLVM_DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
8049 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8050 return false;
8052 VL = UniqueValues;
8054 return true;
8057 InstructionsState S = getSameOpcode(VL, *TLI);
8059 // Don't go into catchswitch blocks, which can happen with PHIs.
8060 // Such blocks can only have PHIs and the catchswitch. There is no
8061 // place to insert a shuffle if we need to, so just avoid that issue.
8062 if (S.getMainOp() &&
8063 isa<CatchSwitchInst>(S.getMainOp()->getParent()->getTerminator())) {
8064 LLVM_DEBUG(dbgs() << "SLP: bundle in catchswitch block.\n");
8065 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8066 return;
8069 // Check if this is a duplicate of another entry.
8070 if (S.getOpcode()) {
8071 if (TreeEntry *E = getTreeEntry(S.getMainOp())) {
8072 LLVM_DEBUG(dbgs() << "SLP: \tChecking bundle: " << *S.getMainOp()
8073 << ".\n");
8074 if (GatheredLoadsEntriesFirst.has_value() || !E->isSame(VL)) {
8075 auto It = MultiNodeScalars.find(S.getMainOp());
8076 if (It != MultiNodeScalars.end()) {
8077 auto *TEIt = find_if(It->getSecond(),
8078 [&](TreeEntry *ME) { return ME->isSame(VL); });
8079 if (TEIt != It->getSecond().end())
8080 E = *TEIt;
8081 else
8082 E = nullptr;
8083 } else {
8084 E = nullptr;
8087 if (!E) {
8088 if (!doesNotNeedToBeScheduled(S.getMainOp())) {
8089 LLVM_DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
8090 if (TryToFindDuplicates(S))
8091 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8092 ReuseShuffleIndices);
8093 return;
8095 SmallPtrSet<const TreeEntry *, 4> Nodes;
8096 Nodes.insert(getTreeEntry(S.getMainOp()));
8097 for (const TreeEntry *E : MultiNodeScalars.lookup(S.getMainOp()))
8098 Nodes.insert(E);
8099 SmallPtrSet<Value *, 8> Values(VL.begin(), VL.end());
8100 if (any_of(Nodes, [&](const TreeEntry *E) {
8101 if (all_of(E->Scalars,
8102 [&](Value *V) { return Values.contains(V); }))
8103 return true;
8104 SmallPtrSet<Value *, 8> EValues(E->Scalars.begin(),
8105 E->Scalars.end());
8106 return (
8107 all_of(VL, [&](Value *V) { return EValues.contains(V); }));
8108 })) {
8109 LLVM_DEBUG(dbgs() << "SLP: Gathering due to full overlap.\n");
8110 if (TryToFindDuplicates(S))
8111 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8112 ReuseShuffleIndices);
8113 return;
8115 } else {
8116 // Record the reuse of the tree node. FIXME: currently this is only
8117 // used to properly draw the graph rather than for the actual
8118 // vectorization.
8119 E->UserTreeIndices.push_back(UserTreeIdx);
8120 LLVM_DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *S.getMainOp()
8121 << ".\n");
8122 return;
8127 // Gather if we hit the RecursionMaxDepth, unless this is a load (or z/sext of
8128 // a load), in which case peek through to include it in the tree, without
8129 // ballooning over-budget.
8130 if (Depth >= RecursionMaxDepth &&
8131 !(S.getMainOp() && !S.isAltShuffle() && VL.size() >= 4 &&
8132 (match(S.getMainOp(), m_Load(m_Value())) ||
8133 all_of(VL, [&S](const Value *I) {
8134 return match(I,
8135 m_OneUse(m_ZExtOrSExt(m_OneUse(m_Load(m_Value()))))) &&
8136 cast<Instruction>(I)->getOpcode() ==
8137 S.getMainOp()->getOpcode();
8138 })))) {
8139 LLVM_DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
8140 if (TryToFindDuplicates(S))
8141 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8142 ReuseShuffleIndices);
8143 return;
8146 // Don't handle scalable vectors.
8147 if (S.getOpcode() == Instruction::ExtractElement &&
8148 isa<ScalableVectorType>(
8149 cast<ExtractElementInst>(S.getMainOp())->getVectorOperandType())) {
8150 LLVM_DEBUG(dbgs() << "SLP: Gathering due to scalable vector type.\n");
8151 if (TryToFindDuplicates(S))
8152 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8153 ReuseShuffleIndices);
8154 return;
8157 // Don't handle vectors.
8158 if (!SLPReVec && getValueType(VL.front())->isVectorTy()) {
8159 LLVM_DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
8160 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8161 return;
8164 // If all of the operands are identical or constant we have a simple solution.
8165 // If we deal with insert/extract instructions, they all must have constant
8166 // indices, otherwise we should gather them, not try to vectorize.
8167 // If this is an alternate-op node with 2 elements whose operands would be
8168 // gathered - do not vectorize.
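// The NotProfitableForVectorization lambda below rejects small (2-element)
// alternate-op bundles whose operands neither look like vector extracts nor
// form good splat pairs per findBestRootPair(), since such nodes tend to end
// up as gathers anyway.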
8169 auto &&NotProfitableForVectorization = [&S, this,
8170 Depth](ArrayRef<Value *> VL) {
8171 if (!S.getOpcode() || !S.isAltShuffle() || VL.size() > 2)
8172 return false;
8173 if (VectorizableTree.size() < MinTreeSize)
8174 return false;
8175 if (Depth >= RecursionMaxDepth - 1)
8176 return true;
8177 // Check if all operands are extracts, part of vector node or can build a
8178 // regular vectorize node.
8179 SmallVector<unsigned, 8> InstsCount;
8180 for (Value *V : VL) {
8181 auto *I = cast<Instruction>(V);
8182 InstsCount.push_back(count_if(I->operand_values(), [](Value *Op) {
8183 return isa<Instruction>(Op) || isVectorLikeInstWithConstOps(Op);
8184 }));
8186 bool IsCommutative =
8187 isCommutative(S.getMainOp()) || isCommutative(S.getAltOp());
8188 if ((IsCommutative &&
8189 std::accumulate(InstsCount.begin(), InstsCount.end(), 0) < 2) ||
8190 (!IsCommutative &&
8191 all_of(InstsCount, [](unsigned ICnt) { return ICnt < 2; })))
8192 return true;
8193 assert(VL.size() == 2 && "Expected only 2 alternate op instructions.");
8194 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
8195 auto *I1 = cast<Instruction>(VL.front());
8196 auto *I2 = cast<Instruction>(VL.back());
8197 for (int Op : seq<int>(S.getMainOp()->getNumOperands()))
8198 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
8199 I2->getOperand(Op));
8200 if (static_cast<unsigned>(count_if(
8201 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
8202 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
8203 })) >= S.getMainOp()->getNumOperands() / 2)
8204 return false;
8205 if (S.getMainOp()->getNumOperands() > 2)
8206 return true;
8207 if (IsCommutative) {
8208 // Check permuted operands.
8209 Candidates.clear();
8210 for (int Op = 0, E = S.getMainOp()->getNumOperands(); Op < E; ++Op)
8211 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
8212 I2->getOperand((Op + 1) % E));
8213 if (any_of(
8214 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
8215 return findBestRootPair(Cand, LookAheadHeuristics::ScoreSplat);
8217 return false;
8219 return true;
8221 SmallVector<unsigned> SortedIndices;
8222 BasicBlock *BB = nullptr;
8223 bool IsScatterVectorizeUserTE =
8224 UserTreeIdx.UserTE &&
8225 UserTreeIdx.UserTE->State == TreeEntry::ScatterVectorize;
8226 bool AreAllSameBlock = S.getOpcode() && allSameBlock(VL);
8227 bool AreScatterAllGEPSameBlock =
8228 (IsScatterVectorizeUserTE && VL.front()->getType()->isPointerTy() &&
8229 VL.size() > 2 &&
8230 all_of(VL,
8231 [&BB](Value *V) {
8232 auto *I = dyn_cast<GetElementPtrInst>(V);
8233 if (!I)
8234 return doesNotNeedToBeScheduled(V);
8235 if (!BB)
8236 BB = I->getParent();
8237 return BB == I->getParent() && I->getNumOperands() == 2;
8238 }) &&
8239 BB &&
8240 sortPtrAccesses(VL, UserTreeIdx.UserTE->getMainOp()->getType(), *DL, *SE,
8241 SortedIndices));
8242 bool AreAllSameInsts = AreAllSameBlock || AreScatterAllGEPSameBlock;
8243 if (!AreAllSameInsts || (!S.getOpcode() && allConstant(VL)) || isSplat(VL) ||
8244 (isa_and_present<InsertElementInst, ExtractValueInst, ExtractElementInst>(
8245 S.getMainOp()) &&
8246 !all_of(VL, isVectorLikeInstWithConstOps)) ||
8247 NotProfitableForVectorization(VL)) {
8248 LLVM_DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O, small shuffle. \n");
8249 if (TryToFindDuplicates(S))
8250 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8251 ReuseShuffleIndices);
8252 return;
8255 // Don't vectorize ephemeral values.
8256 if (S.getOpcode() && !EphValues.empty()) {
8257 for (Value *V : VL) {
8258 if (EphValues.count(V)) {
8259 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
8260 << ") is ephemeral.\n");
8261 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8262 return;
8267 // We now know that this is a vector of instructions of the same type from
8268 // the same block.
8270 // Check that none of the instructions in the bundle are already in the tree.
8271 for (Value *V : VL) {
8272 if ((!IsScatterVectorizeUserTE && !isa<Instruction>(V)) ||
8273 doesNotNeedToBeScheduled(V))
8274 continue;
8275 if (getTreeEntry(V)) {
8276 LLVM_DEBUG(dbgs() << "SLP: The instruction (" << *V
8277 << ") is already in tree.\n");
8278 if (TryToFindDuplicates(S))
8279 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8280 ReuseShuffleIndices);
8281 return;
8285 // The reduction nodes (stored in UserIgnoreList) also should stay scalar.
8286 if (UserIgnoreList && !UserIgnoreList->empty()) {
8287 for (Value *V : VL) {
8288 if (UserIgnoreList->contains(V)) {
8289 LLVM_DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
8290 if (TryToFindDuplicates(S))
8291 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8292 ReuseShuffleIndices);
8293 return;
8298 // Special processing for sorted pointers for a ScatterVectorize node with
8299 // constant indices only.
8300 if (!AreAllSameBlock && AreScatterAllGEPSameBlock) {
8301 assert(VL.front()->getType()->isPointerTy() &&
8302 count_if(VL, IsaPred<GetElementPtrInst>) >= 2 &&
8303 "Expected pointers only.");
8304 // Reset S to make it GetElementPtr kind of node.
8305 const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
8306 assert(It != VL.end() && "Expected at least one GEP.");
8307 S = getSameOpcode(*It, *TLI);
8310 // Check that all of the users of the scalars that we want to vectorize are
8311 // schedulable.
8312 Instruction *VL0 = S.getMainOp();
8313 BB = VL0->getParent();
8315 if (S.getMainOp() &&
8316 (BB->isEHPad() || isa_and_nonnull<UnreachableInst>(BB->getTerminator()) ||
8317 !DT->isReachableFromEntry(BB))) {
8318 // Don't go into unreachable blocks. They may contain instructions with
8319 // dependency cycles which confuse the final scheduling.
8320 // Do not vectorize EH and non-returning blocks, not profitable in most
8321 // cases.
8322 LLVM_DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
8323 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx);
8324 return;
8327 // Check that every instruction appears once in this bundle.
8328 if (!TryToFindDuplicates(S, /*DoNotFail=*/true))
8329 return;
8331 // Perform specific checks for each particular instruction kind.
8332 OrdersType CurrentOrder;
8333 SmallVector<Value *> PointerOps;
8334 TreeEntry::EntryState State = getScalarsVectorizationState(
8335 S, VL, IsScatterVectorizeUserTE, CurrentOrder, PointerOps);
8336 if (State == TreeEntry::NeedToGather) {
8337 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8338 ReuseShuffleIndices);
8339 return;
8342 auto &BSRef = BlocksSchedules[BB];
8343 if (!BSRef)
8344 BSRef = std::make_unique<BlockScheduling>(BB);
8346 BlockScheduling &BS = *BSRef;
8348 std::optional<ScheduleData *> Bundle =
8349 BS.tryScheduleBundle(UniqueValues, this, S);
8350 #ifdef EXPENSIVE_CHECKS
8351 // Make sure we didn't break any internal invariants
8352 BS.verify();
8353 #endif
8354 if (!Bundle) {
8355 LLVM_DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
8356 assert((!BS.getScheduleData(VL0) ||
8357 !BS.getScheduleData(VL0)->isPartOfBundle()) &&
8358 "tryScheduleBundle should cancelScheduling on failure");
8359 newTreeEntry(VL, std::nullopt /*not vectorized*/, S, UserTreeIdx,
8360 ReuseShuffleIndices);
8361 NonScheduledFirst.insert(VL.front());
8362 if (S.getOpcode() == Instruction::Load &&
8363 BS.ScheduleRegionSize < BS.ScheduleRegionSizeLimit)
8364 registerNonVectorizableLoads(VL);
8365 return;
8367 LLVM_DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
8369 unsigned ShuffleOrOp = S.isAltShuffle() ?
8370 (unsigned) Instruction::ShuffleVector : S.getOpcode();
8371 auto CreateOperandNodes = [&](TreeEntry *TE, const auto &Operands) {
8372 // Postpone the creation of operand nodes that are PHIs.
8373 SmallVector<unsigned> PHIOps;
8374 for (unsigned I : seq<unsigned>(Operands.size())) {
8375 ArrayRef<Value *> Op = Operands[I];
8376 if (Op.empty())
8377 continue;
8378 InstructionsState S = getSameOpcode(Op, *TLI);
8379 if (S.getOpcode() != Instruction::PHI || S.isAltShuffle())
8380 buildTree_rec(Op, Depth + 1, {TE, I});
8381 else
8382 PHIOps.push_back(I);
8384 for (unsigned I : PHIOps)
8385 buildTree_rec(Operands[I], Depth + 1, {TE, I});
8387 switch (ShuffleOrOp) {
8388 case Instruction::PHI: {
8389 auto *PH = cast<PHINode>(VL0);
8391 TreeEntry *TE =
8392 newTreeEntry(VL, Bundle, S, UserTreeIdx, ReuseShuffleIndices);
8393 LLVM_DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
8395 // Keeps the reordered operands to avoid code duplication.
8396 PHIHandler Handler(*DT, PH, VL);
8397 Handler.buildOperands();
8398 for (unsigned I : seq<unsigned>(PH->getNumOperands()))
8399 TE->setOperand(I, Handler.getOperands(I));
8400 SmallVector<ArrayRef<Value *>> Operands(PH->getNumOperands());
8401 for (unsigned I : seq<unsigned>(PH->getNumOperands()))
8402 Operands[I] = Handler.getOperands(I);
8403 CreateOperandNodes(TE, Operands);
8404 return;
8406 case Instruction::ExtractValue:
8407 case Instruction::ExtractElement: {
8408 if (CurrentOrder.empty()) {
8409 LLVM_DEBUG(dbgs() << "SLP: Reusing or shuffling extract sequence.\n");
8410 } else {
8411 LLVM_DEBUG({
8412 dbgs() << "SLP: Reusing or shuffling of reordered extract sequence "
8413 "with order";
8414 for (unsigned Idx : CurrentOrder)
8415 dbgs() << " " << Idx;
8416 dbgs() << "\n";
8418 fixupOrderingIndices(CurrentOrder);
8420 // Insert new order with initial value 0, if it does not exist,
8421 // otherwise return the iterator to the existing one.
8422 newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8423 ReuseShuffleIndices, CurrentOrder);
8424 // This is a special case, as it does not gather, but at the same time
8425 // we are not extending buildTree_rec() towards the operands.
8426 ValueList Op0;
8427 Op0.assign(VL.size(), VL0->getOperand(0));
8428 VectorizableTree.back()->setOperand(0, Op0);
8429 return;
8431 case Instruction::InsertElement: {
8432 assert(ReuseShuffleIndices.empty() && "All inserts should be unique");
8434 auto OrdCompare = [](const std::pair<int, int> &P1,
8435 const std::pair<int, int> &P2) {
8436 return P1.first > P2.first;
8438 PriorityQueue<std::pair<int, int>, SmallVector<std::pair<int, int>>,
8439 decltype(OrdCompare)>
8440 Indices(OrdCompare);
8441 for (int I = 0, E = VL.size(); I < E; ++I) {
8442 unsigned Idx = *getElementIndex(VL[I]);
8443 Indices.emplace(Idx, I);
8445 OrdersType CurrentOrder(VL.size(), VL.size());
8446 bool IsIdentity = true;
8447 for (int I = 0, E = VL.size(); I < E; ++I) {
8448 CurrentOrder[Indices.top().second] = I;
8449 IsIdentity &= Indices.top().second == I;
8450 Indices.pop();
8452 if (IsIdentity)
8453 CurrentOrder.clear();
8454 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8455 {}, CurrentOrder);
8456 LLVM_DEBUG(dbgs() << "SLP: added inserts bundle.\n");
8458 TE->setOperand(*this);
8459 buildTree_rec(TE->getOperand(1), Depth + 1, {TE, 1});
8460 return;
8462 case Instruction::Load: {
8463 // Check that a vectorized load would load the same memory as a scalar
8464 // load. For example, we don't want to vectorize loads that are smaller
8465 // than 8-bit. Even though we have a packed struct {<i2, i2, i2, i2>} LLVM
8466 // treats loading/storing it as an i8 struct. If we vectorize loads/stores
8467 // from such a struct, we read/write packed bits disagreeing with the
8468 // unvectorized version.
8469 TreeEntry *TE = nullptr;
8470 fixupOrderingIndices(CurrentOrder);
8471 switch (State) {
8472 case TreeEntry::Vectorize:
8473 TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8474 ReuseShuffleIndices, CurrentOrder, InterleaveFactor);
8475 if (CurrentOrder.empty())
8476 LLVM_DEBUG(dbgs() << "SLP: added a vector of loads.\n");
8477 else
8478 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled loads.\n");
8479 break;
8480 case TreeEntry::StridedVectorize:
8481 // Vectorizing non-consecutive loads as strided loads.
8482 TE = newTreeEntry(VL, TreeEntry::StridedVectorize, Bundle, S,
8483 UserTreeIdx, ReuseShuffleIndices, CurrentOrder);
8484 LLVM_DEBUG(dbgs() << "SLP: added a vector of strided loads.\n");
8485 break;
8486 case TreeEntry::ScatterVectorize:
8487 // Vectorizing non-consecutive loads with `llvm.masked.gather`.
8488 TE = newTreeEntry(VL, TreeEntry::ScatterVectorize, Bundle, S,
8489 UserTreeIdx, ReuseShuffleIndices);
8490 LLVM_DEBUG(dbgs() << "SLP: added a vector of non-consecutive loads.\n");
8491 break;
8492 case TreeEntry::CombinedVectorize:
8493 case TreeEntry::NeedToGather:
8494 llvm_unreachable("Unexpected loads state.");
8496 TE->setOperand(*this);
8497 if (State == TreeEntry::ScatterVectorize)
8498 buildTree_rec(PointerOps, Depth + 1, {TE, 0});
8499 return;
8501 case Instruction::ZExt:
8502 case Instruction::SExt:
8503 case Instruction::FPToUI:
8504 case Instruction::FPToSI:
8505 case Instruction::FPExt:
8506 case Instruction::PtrToInt:
8507 case Instruction::IntToPtr:
8508 case Instruction::SIToFP:
8509 case Instruction::UIToFP:
8510 case Instruction::Trunc:
8511 case Instruction::FPTrunc:
8512 case Instruction::BitCast: {
8513 auto [PrevMaxBW, PrevMinBW] = CastMaxMinBWSizes.value_or(
8514 std::make_pair(std::numeric_limits<unsigned>::min(),
8515 std::numeric_limits<unsigned>::max()));
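// Track the widest destination and narrowest source bit widths seen across
// zext/sext casts (and the inverse for trunc); these bounds are later
// consulted by the bitwidth-minimization logic.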
8516 if (ShuffleOrOp == Instruction::ZExt ||
8517 ShuffleOrOp == Instruction::SExt) {
8518 CastMaxMinBWSizes = std::make_pair(
8519 std::max<unsigned>(DL->getTypeSizeInBits(VL0->getType()),
8520 PrevMaxBW),
8521 std::min<unsigned>(
8522 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()),
8523 PrevMinBW));
8524 } else if (ShuffleOrOp == Instruction::Trunc) {
8525 CastMaxMinBWSizes = std::make_pair(
8526 std::max<unsigned>(
8527 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()),
8528 PrevMaxBW),
8529 std::min<unsigned>(DL->getTypeSizeInBits(VL0->getType()),
8530 PrevMinBW));
8532 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8533 ReuseShuffleIndices);
8534 LLVM_DEBUG(dbgs() << "SLP: added a vector of casts.\n");
8536 TE->setOperand(*this);
8537 for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
8538 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8539 if (ShuffleOrOp == Instruction::Trunc) {
8540 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8541 } else if (ShuffleOrOp == Instruction::SIToFP ||
8542 ShuffleOrOp == Instruction::UIToFP) {
8543 unsigned NumSignBits =
8544 ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
8545 if (auto *OpI = dyn_cast<Instruction>(VL0->getOperand(0))) {
8546 APInt Mask = DB->getDemandedBits(OpI);
8547 NumSignBits = std::max(NumSignBits, Mask.countl_zero());
8549 if (NumSignBits * 2 >=
8550 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()))
8551 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8553 return;
8555 case Instruction::ICmp:
8556 case Instruction::FCmp: {
8557 // Check that all of the compares have the same predicate.
8558 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
8559 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8560 ReuseShuffleIndices);
8561 LLVM_DEBUG(dbgs() << "SLP: added a vector of compares.\n");
8563 ValueList Left, Right;
8564 VLOperands Ops(VL, VL0, *this);
8565 if (cast<CmpInst>(VL0)->isCommutative()) {
8566 // Commutative predicate - collect + sort operands of the instructions
8567 // so that each side is more likely to have the same opcode.
8568 assert(P0 == CmpInst::getSwappedPredicate(P0) &&
8569 "Commutative Predicate mismatch");
8570 Ops.reorder();
8571 Left = Ops.getVL(0);
8572 Right = Ops.getVL(1);
8573 } else {
8574 // Collect operands - commute if it uses the swapped predicate.
8575 for (Value *V : VL) {
8576 if (isa<PoisonValue>(V)) {
8577 Left.push_back(PoisonValue::get(VL0->getOperand(0)->getType()));
8578 Right.push_back(PoisonValue::get(VL0->getOperand(1)->getType()));
8579 continue;
8581 auto *Cmp = cast<CmpInst>(V);
8582 Value *LHS = Cmp->getOperand(0);
8583 Value *RHS = Cmp->getOperand(1);
8584 if (Cmp->getPredicate() != P0)
8585 std::swap(LHS, RHS);
8586 Left.push_back(LHS);
8587 Right.push_back(RHS);
8590 TE->setOperand(0, Left);
8591 TE->setOperand(1, Right);
8592 buildTree_rec(Left, Depth + 1, {TE, 0});
8593 buildTree_rec(Right, Depth + 1, {TE, 1});
8594 if (ShuffleOrOp == Instruction::ICmp) {
8595 unsigned NumSignBits0 =
8596 ComputeNumSignBits(VL0->getOperand(0), *DL, 0, AC, nullptr, DT);
8597 if (NumSignBits0 * 2 >=
8598 DL->getTypeSizeInBits(VL0->getOperand(0)->getType()))
8599 ExtraBitWidthNodes.insert(getOperandEntry(TE, 0)->Idx);
8600 unsigned NumSignBits1 =
8601 ComputeNumSignBits(VL0->getOperand(1), *DL, 0, AC, nullptr, DT);
8602 if (NumSignBits1 * 2 >=
8603 DL->getTypeSizeInBits(VL0->getOperand(1)->getType()))
8604 ExtraBitWidthNodes.insert(getOperandEntry(TE, 1)->Idx);
8606 return;
8608 case Instruction::Select:
8609 case Instruction::FNeg:
8610 case Instruction::Add:
8611 case Instruction::FAdd:
8612 case Instruction::Sub:
8613 case Instruction::FSub:
8614 case Instruction::Mul:
8615 case Instruction::FMul:
8616 case Instruction::UDiv:
8617 case Instruction::SDiv:
8618 case Instruction::FDiv:
8619 case Instruction::URem:
8620 case Instruction::SRem:
8621 case Instruction::FRem:
8622 case Instruction::Shl:
8623 case Instruction::LShr:
8624 case Instruction::AShr:
8625 case Instruction::And:
8626 case Instruction::Or:
8627 case Instruction::Xor:
8628 case Instruction::Freeze: {
8629 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8630 ReuseShuffleIndices);
8631 LLVM_DEBUG(dbgs() << "SLP: added a vector of un/bin op.\n");
8633 TE->setOperand(*this, isa<BinaryOperator>(VL0) && isCommutative(VL0));
8634 for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
8635 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8636 return;
8638 case Instruction::GetElementPtr: {
8639 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8640 ReuseShuffleIndices);
8641 LLVM_DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
8642 SmallVector<ValueList, 2> Operands(2);
8643 // Prepare the operand vector for pointer operands.
8644 for (Value *V : VL) {
8645 auto *GEP = dyn_cast<GetElementPtrInst>(V);
8646 if (!GEP) {
8647 Operands.front().push_back(V);
8648 continue;
8650 Operands.front().push_back(GEP->getPointerOperand());
8652 TE->setOperand(0, Operands.front());
8653 // Need to cast all indices to the same type before vectorization to
8654 // avoid a crash.
8655 // Required to be able to find correct matches between different gather
8656 // nodes and reuse the vectorized values rather than trying to gather them
8657 // again.
8658 int IndexIdx = 1;
8659 Type *VL0Ty = VL0->getOperand(IndexIdx)->getType();
8660 Type *Ty = all_of(VL,
8661 [VL0Ty, IndexIdx](Value *V) {
8662 auto *GEP = dyn_cast<GetElementPtrInst>(V);
8663 if (!GEP)
8664 return true;
8665 return VL0Ty == GEP->getOperand(IndexIdx)->getType();
8667 ? VL0Ty
8668 : DL->getIndexType(cast<GetElementPtrInst>(VL0)
8669 ->getPointerOperandType()
8670 ->getScalarType());
8671 // Prepare the operand vector.
8672 for (Value *V : VL) {
8673 auto *I = dyn_cast<GetElementPtrInst>(V);
8674 if (!I) {
8675 Operands.back().push_back(
8676 ConstantInt::get(Ty, 0, /*isSigned=*/false));
8677 continue;
8679 auto *Op = I->getOperand(IndexIdx);
8680 auto *CI = dyn_cast<ConstantInt>(Op);
8681 if (!CI)
8682 Operands.back().push_back(Op);
8683 else
8684 Operands.back().push_back(ConstantFoldIntegerCast(
8685 CI, Ty, CI->getValue().isSignBitSet(), *DL));
8687 TE->setOperand(IndexIdx, Operands.back());
8689 for (unsigned I = 0, Ops = Operands.size(); I < Ops; ++I)
8690 buildTree_rec(Operands[I], Depth + 1, {TE, I});
8691 return;
8693 case Instruction::Store: {
8694 bool Consecutive = CurrentOrder.empty();
8695 if (!Consecutive)
8696 fixupOrderingIndices(CurrentOrder);
8697 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8698 ReuseShuffleIndices, CurrentOrder);
8699 TE->setOperand(*this);
8700 buildTree_rec(TE->getOperand(0), Depth + 1, {TE, 0});
8701 if (Consecutive)
8702 LLVM_DEBUG(dbgs() << "SLP: added a vector of stores.\n");
8703 else
8704 LLVM_DEBUG(dbgs() << "SLP: added a vector of jumbled stores.\n");
8705 return;
8707 case Instruction::Call: {
8708 // Check if the calls are all to the same vectorizable intrinsic or
8709 // library function.
8710 CallInst *CI = cast<CallInst>(VL0);
8711 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8713 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8714 ReuseShuffleIndices);
8715 TE->setOperand(*this, isCommutative(VL0));
8716 for (unsigned I : seq<unsigned>(CI->arg_size())) {
8717 // For scalar operands there is no need to create an entry since they
8718 // are not vectorized.
8719 if (isVectorIntrinsicWithScalarOpAtArg(ID, I))
8720 continue;
8721 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8723 return;
8725 case Instruction::ShuffleVector: {
8726 TreeEntry *TE = newTreeEntry(VL, Bundle /*vectorized*/, S, UserTreeIdx,
8727 ReuseShuffleIndices);
8728 LLVM_DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
8730 // Reorder operands if reordering would enable vectorization.
8731 auto *CI = dyn_cast<CmpInst>(VL0);
8732 if (CI && any_of(VL, [](Value *V) {
8733 return !isa<PoisonValue>(V) && !cast<CmpInst>(V)->isCommutative();
8734 })) {
8735 auto *MainCI = cast<CmpInst>(S.getMainOp());
8736 auto *AltCI = cast<CmpInst>(S.getAltOp());
8737 CmpInst::Predicate MainP = MainCI->getPredicate();
8738 CmpInst::Predicate AltP = AltCI->getPredicate();
8739 assert(MainP != AltP &&
8740 "Expected different main/alternate predicates.");
8741 ValueList Left, Right;
8742 // Collect operands - commute if it uses the swapped predicate or
8743 // alternate operation.
8744 for (Value *V : VL) {
8745 if (isa<PoisonValue>(V)) {
8746 Left.push_back(PoisonValue::get(MainCI->getOperand(0)->getType()));
8747 Right.push_back(PoisonValue::get(MainCI->getOperand(1)->getType()));
8748 continue;
8750 auto *Cmp = cast<CmpInst>(V);
8751 Value *LHS = Cmp->getOperand(0);
8752 Value *RHS = Cmp->getOperand(1);
8754 if (isAlternateInstruction(Cmp, MainCI, AltCI, *TLI)) {
8755 if (AltP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
8756 std::swap(LHS, RHS);
8757 } else {
8758 if (MainP == CmpInst::getSwappedPredicate(Cmp->getPredicate()))
8759 std::swap(LHS, RHS);
8761 Left.push_back(LHS);
8762 Right.push_back(RHS);
8764 TE->setOperand(0, Left);
8765 TE->setOperand(1, Right);
8766 buildTree_rec(Left, Depth + 1, {TE, 0});
8767 buildTree_rec(Right, Depth + 1, {TE, 1});
8768 return;
8771 TE->setOperand(*this, isa<BinaryOperator>(VL0) || CI);
8772 for (unsigned I : seq<unsigned>(VL0->getNumOperands()))
8773 buildTree_rec(TE->getOperand(I), Depth + 1, {TE, I});
8774 return;
8776 default:
8777 break;
8779 llvm_unreachable("Unexpected vectorization of the instructions.");
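/// Computes the number of scalar elements \p T flattens to, provided it is a
/// homogeneous aggregate (struct/array/fixed vector) whose flattened type fits
/// the vector register size constraints; returns 0 otherwise.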
8782 unsigned BoUpSLP::canMapToVector(Type *T) const {
8783 unsigned N = 1;
8784 Type *EltTy = T;
8786 while (isa<StructType, ArrayType, FixedVectorType>(EltTy)) {
8787 if (EltTy->isEmptyTy())
8788 return 0;
8789 if (auto *ST = dyn_cast<StructType>(EltTy)) {
8790 // Check that struct is homogeneous.
8791 for (const auto *Ty : ST->elements())
8792 if (Ty != *ST->element_begin())
8793 return 0;
8794 N *= ST->getNumElements();
8795 EltTy = *ST->element_begin();
8796 } else if (auto *AT = dyn_cast<ArrayType>(EltTy)) {
8797 N *= AT->getNumElements();
8798 EltTy = AT->getElementType();
8799 } else {
8800 auto *VT = cast<FixedVectorType>(EltTy);
8801 N *= VT->getNumElements();
8802 EltTy = VT->getElementType();
8806 if (!isValidElementType(EltTy))
8807 return 0;
8808 uint64_t VTSize = DL->getTypeStoreSizeInBits(getWidenedType(EltTy, N));
8809 if (VTSize < MinVecRegSize || VTSize > MaxVecRegSize ||
8810 VTSize != DL->getTypeStoreSizeInBits(T))
8811 return 0;
8812 return N;
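/// canReuseExtract() returns true when the extracts read a single source
/// vector in identity order (CurrentOrder is then left empty); otherwise
/// CurrentOrder either holds the required permutation of extract indices or
/// is cleared when the extracts cannot be reused at all.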
8815 bool BoUpSLP::canReuseExtract(ArrayRef<Value *> VL, Value *OpValue,
8816 SmallVectorImpl<unsigned> &CurrentOrder,
8817 bool ResizeAllowed) const {
8818 const auto *It = find_if(VL, IsaPred<ExtractElementInst, ExtractValueInst>);
8819 assert(It != VL.end() && "Expected at least one extract instruction.");
8820 auto *E0 = cast<Instruction>(*It);
8821 assert(
8822 all_of(VL, IsaPred<UndefValue, ExtractElementInst, ExtractValueInst>) &&
8823 "Invalid opcode");
8824 // Check if all of the extracts come from the same vector and from the
8825 // correct offset.
8826 Value *Vec = E0->getOperand(0);
8828 CurrentOrder.clear();
8830 // We have to extract from a vector/aggregate with the same number of elements.
8831 unsigned NElts;
8832 if (E0->getOpcode() == Instruction::ExtractValue) {
8833 NElts = canMapToVector(Vec->getType());
8834 if (!NElts)
8835 return false;
8836 // Check if load can be rewritten as load of vector.
8837 LoadInst *LI = dyn_cast<LoadInst>(Vec);
8838 if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
8839 return false;
8840 } else {
8841 NElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
8844 unsigned E = VL.size();
8845 if (!ResizeAllowed && NElts != E)
8846 return false;
8847 SmallVector<int> Indices(E, PoisonMaskElem);
8848 unsigned MinIdx = NElts, MaxIdx = 0;
8849 for (auto [I, V] : enumerate(VL)) {
8850 auto *Inst = dyn_cast<Instruction>(V);
8851 if (!Inst)
8852 continue;
8853 if (Inst->getOperand(0) != Vec)
8854 return false;
8855 if (auto *EE = dyn_cast<ExtractElementInst>(Inst))
8856 if (isa<UndefValue>(EE->getIndexOperand()))
8857 continue;
8858 std::optional<unsigned> Idx = getExtractIndex(Inst);
8859 if (!Idx)
8860 return false;
8861 const unsigned ExtIdx = *Idx;
8862 if (ExtIdx >= NElts)
8863 continue;
8864 Indices[I] = ExtIdx;
8865 if (MinIdx > ExtIdx)
8866 MinIdx = ExtIdx;
8867 if (MaxIdx < ExtIdx)
8868 MaxIdx = ExtIdx;
8870 if (MaxIdx - MinIdx + 1 > E)
8871 return false;
8872 if (MaxIdx + 1 <= E)
8873 MinIdx = 0;
8875 // Check that all of the indices extract from the correct offset.
8876 bool ShouldKeepOrder = true;
8877 // Assign to all items the initial value E so we can check if the extract
8878 // instruction index was used already.
8879 // Also, later we can check that all the indices are used and we have
8880 // consecutive accesses in the extract instructions, by checking that no
8881 // element of CurrentOrder still has the value E.
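// E.g. (illustrative), extracts with indices 1, 0, 3, 2 from a 4-element
// vector give CurrentOrder = {1, 0, 3, 2}; ShouldKeepOrder is false, so the
// non-identity order is returned to the caller via CurrentOrder.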
8882 CurrentOrder.assign(E, E);
8883 for (unsigned I = 0; I < E; ++I) {
8884 if (Indices[I] == PoisonMaskElem)
8885 continue;
8886 const unsigned ExtIdx = Indices[I] - MinIdx;
8887 if (CurrentOrder[ExtIdx] != E) {
8888 CurrentOrder.clear();
8889 return false;
8891 ShouldKeepOrder &= ExtIdx == I;
8892 CurrentOrder[ExtIdx] = I;
8894 if (ShouldKeepOrder)
8895 CurrentOrder.clear();
8897 return ShouldKeepOrder;
8900 bool BoUpSLP::areAllUsersVectorized(
8901 Instruction *I, const SmallDenseSet<Value *> *VectorizedVals) const {
8902 return (I->hasOneUse() && (!VectorizedVals || VectorizedVals->contains(I))) ||
8903 all_of(I->users(), [this](User *U) {
8904 return ScalarToTreeEntry.contains(U) ||
8905 isVectorLikeInstWithConstOps(U) ||
8906 (isa<ExtractElementInst>(U) && MustGather.contains(U));
8910 static std::pair<InstructionCost, InstructionCost>
8911 getVectorCallCosts(CallInst *CI, FixedVectorType *VecTy,
8912 TargetTransformInfo *TTI, TargetLibraryInfo *TLI,
8913 ArrayRef<Type *> ArgTys) {
8914 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
8916 // Calculate the costs of the vector intrinsic call and the vector library call.
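// E.g. (illustrative), for a fabs call widened to 4 lanes this returns both
// the cost of the llvm.fabs.v4f32 intrinsic and the cost of a matching
// vector math-library routine, if the VFDatabase provides one for this shape.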
8917 FastMathFlags FMF;
8918 if (auto *FPCI = dyn_cast<FPMathOperator>(CI))
8919 FMF = FPCI->getFastMathFlags();
8920 SmallVector<const Value *> Arguments(CI->args());
8921 IntrinsicCostAttributes CostAttrs(ID, VecTy, Arguments, ArgTys, FMF,
8922 dyn_cast<IntrinsicInst>(CI));
8923 auto IntrinsicCost =
8924 TTI->getIntrinsicInstrCost(CostAttrs, TTI::TCK_RecipThroughput);
8926 auto Shape = VFShape::get(CI->getFunctionType(),
8927 ElementCount::getFixed(VecTy->getNumElements()),
8928 false /*HasGlobalPred*/);
8929 Function *VecFunc = VFDatabase(*CI).getVectorizedFunction(Shape);
8930 auto LibCost = IntrinsicCost;
8931 if (!CI->isNoBuiltin() && VecFunc) {
8932 // Calculate the cost of the vector library call.
8933 // If the corresponding vector call is cheaper, return its cost.
8934 LibCost =
8935 TTI->getCallInstrCost(nullptr, VecTy, ArgTys, TTI::TCK_RecipThroughput);
8937 return {IntrinsicCost, LibCost};
8940 void BoUpSLP::TreeEntry::buildAltOpShuffleMask(
8941 const function_ref<bool(Instruction *)> IsAltOp, SmallVectorImpl<int> &Mask,
8942 SmallVectorImpl<Value *> *OpScalars,
8943 SmallVectorImpl<Value *> *AltScalars) const {
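// E.g. (illustrative), for Scalars = {add, sub, add, sub} with IsAltOp
// matching the subs (and assuming empty ReorderIndices/ReuseShuffleIndices),
// the resulting Mask is <0, 5, 2, 7>: main-op lanes select from the first
// vector, alternate-op lanes from the second one.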
8944 unsigned Sz = Scalars.size();
8945 Mask.assign(Sz, PoisonMaskElem);
8946 SmallVector<int> OrderMask;
8947 if (!ReorderIndices.empty())
8948 inversePermutation(ReorderIndices, OrderMask);
8949 for (unsigned I = 0; I < Sz; ++I) {
8950 unsigned Idx = I;
8951 if (!ReorderIndices.empty())
8952 Idx = OrderMask[I];
8953 if (isa<PoisonValue>(Scalars[Idx]))
8954 continue;
8955 auto *OpInst = cast<Instruction>(Scalars[Idx]);
8956 if (IsAltOp(OpInst)) {
8957 Mask[I] = Sz + Idx;
8958 if (AltScalars)
8959 AltScalars->push_back(OpInst);
8960 } else {
8961 Mask[I] = Idx;
8962 if (OpScalars)
8963 OpScalars->push_back(OpInst);
8966 if (!ReuseShuffleIndices.empty()) {
8967 SmallVector<int> NewMask(ReuseShuffleIndices.size(), PoisonMaskElem);
8968 transform(ReuseShuffleIndices, NewMask.begin(), [&Mask](int Idx) {
8969 return Idx != PoisonMaskElem ? Mask[Idx] : PoisonMaskElem;
8971 Mask.swap(NewMask);
8975 static bool isAlternateInstruction(const Instruction *I,
8976 const Instruction *MainOp,
8977 const Instruction *AltOp,
8978 const TargetLibraryInfo &TLI) {
8979 if (auto *MainCI = dyn_cast<CmpInst>(MainOp)) {
8980 auto *AltCI = cast<CmpInst>(AltOp);
8981 CmpInst::Predicate MainP = MainCI->getPredicate();
8982 CmpInst::Predicate AltP = AltCI->getPredicate();
8983 assert(MainP != AltP && "Expected different main/alternate predicates.");
8984 auto *CI = cast<CmpInst>(I);
8985 if (isCmpSameOrSwapped(MainCI, CI, TLI))
8986 return false;
8987 if (isCmpSameOrSwapped(AltCI, CI, TLI))
8988 return true;
8989 CmpInst::Predicate P = CI->getPredicate();
8990 CmpInst::Predicate SwappedP = CmpInst::getSwappedPredicate(P);
8992 assert((MainP == P || AltP == P || MainP == SwappedP || AltP == SwappedP) &&
8993 "CmpInst expected to match either main or alternate predicate or "
8994 "their swap.");
8995 (void)AltP;
8996 return MainP != P && MainP != SwappedP;
8998 return I->getOpcode() == AltOp->getOpcode();
9001 TTI::OperandValueInfo BoUpSLP::getOperandInfo(ArrayRef<Value *> Ops) {
9002 assert(!Ops.empty());
9003 const auto *Op0 = Ops.front();
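// E.g. (illustrative), Ops = {i32 4, i32 4, i32 4, i32 4} yields
// {OK_UniformConstantValue, OP_PowerOf2}, while Ops = {%x, %x} yields
// {OK_UniformValue, OP_None}.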
9005 const bool IsConstant = all_of(Ops, [](Value *V) {
9006 // TODO: We should allow undef elements here
9007 return isConstant(V) && !isa<UndefValue>(V);
9009 const bool IsUniform = all_of(Ops, [=](Value *V) {
9010 // TODO: We should allow undef elements here
9011 return V == Op0;
9013 const bool IsPowerOfTwo = all_of(Ops, [](Value *V) {
9014 // TODO: We should allow undef elements here
9015 if (auto *CI = dyn_cast<ConstantInt>(V))
9016 return CI->getValue().isPowerOf2();
9017 return false;
9019 const bool IsNegatedPowerOfTwo = all_of(Ops, [](Value *V) {
9020 // TODO: We should allow undef elements here
9021 if (auto *CI = dyn_cast<ConstantInt>(V))
9022 return CI->getValue().isNegatedPowerOf2();
9023 return false;
9026 TTI::OperandValueKind VK = TTI::OK_AnyValue;
9027 if (IsConstant && IsUniform)
9028 VK = TTI::OK_UniformConstantValue;
9029 else if (IsConstant)
9030 VK = TTI::OK_NonUniformConstantValue;
9031 else if (IsUniform)
9032 VK = TTI::OK_UniformValue;
9034 TTI::OperandValueProperties VP = TTI::OP_None;
9035 VP = IsPowerOfTwo ? TTI::OP_PowerOf2 : VP;
9036 VP = IsNegatedPowerOfTwo ? TTI::OP_NegatedPowerOf2 : VP;
9038 return {VK, VP};
9041 namespace {
9042 /// The base class for shuffle instruction emission and shuffle cost estimation.
9043 class BaseShuffleAnalysis {
9044 protected:
9045 Type *ScalarTy = nullptr;
9047 BaseShuffleAnalysis(Type *ScalarTy) : ScalarTy(ScalarTy) {}
9049 /// V is expected to be a vectorized value.
9050 /// When REVEC is disabled, there is no difference between VF and
9051 /// VNumElements.
9052 /// When REVEC is enabled, VF is VNumElements / ScalarTyNumElements.
9053 /// e.g., if ScalarTy is <4 x Ty> and V1 is <8 x Ty>, 2 is returned instead
9054 /// of 8.
9055 unsigned getVF(Value *V) const {
9056 assert(V && "V cannot be nullptr");
9057 assert(isa<FixedVectorType>(V->getType()) &&
9058 "V does not have FixedVectorType");
9059 assert(ScalarTy && "ScalarTy cannot be nullptr");
9060 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
9061 unsigned VNumElements =
9062 cast<FixedVectorType>(V->getType())->getNumElements();
9063 assert(VNumElements > ScalarTyNumElements &&
9064 "the number of elements of V is not large enough");
9065 assert(VNumElements % ScalarTyNumElements == 0 &&
9066 "the number of elements of V is not a vectorized value");
9067 return VNumElements / ScalarTyNumElements;
9070 /// Checks if the mask is an identity mask.
9071 /// \param IsStrict if is true the function returns false if mask size does
9072 /// not match vector size.
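/// For example (illustrative), for a 4-element vector the mask <0, 1, 2, 3>
/// is an identity mask; with \p IsStrict == false the mask <0, 1> (an
/// extract of the leading subvector) is treated as identity as well.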
9073 static bool isIdentityMask(ArrayRef<int> Mask, const FixedVectorType *VecTy,
9074 bool IsStrict) {
9075 int Limit = Mask.size();
9076 int VF = VecTy->getNumElements();
9077 int Index = -1;
9078 if (VF == Limit && ShuffleVectorInst::isIdentityMask(Mask, Limit))
9079 return true;
9080 if (!IsStrict) {
9081 // Consider extract subvector starting from index 0.
9082 if (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) &&
9083 Index == 0)
9084 return true;
9085 // All VF-size submasks are identity (e.g.
9086 // <poison,poison,poison,poison,0,1,2,poison,poison,1,2,3> etc. for VF 4).
9087 if (Limit % VF == 0 && all_of(seq<int>(0, Limit / VF), [=](int Idx) {
9088 ArrayRef<int> Slice = Mask.slice(Idx * VF, VF);
9089 return all_of(Slice, [](int I) { return I == PoisonMaskElem; }) ||
9090 ShuffleVectorInst::isIdentityMask(Slice, VF);
9092 return true;
9094 return false;
9097 /// Tries to combine 2 different masks into a single one.
9098 /// \param LocalVF Vector length of the permuted input vector. \p Mask may
9099 /// change the size of the vector, \p LocalVF is the original size of the
9100 /// shuffled vector.
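/// For example (illustrative), for \p LocalVF == 2, Mask == <1, 0> and
/// ExtMask == <1, 0, 3, 2> the combined mask is <0, 1, 0, 1>, i.e. applying
/// ExtMask on top of Mask selects the original lanes 0, 1, 0, 1.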
9101 static void combineMasks(unsigned LocalVF, SmallVectorImpl<int> &Mask,
9102 ArrayRef<int> ExtMask) {
9103 unsigned VF = Mask.size();
9104 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem);
9105 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
9106 if (ExtMask[I] == PoisonMaskElem)
9107 continue;
9108 int MaskedIdx = Mask[ExtMask[I] % VF];
9109 NewMask[I] =
9110 MaskedIdx == PoisonMaskElem ? PoisonMaskElem : MaskedIdx % LocalVF;
9112 Mask.swap(NewMask);
9115 /// Looks through shuffles trying to reduce the final number of shuffles in
9116 /// the code. The function looks through the previously emitted shuffle
9117 /// instructions and properly marks indices in the mask as undef.
9118 /// For example, given the code
9119 /// \code
9120 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
9121 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
9122 /// \endcode
9123 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it will
9124 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
9125 /// <0, 1, 2, 3> for the shuffle.
9126 /// If 2 operands are of different size, the smallest one will be resized and
9127 /// the mask recalculated properly.
9128 /// For example, given the code
9129 /// \code
9130 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
9131 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
9132 /// \endcode
9133 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it will
9134 /// look through %s1 and %s2 and select vectors %0 and %1 with mask
9135 /// <0, 1, 2, 3> for the shuffle.
9136 /// So, it tries to transform permutations into a simple vector merge, if
9137 /// possible.
9138 /// \param V The input vector which must be shuffled using the given \p Mask.
9139 /// If the better candidate is found, \p V is set to this best candidate
9140 /// vector.
9141 /// \param Mask The input mask for the shuffle. If the best candidate is found
9142 /// during looking-through-shuffles attempt, it is updated accordingly.
9143 /// \param SinglePermute true if the shuffle operation is originally a
9144 /// single-value-permutation. In this case the look-through-shuffles procedure
9145 /// may look for resizing shuffles as the best candidates.
9146 /// \return true if the shuffle results in the non-resizing identity shuffle
9147 /// (and thus can be ignored), false - otherwise.
9148 static bool peekThroughShuffles(Value *&V, SmallVectorImpl<int> &Mask,
9149 bool SinglePermute) {
9150 Value *Op = V;
9151 ShuffleVectorInst *IdentityOp = nullptr;
9152 SmallVector<int> IdentityMask;
9153 while (auto *SV = dyn_cast<ShuffleVectorInst>(Op)) {
9154 // Exit if it is not a fixed vector type or a size-changing shuffle.
9155 auto *SVTy = dyn_cast<FixedVectorType>(SV->getType());
9156 if (!SVTy)
9157 break;
9158 // Remember the identity or broadcast mask, if it is not a resizing
9159 // shuffle. If no better candidates are found, this Op and Mask will be
9160 // used in the final shuffle.
9161 if (isIdentityMask(Mask, SVTy, /*IsStrict=*/false)) {
9162 if (!IdentityOp || !SinglePermute ||
9163 (isIdentityMask(Mask, SVTy, /*IsStrict=*/true) &&
9164 !ShuffleVectorInst::isZeroEltSplatMask(IdentityMask,
9165 IdentityMask.size()))) {
9166 IdentityOp = SV;
9167 // Store the current mask in IdentityMask so that we do not lose this
9168 // info later if IdentityOp is selected as the best candidate for the
9169 // permutation.
9170 IdentityMask.assign(Mask);
9173 // Remember the broadcast mask. If no better candidates are found, this Op
9174 // and Mask will be used in the final shuffle.
9175 // Zero splat can be used as identity too, since it might be used with
9176 // mask <0, 1, 2, ...>, i.e. identity mask without extra reshuffling.
9177 // E.g. if we need to shuffle the vector with the mask <3, 1, 2, 0>, which is
9178 // expensive, and the analysis finds out that the source vector is just a
9179 // broadcast, the original mask can be transformed to the identity mask
9180 // <0, 1, 2, 3>.
9181 // \code
9182 // %0 = shuffle %v, poison, zeroinitializer
9183 // %res = shuffle %0, poison, <3, 1, 2, 0>
9184 // \endcode
9185 // may be transformed to
9186 // \code
9187 // %0 = shuffle %v, poison, zeroinitializer
9188 // %res = shuffle %0, poison, <0, 1, 2, 3>
9189 // \endcode
9190 if (SV->isZeroEltSplat()) {
9191 IdentityOp = SV;
9192 IdentityMask.assign(Mask);
9194 int LocalVF = Mask.size();
9195 if (auto *SVOpTy =
9196 dyn_cast<FixedVectorType>(SV->getOperand(0)->getType()))
9197 LocalVF = SVOpTy->getNumElements();
9198 SmallVector<int> ExtMask(Mask.size(), PoisonMaskElem);
9199 for (auto [Idx, I] : enumerate(Mask)) {
9200 if (I == PoisonMaskElem ||
9201 static_cast<unsigned>(I) >= SV->getShuffleMask().size())
9202 continue;
9203 ExtMask[Idx] = SV->getMaskValue(I);
9205 bool IsOp1Undef = isUndefVector</*isPoisonOnly=*/true>(
9206 SV->getOperand(0),
9207 buildUseMask(LocalVF, ExtMask, UseMask::FirstArg))
9208 .all();
9209 bool IsOp2Undef = isUndefVector</*isPoisonOnly=*/true>(
9210 SV->getOperand(1),
9211 buildUseMask(LocalVF, ExtMask, UseMask::SecondArg))
9212 .all();
9213 if (!IsOp1Undef && !IsOp2Undef) {
9214 // Update mask and mark undef elems.
9215 for (int &I : Mask) {
9216 if (I == PoisonMaskElem)
9217 continue;
9218 if (SV->getMaskValue(I % SV->getShuffleMask().size()) ==
9219 PoisonMaskElem)
9220 I = PoisonMaskElem;
9222 break;
9224 SmallVector<int> ShuffleMask(SV->getShuffleMask());
9225 combineMasks(LocalVF, ShuffleMask, Mask);
9226 Mask.swap(ShuffleMask);
9227 if (IsOp2Undef)
9228 Op = SV->getOperand(0);
9229 else
9230 Op = SV->getOperand(1);
9232 if (auto *OpTy = dyn_cast<FixedVectorType>(Op->getType());
9233 !OpTy || !isIdentityMask(Mask, OpTy, SinglePermute) ||
9234 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())) {
9235 if (IdentityOp) {
9236 V = IdentityOp;
9237 assert(Mask.size() == IdentityMask.size() &&
9238 "Expected masks of same sizes.");
9239 // Clear known poison elements.
9240 for (auto [I, Idx] : enumerate(Mask))
9241 if (Idx == PoisonMaskElem)
9242 IdentityMask[I] = PoisonMaskElem;
9243 Mask.swap(IdentityMask);
9244 auto *Shuffle = dyn_cast<ShuffleVectorInst>(V);
9245 return SinglePermute &&
9246 (isIdentityMask(Mask, cast<FixedVectorType>(V->getType()),
9247 /*IsStrict=*/true) ||
9248 (Shuffle && Mask.size() == Shuffle->getShuffleMask().size() &&
9249 Shuffle->isZeroEltSplat() &&
9250 ShuffleVectorInst::isZeroEltSplatMask(Mask, Mask.size())));
9252 V = Op;
9253 return false;
9255 V = Op;
9256 return true;
9259 /// Smart shuffle instruction emission, walks through shuffles trees and
9260 /// tries to find the best matching vector for the actual shuffle
9261 /// instruction.
9262 template <typename T, typename ShuffleBuilderTy>
9263 static T createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask,
9264 ShuffleBuilderTy &Builder) {
9265 assert(V1 && "Expected at least one vector value.");
9266 if (V2)
9267 Builder.resizeToMatch(V1, V2);
9268 int VF = Mask.size();
9269 if (auto *FTy = dyn_cast<FixedVectorType>(V1->getType()))
9270 VF = FTy->getNumElements();
9271 if (V2 && !isUndefVector</*IsPoisonOnly=*/true>(
9272 V2, buildUseMask(VF, Mask, UseMask::SecondArg))
9273 .all()) {
9274 // Peek through shuffles.
9275 Value *Op1 = V1;
9276 Value *Op2 = V2;
9277 int VF =
9278 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
9279 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem);
9280 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem);
9281 for (int I = 0, E = Mask.size(); I < E; ++I) {
9282 if (Mask[I] < VF)
9283 CombinedMask1[I] = Mask[I];
9284 else
9285 CombinedMask2[I] = Mask[I] - VF;
9287 Value *PrevOp1;
9288 Value *PrevOp2;
9289 do {
9290 PrevOp1 = Op1;
9291 PrevOp2 = Op2;
9292 (void)peekThroughShuffles(Op1, CombinedMask1, /*SinglePermute=*/false);
9293 (void)peekThroughShuffles(Op2, CombinedMask2, /*SinglePermute=*/false);
9294 // Check if we have 2 resizing shuffles - need to peek through operands
9295 // again.
9296 if (auto *SV1 = dyn_cast<ShuffleVectorInst>(Op1))
9297 if (auto *SV2 = dyn_cast<ShuffleVectorInst>(Op2)) {
9298 SmallVector<int> ExtMask1(Mask.size(), PoisonMaskElem);
9299 for (auto [Idx, I] : enumerate(CombinedMask1)) {
9300 if (I == PoisonMaskElem)
9301 continue;
9302 ExtMask1[Idx] = SV1->getMaskValue(I);
9304 SmallBitVector UseMask1 = buildUseMask(
9305 cast<FixedVectorType>(SV1->getOperand(1)->getType())
9306 ->getNumElements(),
9307 ExtMask1, UseMask::SecondArg);
9308 SmallVector<int> ExtMask2(CombinedMask2.size(), PoisonMaskElem);
9309 for (auto [Idx, I] : enumerate(CombinedMask2)) {
9310 if (I == PoisonMaskElem)
9311 continue;
9312 ExtMask2[Idx] = SV2->getMaskValue(I);
9314 SmallBitVector UseMask2 = buildUseMask(
9315 cast<FixedVectorType>(SV2->getOperand(1)->getType())
9316 ->getNumElements(),
9317 ExtMask2, UseMask::SecondArg);
9318 if (SV1->getOperand(0)->getType() ==
9319 SV2->getOperand(0)->getType() &&
9320 SV1->getOperand(0)->getType() != SV1->getType() &&
9321 isUndefVector(SV1->getOperand(1), UseMask1).all() &&
9322 isUndefVector(SV2->getOperand(1), UseMask2).all()) {
9323 Op1 = SV1->getOperand(0);
9324 Op2 = SV2->getOperand(0);
9325 SmallVector<int> ShuffleMask1(SV1->getShuffleMask());
9326 int LocalVF = ShuffleMask1.size();
9327 if (auto *FTy = dyn_cast<FixedVectorType>(Op1->getType()))
9328 LocalVF = FTy->getNumElements();
9329 combineMasks(LocalVF, ShuffleMask1, CombinedMask1);
9330 CombinedMask1.swap(ShuffleMask1);
9331 SmallVector<int> ShuffleMask2(SV2->getShuffleMask());
9332 LocalVF = ShuffleMask2.size();
9333 if (auto *FTy = dyn_cast<FixedVectorType>(Op2->getType()))
9334 LocalVF = FTy->getNumElements();
9335 combineMasks(LocalVF, ShuffleMask2, CombinedMask2);
9336 CombinedMask2.swap(ShuffleMask2);
9339 } while (PrevOp1 != Op1 || PrevOp2 != Op2);
9340 Builder.resizeToMatch(Op1, Op2);
9341 VF = std::max(cast<VectorType>(Op1->getType())
9342 ->getElementCount()
9343 .getKnownMinValue(),
9344 cast<VectorType>(Op2->getType())
9345 ->getElementCount()
9346 .getKnownMinValue());
9347 for (int I = 0, E = Mask.size(); I < E; ++I) {
9348 if (CombinedMask2[I] != PoisonMaskElem) {
9349 assert(CombinedMask1[I] == PoisonMaskElem &&
9350 "Expected undefined mask element");
9351 CombinedMask1[I] = CombinedMask2[I] + (Op1 == Op2 ? 0 : VF);
9354 if (Op1 == Op2 &&
9355 (ShuffleVectorInst::isIdentityMask(CombinedMask1, VF) ||
9356 (ShuffleVectorInst::isZeroEltSplatMask(CombinedMask1, VF) &&
9357 isa<ShuffleVectorInst>(Op1) &&
9358 cast<ShuffleVectorInst>(Op1)->getShuffleMask() ==
9359 ArrayRef(CombinedMask1))))
9360 return Builder.createIdentity(Op1);
9361 return Builder.createShuffleVector(
9362 Op1, Op1 == Op2 ? PoisonValue::get(Op1->getType()) : Op2,
9363 CombinedMask1);
9365 if (isa<PoisonValue>(V1))
9366 return Builder.createPoison(
9367 cast<VectorType>(V1->getType())->getElementType(), Mask.size());
9368 SmallVector<int> NewMask(Mask);
9369 bool IsIdentity = peekThroughShuffles(V1, NewMask, /*SinglePermute=*/true);
9370 assert(V1 && "Expected non-null value after looking through shuffles.");
9372 if (!IsIdentity)
9373 return Builder.createShuffleVector(V1, NewMask);
9374 return Builder.createIdentity(V1);
9377 } // namespace
9379 /// Calculate the scalar and the vector costs from vectorizing set of GEPs.
9380 static std::pair<InstructionCost, InstructionCost>
9381 getGEPCosts(const TargetTransformInfo &TTI, ArrayRef<Value *> Ptrs,
9382 Value *BasePtr, unsigned Opcode, TTI::TargetCostKind CostKind,
9383 Type *ScalarTy, VectorType *VecTy) {
9384 InstructionCost ScalarCost = 0;
9385 InstructionCost VecCost = 0;
9386 // Here we differentiate two cases: (1) when Ptrs represent a regular
9387 // vectorization tree node (as they are pointer arguments of scattered
9388 // loads) or (2) when Ptrs are the arguments of loads or stores being
9389 // vectorized as a plain wide unit-stride load/store since all the
9390 // loads/stores are known to be from/to adjacent locations.
9391 if (Opcode == Instruction::Load || Opcode == Instruction::Store) {
9392 // Case 2: estimate costs for pointer related costs when vectorizing to
9393 // a wide load/store.
9394 // Scalar cost is estimated as a set of pointers with known relationship
9395 // between them.
9396 // For vector code we will use BasePtr as argument for the wide load/store
9397 // but we also need to account for all the instructions which are going to
9398 // stay in vectorized code due to uses outside of these scalar
9399 // loads/stores.
9400 ScalarCost = TTI.getPointersChainCost(
9401 Ptrs, BasePtr, TTI::PointersChainInfo::getUnitStride(), ScalarTy,
9402 CostKind);
9404 SmallVector<const Value *> PtrsRetainedInVecCode;
9405 for (Value *V : Ptrs) {
9406 if (V == BasePtr) {
9407 PtrsRetainedInVecCode.push_back(V);
9408 continue;
9410 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
9411 // For simplicity assume Ptr stays in vectorized code if it's not a
9412 // GEP instruction. We don't care since its cost is considered free.
9413 // TODO: We should check for any uses outside of vectorizable tree
9414 // rather than just single use.
9415 if (!Ptr || !Ptr->hasOneUse())
9416 PtrsRetainedInVecCode.push_back(V);
9419 if (PtrsRetainedInVecCode.size() == Ptrs.size()) {
9420 // If all pointers stay in vectorized code then we don't have
9421 // any savings on that.
9422 return std::make_pair(TTI::TCC_Free, TTI::TCC_Free);
9424 VecCost = TTI.getPointersChainCost(PtrsRetainedInVecCode, BasePtr,
9425 TTI::PointersChainInfo::getKnownStride(),
9426 VecTy, CostKind);
9427 } else {
9428 // Case 1: Ptrs are the arguments of loads that we are going to transform
9429 // into masked gather load intrinsic.
9430 // All the scalar GEPs will be removed as a result of vectorization.
9431 // For any external uses of some lanes, extractelement instructions will
9432 // be generated (whose cost is estimated separately).
9433 TTI::PointersChainInfo PtrsInfo =
9434 all_of(Ptrs,
9435 [](const Value *V) {
9436 auto *Ptr = dyn_cast<GetElementPtrInst>(V);
9437 return Ptr && !Ptr->hasAllConstantIndices();
9439 ? TTI::PointersChainInfo::getUnknownStride()
9440 : TTI::PointersChainInfo::getKnownStride();
9442 ScalarCost =
9443 TTI.getPointersChainCost(Ptrs, BasePtr, PtrsInfo, ScalarTy, CostKind);
9444 auto *BaseGEP = dyn_cast<GEPOperator>(BasePtr);
9445 if (!BaseGEP) {
9446 auto *It = find_if(Ptrs, IsaPred<GEPOperator>);
9447 if (It != Ptrs.end())
9448 BaseGEP = cast<GEPOperator>(*It);
9450 if (BaseGEP) {
9451 SmallVector<const Value *> Indices(BaseGEP->indices());
9452 VecCost = TTI.getGEPCost(BaseGEP->getSourceElementType(),
9453 BaseGEP->getPointerOperand(), Indices, VecTy,
9454 CostKind);
9458 return std::make_pair(ScalarCost, VecCost);
9461 void BoUpSLP::reorderGatherNode(TreeEntry &TE) {
9462 assert(TE.isGather() && TE.ReorderIndices.empty() &&
9463 "Expected gather node without reordering.");
9464 DenseMap<std::pair<size_t, Value *>, SmallVector<LoadInst *>> LoadsMap;
9465 SmallSet<size_t, 2> LoadKeyUsed;
9467 // Do not reorder nodes if the node is small (just 2 elements), all-constant,
9468 // or if all instructions already have the same opcode.
9469 if (TE.Scalars.size() == 2 || (TE.getOpcode() && !TE.isAltShuffle()) ||
9470 all_of(TE.Scalars, isConstant))
9471 return;
9473 if (any_of(seq<unsigned>(TE.Idx), [&](unsigned Idx) {
9474 return VectorizableTree[Idx]->isSame(TE.Scalars);
9476 return;
9478 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
9479 Key = hash_combine(hash_value(LI->getParent()), Key);
9480 Value *Ptr =
9481 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth);
9482 if (LoadKeyUsed.contains(Key)) {
9483 auto LIt = LoadsMap.find(std::make_pair(Key, Ptr));
9484 if (LIt != LoadsMap.end()) {
9485 for (LoadInst *RLI : LIt->second) {
9486 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
9487 LI->getType(), LI->getPointerOperand(), *DL, *SE,
9488 /*StrictCheck=*/true))
9489 return hash_value(RLI->getPointerOperand());
9491 for (LoadInst *RLI : LIt->second) {
9492 if (arePointersCompatible(RLI->getPointerOperand(),
9493 LI->getPointerOperand(), *TLI)) {
9494 hash_code SubKey = hash_value(RLI->getPointerOperand());
9495 return SubKey;
9498 if (LIt->second.size() > 2) {
9499 hash_code SubKey =
9500 hash_value(LIt->second.back()->getPointerOperand());
9501 return SubKey;
9505 LoadKeyUsed.insert(Key);
9506 LoadsMap.try_emplace(std::make_pair(Key, Ptr)).first->second.push_back(LI);
9507 return hash_value(LI->getPointerOperand());
9509 MapVector<size_t, MapVector<size_t, SmallVector<Value *>>> SortedValues;
9510 SmallDenseMap<Value *, SmallVector<unsigned>, 8> KeyToIndex;
9511 bool IsOrdered = true;
9512 unsigned NumInstructions = 0;
9513 // Try to "cluster" scalar instructions, to be able to build extra vectorized
9514 // nodes.
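// E.g. (illustrative), a gather of {load %a, add %x, load %b, add %y} may be
// reordered to {load %a, load %b, add %x, add %y} so that the loads and the
// adds each form a potentially vectorizable sub-vector.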
9515 for (auto [I, V] : enumerate(TE.Scalars)) {
9516 size_t Key = 1, Idx = 1;
9517 if (auto *Inst = dyn_cast<Instruction>(V);
9518 Inst && !isa<ExtractElementInst, LoadInst, CastInst>(V) &&
9519 !isDeleted(Inst) && !isVectorized(V)) {
9520 std::tie(Key, Idx) = generateKeySubkey(V, TLI, GenerateLoadsSubkey,
9521 /*AllowAlternate=*/false);
9522 ++NumInstructions;
9524 auto &Container = SortedValues[Key];
9525 if (IsOrdered && !KeyToIndex.contains(V) &&
9526 !(isa<Constant, ExtractElementInst>(V) ||
9527 isVectorLikeInstWithConstOps(V)) &&
9528 ((Container.contains(Idx) &&
9529 KeyToIndex.at(Container[Idx].back()).back() != I - 1) ||
9530 (!Container.empty() && !Container.contains(Idx) &&
9531 KeyToIndex.at(Container.back().second.back()).back() != I - 1)))
9532 IsOrdered = false;
9533 auto &KTI = KeyToIndex[V];
9534 if (KTI.empty())
9535 Container[Idx].push_back(V);
9536 KTI.push_back(I);
9538 SmallVector<std::pair<unsigned, unsigned>> SubVectors;
9539 APInt DemandedElts = APInt::getAllOnes(TE.Scalars.size());
9540 if (!IsOrdered && NumInstructions > 1) {
9541 unsigned Cnt = 0;
9542 TE.ReorderIndices.resize(TE.Scalars.size(), TE.Scalars.size());
9543 for (const auto &D : SortedValues) {
9544 for (const auto &P : D.second) {
9545 unsigned Sz = 0;
9546 for (Value *V : P.second) {
9547 ArrayRef<unsigned> Indices = KeyToIndex.at(V);
9548 for (auto [K, Idx] : enumerate(Indices)) {
9549 TE.ReorderIndices[Cnt + K] = Idx;
9550 TE.Scalars[Cnt + K] = V;
9552 Sz += Indices.size();
9553 Cnt += Indices.size();
9555 if (Sz > 1 && isa<Instruction>(P.second.front())) {
9556 const unsigned SubVF = getFloorFullVectorNumberOfElements(
9557 *TTI, TE.Scalars.front()->getType(), Sz);
9558 SubVectors.emplace_back(Cnt - Sz, SubVF);
9559 for (unsigned I : seq<unsigned>(Cnt - Sz, Cnt - Sz + SubVF))
9560 DemandedElts.clearBit(I);
9561 } else if (!P.second.empty() && isConstant(P.second.front())) {
9562 for (unsigned I : seq<unsigned>(Cnt - Sz, Cnt))
9563 DemandedElts.clearBit(I);
9568 // Reuses always require shuffles, so consider it as profitable.
9569 if (!TE.ReuseShuffleIndices.empty() || TE.ReorderIndices.empty())
9570 return;
9571 // Do simple cost estimation.
9572 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9573 InstructionCost Cost = 0;
9574 auto *ScalarTy = TE.Scalars.front()->getType();
9575 auto *VecTy = getWidenedType(ScalarTy, TE.Scalars.size());
9576 for (auto [Idx, Sz] : SubVectors) {
9577 Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, VecTy, {}, CostKind,
9578 Idx, getWidenedType(ScalarTy, Sz));
9580 if (auto *FTy = dyn_cast<FixedVectorType>(ScalarTy)) {
9581 assert(SLPReVec && "Only supported by REVEC.");
9582 // If ScalarTy is FixedVectorType, we should use CreateInsertVector instead
9583 // of CreateInsertElement.
9584 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
9585 for (unsigned I : seq<unsigned>(TE.Scalars.size()))
9586 if (DemandedElts[I])
9587 Cost +=
9588 TTI->getShuffleCost(TTI::SK_InsertSubvector, VecTy, std::nullopt,
9589 CostKind, I * ScalarTyNumElements, FTy);
9590 } else {
9591 Cost += TTI->getScalarizationOverhead(VecTy, DemandedElts, /*Insert=*/true,
9592 /*Extract=*/false, CostKind);
9594 int Sz = TE.Scalars.size();
9595 SmallVector<int> ReorderMask(TE.ReorderIndices.begin(),
9596 TE.ReorderIndices.end());
9597 for (unsigned I : seq<unsigned>(Sz)) {
9598 Value *V = TE.getOrdered(I);
9599 if (isa<PoisonValue>(V)) {
9600 ReorderMask[I] = PoisonMaskElem;
9601 } else if (isConstant(V) || DemandedElts[I]) {
9602 ReorderMask[I] = I + TE.ReorderIndices.size();
9605 Cost += ::getShuffleCost(*TTI,
9606 any_of(ReorderMask, [&](int I) { return I >= Sz; })
9607 ? TTI::SK_PermuteTwoSrc
9608 : TTI::SK_PermuteSingleSrc,
9609 VecTy, ReorderMask);
9610 DemandedElts = APInt::getAllOnes(VecTy->getNumElements());
9611 ReorderMask.assign(Sz, PoisonMaskElem);
9612 for (unsigned I : seq<unsigned>(Sz)) {
9613 Value *V = TE.getOrdered(I);
9614 if (isConstant(V)) {
9615 DemandedElts.clearBit(I);
9616 if (!isa<PoisonValue>(V))
9617 ReorderMask[I] = I;
9618 } else {
9619 ReorderMask[I] = I + Sz;
9622 InstructionCost BVCost = TTI->getScalarizationOverhead(
9623 VecTy, DemandedElts, /*Insert=*/true, /*Extract=*/false, CostKind);
9624 if (!DemandedElts.isAllOnes())
9625 BVCost += ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, VecTy, ReorderMask);
9626 if (Cost >= BVCost) {
9627 SmallVector<int> Mask(TE.ReorderIndices.begin(), TE.ReorderIndices.end());
9628 reorderScalars(TE.Scalars, Mask);
9629 TE.ReorderIndices.clear();
9633 void BoUpSLP::transformNodes() {
9634 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
9635 BaseGraphSize = VectorizableTree.size();
9636 // Turn graph transforming mode on, and turn it off when done.
9637 class GraphTransformModeRAAI {
9638 bool &SavedIsGraphTransformMode;
9640 public:
9641 GraphTransformModeRAAI(bool &IsGraphTransformMode)
9642 : SavedIsGraphTransformMode(IsGraphTransformMode) {
9643 IsGraphTransformMode = true;
9645 ~GraphTransformModeRAAI() { SavedIsGraphTransformMode = false; }
9646 } TransformContext(IsGraphTransformMode);
9647 // Operands are profitable if:
9648 // 1. At least one of them is a constant
9649 // or
9650 // 2. They are splats (the same value in both instructions)
9651 // or
9652 // 3. They result in a good vectorization opportunity, i.e. may generate
9653 // vector nodes and reduce the cost of the graph.
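// E.g. (illustrative), pairing (add %x, 1) with (add %y, 2) is considered
// profitable because the second operands are constants, while (add %x, %a)
// with (add %y, %b) has to pass the look-ahead splat-load scoring instead.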
9654 auto CheckOperandsProfitability = [this](Instruction *I1, Instruction *I2,
9655 const InstructionsState &S) {
9656 SmallVector<SmallVector<std::pair<Value *, Value *>>> Candidates;
9657 for (unsigned Op : seq<unsigned>(S.getMainOp()->getNumOperands()))
9658 Candidates.emplace_back().emplace_back(I1->getOperand(Op),
9659 I2->getOperand(Op));
9660 return all_of(
9661 Candidates, [this](ArrayRef<std::pair<Value *, Value *>> Cand) {
9662 return all_of(Cand,
9663 [](const std::pair<Value *, Value *> &P) {
9664 return isa<Constant>(P.first) ||
9665 isa<Constant>(P.second) || P.first == P.second;
9666 }) ||
9667 findBestRootPair(Cand, LookAheadHeuristics::ScoreSplatLoads);
9671 // Try to reorder gather nodes for better vectorization opportunities.
9672 for (unsigned Idx : seq<unsigned>(BaseGraphSize)) {
9673 TreeEntry &E = *VectorizableTree[Idx];
9674 if (E.isGather())
9675 reorderGatherNode(E);
9678 // The tree may grow here, so iterate over nodes, built before.
9679 for (unsigned Idx : seq<unsigned>(BaseGraphSize)) {
9680 TreeEntry &E = *VectorizableTree[Idx];
9681 if (E.isGather()) {
9682 ArrayRef<Value *> VL = E.Scalars;
9683 const unsigned Sz = getVectorElementSize(VL.front());
9684 unsigned MinVF = getMinVF(2 * Sz);
9685 // Do not try partial vectorization for small nodes (<= 2), nodes with the
9686 // same opcode and same parent block or all constants.
9687 if (VL.size() <= 2 || LoadEntriesToVectorize.contains(Idx) ||
9688 !(!E.getOpcode() || E.getOpcode() == Instruction::Load ||
9689 E.isAltShuffle() || !allSameBlock(VL)) ||
9690 allConstant(VL) || isSplat(VL))
9691 continue;
9692 // Try to find vectorizable sequences and transform them into a series of
9693 // insertvector instructions.
9694 unsigned StartIdx = 0;
9695 unsigned End = VL.size();
9696 for (unsigned VF = getFloorFullVectorNumberOfElements(
9697 *TTI, VL.front()->getType(), VL.size() - 1);
9698 VF >= MinVF; VF = getFloorFullVectorNumberOfElements(
9699 *TTI, VL.front()->getType(), VF - 1)) {
9700 if (StartIdx + VF > End)
9701 continue;
9702 SmallVector<std::pair<unsigned, unsigned>> Slices;
9703 for (unsigned Cnt = StartIdx; Cnt + VF <= End; Cnt += VF) {
9704 ArrayRef<Value *> Slice = VL.slice(Cnt, VF);
9705 // If any instruction is vectorized already - do not try again.
9706 // Reuse the existing node, if it fully matches the slice.
9707 if (const TreeEntry *SE = getTreeEntry(Slice.front());
9708 SE || getTreeEntry(Slice.back())) {
9709 if (!SE)
9710 continue;
9711 if (VF != SE->getVectorFactor() || !SE->isSame(Slice))
9712 continue;
9714 // Constant already handled effectively - skip.
9715 if (allConstant(Slice))
9716 continue;
9717 // Do not try to vectorize small splats (smaller than a vector register and
9718 // with only a single non-undef element).
9719 bool IsSplat = isSplat(Slice);
9720 if (Slices.empty() || !IsSplat ||
9721 (VF <= 2 && 2 * std::clamp(TTI->getNumberOfParts(getWidenedType(
9722 Slice.front()->getType(), VF)),
9723 1U, VF - 1) !=
9724 std::clamp(TTI->getNumberOfParts(getWidenedType(
9725 Slice.front()->getType(), 2 * VF)),
9726 1U, 2 * VF)) ||
9727 count(Slice, Slice.front()) ==
9728 static_cast<long>(isa<UndefValue>(Slice.front()) ? VF - 1
9729 : 1)) {
9730 if (IsSplat)
9731 continue;
9732 InstructionsState S = getSameOpcode(Slice, *TLI);
9733 if (!S.getOpcode() || S.isAltShuffle() || !allSameBlock(Slice) ||
9734 (S.getOpcode() == Instruction::Load &&
9735 areKnownNonVectorizableLoads(Slice)) ||
9736 (S.getOpcode() != Instruction::Load && !has_single_bit(VF)))
9737 continue;
9738 if (VF == 2) {
9739 // Try to vectorize reduced values or if all users are vectorized.
9740 // For expensive instructions extra extracts might be profitable.
9741 if ((!UserIgnoreList || E.Idx != 0) &&
9742 TTI->getInstructionCost(S.getMainOp(), CostKind) <
9743 TTI::TCC_Expensive &&
9744 !all_of(Slice, [&](Value *V) {
9745 if (isa<PoisonValue>(V))
9746 return true;
9747 return areAllUsersVectorized(cast<Instruction>(V),
9748 UserIgnoreList);
9750 continue;
9751 if (S.getOpcode() == Instruction::Load) {
9752 OrdersType Order;
9753 SmallVector<Value *> PointerOps;
9754 LoadsState Res =
9755 canVectorizeLoads(Slice, Slice.front(), Order, PointerOps);
9756 // Do not vectorize gathers.
9757 if (Res == LoadsState::ScatterVectorize ||
9758 Res == LoadsState::Gather) {
9759 if (Res == LoadsState::Gather) {
9760 registerNonVectorizableLoads(Slice);
9761 // If reductions and the scalars from the root node are
9762 // analyzed - mark as non-vectorizable reduction.
9763 if (UserIgnoreList && E.Idx == 0)
9764 analyzedReductionVals(Slice);
9766 continue;
9768 } else if (S.getOpcode() == Instruction::ExtractElement ||
9769 (TTI->getInstructionCost(S.getMainOp(), CostKind) <
9770 TTI::TCC_Expensive &&
9771 !CheckOperandsProfitability(
9772 S.getMainOp(),
9773 cast<Instruction>(*find_if(reverse(Slice),
9774 IsaPred<Instruction>)),
9775 S))) {
9776 // Do not vectorize extractelements (handled effectively
9777 // already). Do not vectorize non-profitable instructions (with
9778 // low cost and non-vectorizable operands).
9779 continue;
9783 Slices.emplace_back(Cnt, Slice.size());
9785 auto AddCombinedNode = [&](unsigned Idx, unsigned Cnt, unsigned Sz) {
9786 E.CombinedEntriesWithIndices.emplace_back(Idx, Cnt);
9787 if (StartIdx == Cnt)
9788 StartIdx = Cnt + Sz;
9789 if (End == Cnt + Sz)
9790 End = Cnt;
9792 for (auto [Cnt, Sz] : Slices) {
9793 ArrayRef<Value *> Slice = VL.slice(Cnt, Sz);
9794 // If any instruction is vectorized already - do not try again.
9795 if (TreeEntry *SE = getTreeEntry(Slice.front());
9796 SE || getTreeEntry(Slice.back())) {
9797 if (!SE)
9798 continue;
9799 if (VF != SE->getVectorFactor() || !SE->isSame(Slice))
9800 continue;
9801 SE->UserTreeIndices.emplace_back(&E, UINT_MAX);
9802 AddCombinedNode(SE->Idx, Cnt, Sz);
9803 continue;
9805 unsigned PrevSize = VectorizableTree.size();
9806 [[maybe_unused]] unsigned PrevEntriesSize =
9807 LoadEntriesToVectorize.size();
9808 buildTree_rec(Slice, 0, EdgeInfo(&E, UINT_MAX));
9809 if (PrevSize + 1 == VectorizableTree.size() &&
9810 VectorizableTree[PrevSize]->isGather() &&
9811 VectorizableTree[PrevSize]->getOpcode() !=
9812 Instruction::ExtractElement &&
9813 !isSplat(Slice)) {
9814 if (UserIgnoreList && E.Idx == 0 && VF == 2)
9815 analyzedReductionVals(Slice);
9816 VectorizableTree.pop_back();
9817 assert(PrevEntriesSize == LoadEntriesToVectorize.size() &&
9818 "LoadEntriesToVectorize expected to remain the same");
9819 continue;
9821 AddCombinedNode(PrevSize, Cnt, Sz);
9824 // Restore ordering, if no extra vectorization happened.
9825 if (E.CombinedEntriesWithIndices.empty() && !E.ReorderIndices.empty()) {
9826 SmallVector<int> Mask(E.ReorderIndices.begin(), E.ReorderIndices.end());
9827 reorderScalars(E.Scalars, Mask);
9828 E.ReorderIndices.clear();
9831 switch (E.getOpcode()) {
9832 case Instruction::Load: {
9833 // No need to reorder masked gather loads, just reorder the scalar
9834 // operands.
9835 if (E.State != TreeEntry::Vectorize)
9836 break;
9837 Type *ScalarTy = E.getMainOp()->getType();
9838 auto *VecTy = getWidenedType(ScalarTy, E.Scalars.size());
9839 Align CommonAlignment = computeCommonAlignment<LoadInst>(E.Scalars);
9840 // Check if profitable to represent consecutive load + reverse as strided
9841 // load with stride -1.
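// E.g. (illustrative), loads of a[3], a[2], a[1], a[0] that would otherwise
// become a <4 x ...> load plus a reverse shuffle may instead be emitted as a
// single strided load starting at &a[3] with stride -1, if that is cheaper.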
9842 if (isReverseOrder(E.ReorderIndices) &&
9843 TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
9844 SmallVector<int> Mask;
9845 inversePermutation(E.ReorderIndices, Mask);
9846 auto *BaseLI = cast<LoadInst>(E.Scalars.back());
9847 InstructionCost OriginalVecCost =
9848 TTI->getMemoryOpCost(Instruction::Load, VecTy, BaseLI->getAlign(),
9849 BaseLI->getPointerAddressSpace(), CostKind,
9850 TTI::OperandValueInfo()) +
9851 ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind);
9852 InstructionCost StridedCost = TTI->getStridedMemoryOpCost(
9853 Instruction::Load, VecTy, BaseLI->getPointerOperand(),
9854 /*VariableMask=*/false, CommonAlignment, CostKind, BaseLI);
9855 if (StridedCost < OriginalVecCost)
9856 // Strided load is more profitable than consecutive load + reverse -
9857 // transform the node to strided load.
9858 E.State = TreeEntry::StridedVectorize;
9860 break;
9862 case Instruction::Store: {
9863 Type *ScalarTy =
9864 cast<StoreInst>(E.getMainOp())->getValueOperand()->getType();
9865 auto *VecTy = getWidenedType(ScalarTy, E.Scalars.size());
9866 Align CommonAlignment = computeCommonAlignment<StoreInst>(E.Scalars);
9867 // Check if profitable to represent reverse + consecutive store as a strided
9868 // store with stride -1.
9869 if (isReverseOrder(E.ReorderIndices) &&
9870 TTI->isLegalStridedLoadStore(VecTy, CommonAlignment)) {
9871 SmallVector<int> Mask;
9872 inversePermutation(E.ReorderIndices, Mask);
9873 auto *BaseSI = cast<StoreInst>(E.Scalars.back());
9874 InstructionCost OriginalVecCost =
9875 TTI->getMemoryOpCost(Instruction::Store, VecTy, BaseSI->getAlign(),
9876 BaseSI->getPointerAddressSpace(), CostKind,
9877 TTI::OperandValueInfo()) +
9878 ::getShuffleCost(*TTI, TTI::SK_Reverse, VecTy, Mask, CostKind);
9879 InstructionCost StridedCost = TTI->getStridedMemoryOpCost(
9880 Instruction::Store, VecTy, BaseSI->getPointerOperand(),
9881 /*VariableMask=*/false, CommonAlignment, CostKind, BaseSI);
9882 if (StridedCost < OriginalVecCost)
9883 // Strided store is more profitable than reverse + consecutive store -
9884 // transform the node to strided store.
9885 E.State = TreeEntry::StridedVectorize;
9886 } else if (!E.ReorderIndices.empty()) {
9887 // Check for interleaved stores.
9888 auto IsInterleaveMask = [&, &TTI = *TTI](ArrayRef<int> Mask) {
9889 auto *BaseSI = cast<StoreInst>(E.Scalars.front());
9890 assert(Mask.size() > 1 && "Expected mask greater than 1 element.");
9891 if (Mask.size() < 4)
9892 return 0u;
9893 for (unsigned Factor : seq<unsigned>(2, Mask.size() / 2 + 1)) {
9894 if (ShuffleVectorInst::isInterleaveMask(
9895 Mask, Factor, VecTy->getElementCount().getFixedValue()) &&
9896 TTI.isLegalInterleavedAccessType(
9897 VecTy, Factor, BaseSI->getAlign(),
9898 BaseSI->getPointerAddressSpace()))
9899 return Factor;
9902 return 0u;
9904 SmallVector<int> Mask(E.ReorderIndices.begin(), E.ReorderIndices.end());
9905 unsigned InterleaveFactor = IsInterleaveMask(Mask);
9906 if (InterleaveFactor != 0)
9907 E.setInterleave(InterleaveFactor);
9909 break;
9911 case Instruction::Select: {
9912 if (E.State != TreeEntry::Vectorize)
9913 break;
9914 auto [MinMaxID, SelectOnly] = canConvertToMinOrMaxIntrinsic(E.Scalars);
9915 if (MinMaxID == Intrinsic::not_intrinsic)
9916 break;
9917 // This node is a minmax node.
9918 E.CombinedOp = TreeEntry::MinMax;
9919 TreeEntry *CondEntry = const_cast<TreeEntry *>(getOperandEntry(&E, 0));
9920 if (SelectOnly && CondEntry->UserTreeIndices.size() == 1 &&
9921 CondEntry->State == TreeEntry::Vectorize) {
9922 // The condition node is part of the combined minmax node.
9923 CondEntry->State = TreeEntry::CombinedVectorize;
9925 break;
9927 default:
9928 break;
9932 if (LoadEntriesToVectorize.empty()) {
9933 // Single load node - exit.
9934 if (VectorizableTree.size() <= 1 &&
9935 VectorizableTree.front()->getOpcode() == Instruction::Load)
9936 return;
9937 // Small graph with small VF - exit.
9938 constexpr unsigned SmallTree = 3;
9939 constexpr unsigned SmallVF = 2;
9940 if ((VectorizableTree.size() <= SmallTree &&
9941 VectorizableTree.front()->Scalars.size() == SmallVF) ||
9942 (VectorizableTree.size() <= 2 && UserIgnoreList))
9943 return;
9945 if (VectorizableTree.front()->isNonPowOf2Vec() &&
9946 getCanonicalGraphSize() != getTreeSize() && UserIgnoreList &&
9947 getCanonicalGraphSize() <= SmallTree &&
9948 count_if(ArrayRef(VectorizableTree).drop_front(getCanonicalGraphSize()),
9949 [](const std::unique_ptr<TreeEntry> &TE) {
9950 return TE->isGather() &&
9951 TE->getOpcode() == Instruction::Load &&
9952 !allSameBlock(TE->Scalars);
9953 }) == 1)
9954 return;
9957 // A list of loads to be gathered during the vectorization process. We can
9958 // try to vectorize them at the end, if profitable.
9959 SmallMapVector<std::tuple<BasicBlock *, Value *, Type *>,
9960 SmallVector<SmallVector<std::pair<LoadInst *, int>>>, 8>
9961 GatheredLoads;
9963 for (std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
9964 TreeEntry &E = *TE;
9965 if (E.isGather() &&
9966 (E.getOpcode() == Instruction::Load ||
9967 (!E.getOpcode() && any_of(E.Scalars,
9968 [&](Value *V) {
9969 return isa<LoadInst>(V) &&
9970 !isVectorized(V) &&
9971 !isDeleted(cast<Instruction>(V));
9972 }))) &&
9973 !isSplat(E.Scalars)) {
9974 for (Value *V : E.Scalars) {
9975 auto *LI = dyn_cast<LoadInst>(V);
9976 if (!LI)
9977 continue;
9978 if (isDeleted(LI) || isVectorized(LI) || !LI->isSimple())
9979 continue;
9980 gatherPossiblyVectorizableLoads(
9981 *this, V, *DL, *SE, *TTI,
9982 GatheredLoads[std::make_tuple(
9983 LI->getParent(),
9984 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth),
9985 LI->getType())]);
9989 // Try to vectorize gathered loads if this is not just a gather of loads.
9990 if (!GatheredLoads.empty())
9991 tryToVectorizeGatheredLoads(GatheredLoads);
9994 /// Merges shuffle masks and emits the final shuffle instruction, if required.
9995 /// It supports shuffling of 2 input vectors. It implements lazy shuffle
9996 /// emission: the actual shuffle instruction is generated only if it is really
9997 /// required. Otherwise, the shuffle instruction emission is delayed till the
9998 /// end of the process, to reduce the number of emitted instructions and to
9999 /// simplify further analysis/transformations.
10000 class BoUpSLP::ShuffleCostEstimator : public BaseShuffleAnalysis {
10001 bool IsFinalized = false;
10002 SmallVector<int> CommonMask;
10003 SmallVector<PointerUnion<Value *, const TreeEntry *>, 2> InVectors;
10004 const TargetTransformInfo &TTI;
10005 InstructionCost Cost = 0;
10006 SmallDenseSet<Value *> VectorizedVals;
10007 BoUpSLP &R;
10008 SmallPtrSetImpl<Value *> &CheckedExtracts;
10009 constexpr static TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
10010 /// While set, we are still trying to estimate the cost for the same nodes and
10011 /// can delay the actual cost estimation (virtual shuffle instruction emission).
10012 /// This may help to better estimate the cost if the same nodes must be permuted
10013 /// and allows moving most of the long-shuffle cost estimation to TTI.
10014 bool SameNodesEstimated = true;
10016 static Constant *getAllOnesValue(const DataLayout &DL, Type *Ty) {
10017 if (Ty->getScalarType()->isPointerTy()) {
10018 Constant *Res = ConstantExpr::getIntToPtr(
10019 ConstantInt::getAllOnesValue(
10020 IntegerType::get(Ty->getContext(),
10021 DL.getTypeStoreSizeInBits(Ty->getScalarType()))),
10022 Ty->getScalarType());
10023 if (auto *VTy = dyn_cast<VectorType>(Ty))
10024 Res = ConstantVector::getSplat(VTy->getElementCount(), Res);
10025 return Res;
10027 return Constant::getAllOnesValue(Ty);
10030 InstructionCost getBuildVectorCost(ArrayRef<Value *> VL, Value *Root) {
10031 if ((!Root && allConstant(VL)) || all_of(VL, IsaPred<UndefValue>))
10032 return TTI::TCC_Free;
10033 auto *VecTy = getWidenedType(ScalarTy, VL.size());
10034 InstructionCost GatherCost = 0;
10035 SmallVector<Value *> Gathers(VL);
10036 if (!Root && isSplat(VL)) {
10037 // Found a broadcast of a single scalar, calculate the cost as
10038 // the broadcast.
10039 const auto *It = find_if_not(VL, IsaPred<UndefValue>);
10040 assert(It != VL.end() && "Expected at least one non-undef value.");
10041 // Add broadcast for non-identity shuffle only.
10042 bool NeedShuffle =
10043 count(VL, *It) > 1 &&
10044 (VL.front() != *It || !all_of(VL.drop_front(), IsaPred<UndefValue>));
10045 if (!NeedShuffle) {
10046 if (isa<FixedVectorType>(ScalarTy)) {
10047 assert(SLPReVec && "FixedVectorType is not expected.");
10048 return TTI.getShuffleCost(
10049 TTI::SK_InsertSubvector, VecTy, {}, CostKind,
10050 std::distance(VL.begin(), It) * getNumElements(ScalarTy),
10051 cast<FixedVectorType>(ScalarTy));
10053 return TTI.getVectorInstrCost(Instruction::InsertElement, VecTy,
10054 CostKind, std::distance(VL.begin(), It),
10055 PoisonValue::get(VecTy), *It);
10058 SmallVector<int> ShuffleMask(VL.size(), PoisonMaskElem);
10059 transform(VL, ShuffleMask.begin(), [](Value *V) {
10060 return isa<PoisonValue>(V) ? PoisonMaskElem : 0;
10062 InstructionCost InsertCost =
10063 TTI.getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind, 0,
10064 PoisonValue::get(VecTy), *It);
10065 return InsertCost + ::getShuffleCost(TTI,
10066 TargetTransformInfo::SK_Broadcast,
10067 VecTy, ShuffleMask, CostKind,
10068 /*Index=*/0, /*SubTp=*/nullptr,
10069 /*Args=*/*It);
10071 return GatherCost +
10072 (all_of(Gathers, IsaPred<UndefValue>)
10073 ? TTI::TCC_Free
10074 : R.getGatherCost(Gathers, !Root && VL.equals(Gathers),
10075 ScalarTy));
10078 /// Compute the cost of creating a vector containing the extracted values from
10079 /// \p VL.
10080 InstructionCost
10081 computeExtractCost(ArrayRef<Value *> VL, ArrayRef<int> Mask,
10082 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
10083 unsigned NumParts) {
10084 assert(VL.size() > NumParts && "Unexpected scalarized shuffle.");
10085 unsigned NumElts =
10086 std::accumulate(VL.begin(), VL.end(), 0, [](unsigned Sz, Value *V) {
10087 auto *EE = dyn_cast<ExtractElementInst>(V);
10088 if (!EE)
10089 return Sz;
10090 auto *VecTy = dyn_cast<FixedVectorType>(EE->getVectorOperandType());
10091 if (!VecTy)
10092 return Sz;
10093 return std::max(Sz, VecTy->getNumElements());
10095 // FIXME: this must be moved to TTI for better estimation.
10096 unsigned EltsPerVector = getPartNumElems(VL.size(), NumParts);
10097 auto CheckPerRegistersShuffle = [&](MutableArrayRef<int> Mask,
10098 SmallVectorImpl<unsigned> &Indices)
10099 -> std::optional<TTI::ShuffleKind> {
10100 if (NumElts <= EltsPerVector)
10101 return std::nullopt;
10102 int OffsetReg0 =
10103 alignDown(std::accumulate(Mask.begin(), Mask.end(), INT_MAX,
10104 [](int S, int I) {
10105 if (I == PoisonMaskElem)
10106 return S;
10107 return std::min(S, I);
10109 EltsPerVector);
10110 int OffsetReg1 = OffsetReg0;
10111 DenseSet<int> RegIndices;
10112 // Check if we are trying to permute the same single or 2 input vectors.
10113 TTI::ShuffleKind ShuffleKind = TTI::SK_PermuteSingleSrc;
10114 int FirstRegId = -1;
10115 Indices.assign(1, OffsetReg0);
10116 for (auto [Pos, I] : enumerate(Mask)) {
10117 if (I == PoisonMaskElem)
10118 continue;
10119 int Idx = I - OffsetReg0;
10120 int RegId =
10121 (Idx / NumElts) * NumParts + (Idx % NumElts) / EltsPerVector;
10122 if (FirstRegId < 0)
10123 FirstRegId = RegId;
10124 RegIndices.insert(RegId);
10125 if (RegIndices.size() > 2)
10126 return std::nullopt;
10127 if (RegIndices.size() == 2) {
10128 ShuffleKind = TTI::SK_PermuteTwoSrc;
10129 if (Indices.size() == 1) {
10130 OffsetReg1 = alignDown(
10131 std::accumulate(
10132 std::next(Mask.begin(), Pos), Mask.end(), INT_MAX,
10133 [&](int S, int I) {
10134 if (I == PoisonMaskElem)
10135 return S;
10136 int RegId = ((I - OffsetReg0) / NumElts) * NumParts +
10137 ((I - OffsetReg0) % NumElts) / EltsPerVector;
10138 if (RegId == FirstRegId)
10139 return S;
10140 return std::min(S, I);
10142 EltsPerVector);
10143 Indices.push_back(OffsetReg1 % NumElts);
10145 Idx = I - OffsetReg1;
10147 I = (Idx % NumElts) % EltsPerVector +
10148 (RegId == FirstRegId ? 0 : EltsPerVector);
10150 return ShuffleKind;
10152 InstructionCost Cost = 0;
10154 // Process extracts in blocks of EltsPerVector to check if the source vector
10155 // operand can be re-used directly. If not, add the cost of creating a
10156 // shuffle to extract the values into a vector register.
10157 for (unsigned Part : seq<unsigned>(NumParts)) {
10158 if (!ShuffleKinds[Part])
10159 continue;
10160 ArrayRef<int> MaskSlice = Mask.slice(
10161 Part * EltsPerVector, getNumElems(Mask.size(), EltsPerVector, Part));
10162 SmallVector<int> SubMask(EltsPerVector, PoisonMaskElem);
10163 copy(MaskSlice, SubMask.begin());
10164 SmallVector<unsigned, 2> Indices;
10165 std::optional<TTI::ShuffleKind> RegShuffleKind =
10166 CheckPerRegistersShuffle(SubMask, Indices);
10167 if (!RegShuffleKind) {
10168 if (*ShuffleKinds[Part] != TTI::SK_PermuteSingleSrc ||
10169 !ShuffleVectorInst::isIdentityMask(
10170 MaskSlice, std::max<unsigned>(NumElts, MaskSlice.size())))
10171 Cost +=
10172 ::getShuffleCost(TTI, *ShuffleKinds[Part],
10173 getWidenedType(ScalarTy, NumElts), MaskSlice);
10174 continue;
10176 if (*RegShuffleKind != TTI::SK_PermuteSingleSrc ||
10177 !ShuffleVectorInst::isIdentityMask(SubMask, EltsPerVector)) {
10178 Cost +=
10179 ::getShuffleCost(TTI, *RegShuffleKind,
10180 getWidenedType(ScalarTy, EltsPerVector), SubMask);
10182 const unsigned BaseVF = getFullVectorNumberOfElements(
10183 *R.TTI, VL.front()->getType(), alignTo(NumElts, EltsPerVector));
10184 for (unsigned Idx : Indices) {
10185 assert((Idx + EltsPerVector) <= BaseVF &&
10186 "SK_ExtractSubvector index out of range");
10187 Cost += ::getShuffleCost(TTI, TTI::SK_ExtractSubvector,
10188 getWidenedType(ScalarTy, BaseVF), {}, CostKind,
10189 Idx, getWidenedType(ScalarTy, EltsPerVector));
10191 // Second attempt to check if just a permute has a better cost estimate than
10192 // the subvector extracts.
10193 SubMask.assign(NumElts, PoisonMaskElem);
10194 copy(MaskSlice, SubMask.begin());
10195 InstructionCost OriginalCost = ::getShuffleCost(
10196 TTI, *ShuffleKinds[Part], getWidenedType(ScalarTy, NumElts), SubMask);
10197 if (OriginalCost < Cost)
10198 Cost = OriginalCost;
10200 return Cost;
10202 /// Transforms mask \p CommonMask per given \p Mask to make proper set after
10203 /// shuffle emission.
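/// For example (illustrative), if \p CommonMask was <3, poison, 1, 2> before
/// the (virtual) shuffle was emitted, it becomes <0, poison, 2, 3>: the used
/// lanes now simply address the corresponding lanes of the emitted shuffle.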
10204 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
10205 ArrayRef<int> Mask) {
10206 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10207 if (Mask[Idx] != PoisonMaskElem)
10208 CommonMask[Idx] = Idx;
10210 /// Adds the cost of reshuffling \p E1 and \p E2 (if present), using the given
10211 /// mask \p Mask and register number \p Part, which includes \p SliceSize
10212 /// elements.
10213 void estimateNodesPermuteCost(const TreeEntry &E1, const TreeEntry *E2,
10214 ArrayRef<int> Mask, unsigned Part,
10215 unsigned SliceSize) {
10216 if (SameNodesEstimated) {
10217 // Delay the cost estimation if the same nodes are being reshuffled.
10218 // If we already requested the cost of reshuffling of E1 and E2 before, no
10219 // need to estimate another cost with the sub-Mask, instead include this
10220 // sub-Mask into the CommonMask to estimate it later and avoid double cost
10221 // estimation.
10222 if ((InVectors.size() == 2 &&
10223 cast<const TreeEntry *>(InVectors.front()) == &E1 &&
10224 cast<const TreeEntry *>(InVectors.back()) == E2) ||
10225 (!E2 && cast<const TreeEntry *>(InVectors.front()) == &E1)) {
10226 unsigned Limit = getNumElems(Mask.size(), SliceSize, Part);
10227 assert(all_of(ArrayRef(CommonMask).slice(Part * SliceSize, Limit),
10228 [](int Idx) { return Idx == PoisonMaskElem; }) &&
10229 "Expected all poisoned elements.");
10230 ArrayRef<int> SubMask = ArrayRef(Mask).slice(Part * SliceSize, Limit);
10231 copy(SubMask, std::next(CommonMask.begin(), SliceSize * Part));
10232 return;
10234 // Found non-matching nodes - need to estimate the cost for the matched
10235 // nodes and transform the mask.
10236 Cost += createShuffle(InVectors.front(),
10237 InVectors.size() == 1 ? nullptr : InVectors.back(),
10238 CommonMask);
10239 transformMaskAfterShuffle(CommonMask, CommonMask);
10240 } else if (InVectors.size() == 2) {
10241 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
10242 transformMaskAfterShuffle(CommonMask, CommonMask);
10244 SameNodesEstimated = false;
10245 if (!E2 && InVectors.size() == 1) {
10246 unsigned VF = E1.getVectorFactor();
10247 if (Value *V1 = InVectors.front().dyn_cast<Value *>()) {
10248 VF = std::max(VF,
10249 cast<FixedVectorType>(V1->getType())->getNumElements());
10250 } else {
10251 const auto *E = cast<const TreeEntry *>(InVectors.front());
10252 VF = std::max(VF, E->getVectorFactor());
10254 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10255 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
10256 CommonMask[Idx] = Mask[Idx] + VF;
10257 Cost += createShuffle(InVectors.front(), &E1, CommonMask);
10258 transformMaskAfterShuffle(CommonMask, CommonMask);
10259 } else {
10260 auto P = InVectors.front();
10261 Cost += createShuffle(&E1, E2, Mask);
10262 unsigned VF = Mask.size();
10263 if (Value *V1 = P.dyn_cast<Value *>()) {
10264 VF = std::max(VF,
10265 getNumElements(V1->getType()));
10266 } else {
10267 const auto *E = cast<const TreeEntry *>(P);
10268 VF = std::max(VF, E->getVectorFactor());
10270 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10271 if (Mask[Idx] != PoisonMaskElem)
10272 CommonMask[Idx] = Idx + (InVectors.empty() ? 0 : VF);
10273 Cost += createShuffle(P, InVectors.front(), CommonMask);
10274 transformMaskAfterShuffle(CommonMask, CommonMask);
10278 class ShuffleCostBuilder {
10279 const TargetTransformInfo &TTI;
10281 static bool isEmptyOrIdentity(ArrayRef<int> Mask, unsigned VF) {
10282 int Index = -1;
10283 return Mask.empty() ||
10284 (VF == Mask.size() &&
10285 ShuffleVectorInst::isIdentityMask(Mask, VF)) ||
10286 (ShuffleVectorInst::isExtractSubvectorMask(Mask, VF, Index) &&
10287 Index == 0);
10290 public:
10291 ShuffleCostBuilder(const TargetTransformInfo &TTI) : TTI(TTI) {}
10292 ~ShuffleCostBuilder() = default;
10293 InstructionCost createShuffleVector(Value *V1, Value *,
10294 ArrayRef<int> Mask) const {
10295 // Empty mask or identity mask are free.
10296 unsigned VF =
10297 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
10298 if (isEmptyOrIdentity(Mask, VF))
10299 return TTI::TCC_Free;
10300 return ::getShuffleCost(TTI, TTI::SK_PermuteTwoSrc,
10301 cast<VectorType>(V1->getType()), Mask);
10303 InstructionCost createShuffleVector(Value *V1, ArrayRef<int> Mask) const {
10304 // Empty mask or identity mask are free.
10305 unsigned VF =
10306 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
10307 if (isEmptyOrIdentity(Mask, VF))
10308 return TTI::TCC_Free;
10309 return ::getShuffleCost(TTI, TTI::SK_PermuteSingleSrc,
10310 cast<VectorType>(V1->getType()), Mask);
10312 InstructionCost createIdentity(Value *) const { return TTI::TCC_Free; }
10313 InstructionCost createPoison(Type *Ty, unsigned VF) const {
10314 return TTI::TCC_Free;
10316 void resizeToMatch(Value *&, Value *&) const {}
10319 /// Smart shuffle instruction emission, walks through shuffles trees and
10320 /// tries to find the best matching vector for the actual shuffle
10321 /// instruction.
10322 InstructionCost
10323 createShuffle(const PointerUnion<Value *, const TreeEntry *> &P1,
10324 const PointerUnion<Value *, const TreeEntry *> &P2,
10325 ArrayRef<int> Mask) {
10326 ShuffleCostBuilder Builder(TTI);
10327 SmallVector<int> CommonMask(Mask);
10328 Value *V1 = P1.dyn_cast<Value *>(), *V2 = P2.dyn_cast<Value *>();
10329 unsigned CommonVF = Mask.size();
10330 InstructionCost ExtraCost = 0;
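// Extra cost of casting the given entry to the common scalar type when
// MinBWs demoted it to a narrower integer type (all-constant gathers are
// free).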
10331 auto GetNodeMinBWAffectedCost = [&](const TreeEntry &E,
10332 unsigned VF) -> InstructionCost {
10333 if (E.isGather() && allConstant(E.Scalars))
10334 return TTI::TCC_Free;
10335 Type *EScalarTy = E.Scalars.front()->getType();
10336 bool IsSigned = true;
10337 if (auto It = R.MinBWs.find(&E); It != R.MinBWs.end()) {
10338 EScalarTy = IntegerType::get(EScalarTy->getContext(), It->second.first);
10339 IsSigned = It->second.second;
10341 if (EScalarTy != ScalarTy) {
10342 unsigned CastOpcode = Instruction::Trunc;
10343 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10344 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10345 if (DstSz > SrcSz)
10346 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10347 return TTI.getCastInstrCost(CastOpcode, getWidenedType(ScalarTy, VF),
10348 getWidenedType(EScalarTy, VF),
10349 TTI::CastContextHint::None, CostKind);
10351 return TTI::TCC_Free;
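// Same idea for an already materialized vector value: account for the
// trunc/ext needed to bring its element type to ScalarTy.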
10353 auto GetValueMinBWAffectedCost = [&](const Value *V) -> InstructionCost {
10354 if (isa<Constant>(V))
10355 return TTI::TCC_Free;
10356 auto *VecTy = cast<VectorType>(V->getType());
10357 Type *EScalarTy = VecTy->getElementType();
10358 if (EScalarTy != ScalarTy) {
10359 bool IsSigned = !isKnownNonNegative(V, SimplifyQuery(*R.DL));
10360 unsigned CastOpcode = Instruction::Trunc;
10361 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10362 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10363 if (DstSz > SrcSz)
10364 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10365 return TTI.getCastInstrCost(
10366 CastOpcode, VectorType::get(ScalarTy, VecTy->getElementCount()),
10367 VecTy, TTI::CastContextHint::None, CostKind);
10369 return TTI::TCC_Free;
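// Dispatch on which operands are real vector values and which are tree
// entries, normalize both sides to placeholder vectors of the common vector
// factor and accumulate any MinBWs-related cast costs along the way.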
10371 if (!V1 && !V2 && !P2.isNull()) {
10372 // Shuffle 2 entry nodes.
10373 const TreeEntry *E = cast<const TreeEntry *>(P1);
10374 unsigned VF = E->getVectorFactor();
10375 const TreeEntry *E2 = cast<const TreeEntry *>(P2);
10376 CommonVF = std::max(VF, E2->getVectorFactor());
10377 assert(all_of(Mask,
10378 [=](int Idx) {
10379 return Idx < 2 * static_cast<int>(CommonVF);
10380 }) &&
10381 "All elements in mask must be less than 2 * CommonVF.");
10382 if (E->Scalars.size() == E2->Scalars.size()) {
10383 SmallVector<int> EMask = E->getCommonMask();
10384 SmallVector<int> E2Mask = E2->getCommonMask();
10385 if (!EMask.empty() || !E2Mask.empty()) {
10386 for (int &Idx : CommonMask) {
10387 if (Idx == PoisonMaskElem)
10388 continue;
10389 if (Idx < static_cast<int>(CommonVF) && !EMask.empty())
10390 Idx = EMask[Idx];
10391 else if (Idx >= static_cast<int>(CommonVF))
10392 Idx = (E2Mask.empty() ? Idx - CommonVF : E2Mask[Idx - CommonVF]) +
10393 E->Scalars.size();
10396 CommonVF = E->Scalars.size();
10397 ExtraCost += GetNodeMinBWAffectedCost(*E, CommonVF) +
10398 GetNodeMinBWAffectedCost(*E2, CommonVF);
10399 } else {
10400 ExtraCost += GetNodeMinBWAffectedCost(*E, E->getVectorFactor()) +
10401 GetNodeMinBWAffectedCost(*E2, E2->getVectorFactor());
10403 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10404 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10405 } else if (!V1 && P2.isNull()) {
10406 // Shuffle single entry node.
10407 const TreeEntry *E = cast<const TreeEntry *>(P1);
10408 unsigned VF = E->getVectorFactor();
10409 CommonVF = VF;
10410 assert(
10411 all_of(Mask,
10412 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) &&
10413 "All elements in mask must be less than CommonVF.");
10414 if (E->Scalars.size() == Mask.size() && VF != Mask.size()) {
10415 SmallVector<int> EMask = E->getCommonMask();
10416 assert(!EMask.empty() && "Expected non-empty common mask.");
10417 for (int &Idx : CommonMask) {
10418 if (Idx != PoisonMaskElem)
10419 Idx = EMask[Idx];
10421 CommonVF = E->Scalars.size();
10422 } else if (unsigned Factor = E->getInterleaveFactor();
10423 Factor > 0 && E->Scalars.size() != Mask.size() &&
10424 ShuffleVectorInst::isDeInterleaveMaskOfFactor(CommonMask,
10425 Factor)) {
10426 // Deinterleaved nodes are free.
10427 std::iota(CommonMask.begin(), CommonMask.end(), 0);
10429 ExtraCost += GetNodeMinBWAffectedCost(*E, CommonVF);
10430 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10431 // Not identity/broadcast? Try to see if the original vector is better.
10432 if (!E->ReorderIndices.empty() && CommonVF == E->ReorderIndices.size() &&
10433 CommonVF == CommonMask.size() &&
10434 any_of(enumerate(CommonMask),
10435 [](const auto &&P) {
10436 return P.value() != PoisonMaskElem &&
10437 static_cast<unsigned>(P.value()) != P.index();
10438 }) &&
10439 any_of(CommonMask,
10440 [](int Idx) { return Idx != PoisonMaskElem && Idx != 0; })) {
10441 SmallVector<int> ReorderMask;
10442 inversePermutation(E->ReorderIndices, ReorderMask);
10443 ::addMask(CommonMask, ReorderMask);
10445 } else if (V1 && P2.isNull()) {
10446 // Shuffle single vector.
10447 ExtraCost += GetValueMinBWAffectedCost(V1);
10448 CommonVF = getVF(V1);
10449 assert(
10450 all_of(Mask,
10451 [=](int Idx) { return Idx < static_cast<int>(CommonVF); }) &&
10452 "All elements in mask must be less than CommonVF.");
10453 } else if (V1 && !V2) {
10454 // Shuffle vector and tree node.
10455 unsigned VF = getVF(V1);
10456 const TreeEntry *E2 = cast<const TreeEntry *>(P2);
10457 CommonVF = std::max(VF, E2->getVectorFactor());
10458 assert(all_of(Mask,
10459 [=](int Idx) {
10460 return Idx < 2 * static_cast<int>(CommonVF);
10461 }) &&
10462 "All elements in mask must be less than 2 * CommonVF.");
10463 if (E2->Scalars.size() == VF && VF != CommonVF) {
10464 SmallVector<int> E2Mask = E2->getCommonMask();
10465 assert(!E2Mask.empty() && "Expected non-empty common mask.");
10466 for (int &Idx : CommonMask) {
10467 if (Idx == PoisonMaskElem)
10468 continue;
10469 if (Idx >= static_cast<int>(CommonVF))
10470 Idx = E2Mask[Idx - CommonVF] + VF;
10472 CommonVF = VF;
10474 ExtraCost += GetValueMinBWAffectedCost(V1);
10475 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10476 ExtraCost += GetNodeMinBWAffectedCost(
10477 *E2, std::min(CommonVF, E2->getVectorFactor()));
10478 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10479 } else if (!V1 && V2) {
10480 // Shuffle vector and tree node.
10481 unsigned VF = getVF(V2);
10482 const TreeEntry *E1 = cast<const TreeEntry *>(P1);
10483 CommonVF = std::max(VF, E1->getVectorFactor());
10484 assert(all_of(Mask,
10485 [=](int Idx) {
10486 return Idx < 2 * static_cast<int>(CommonVF);
10487 }) &&
10488 "All elements in mask must be less than 2 * CommonVF.");
10489 if (E1->Scalars.size() == VF && VF != CommonVF) {
10490 SmallVector<int> E1Mask = E1->getCommonMask();
10491 assert(!E1Mask.empty() && "Expected non-empty common mask.");
10492 for (int &Idx : CommonMask) {
10493 if (Idx == PoisonMaskElem)
10494 continue;
10495 if (Idx >= static_cast<int>(CommonVF))
10496 Idx = E1Mask[Idx - CommonVF] + VF;
10497 else
10498 Idx = E1Mask[Idx];
10500 CommonVF = VF;
10502 ExtraCost += GetNodeMinBWAffectedCost(
10503 *E1, std::min(CommonVF, E1->getVectorFactor()));
10504 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10505 ExtraCost += GetValueMinBWAffectedCost(V2);
10506 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10507 } else {
10508 assert(V1 && V2 && "Expected both vectors.");
10509 unsigned VF = getVF(V1);
10510 CommonVF = std::max(VF, getVF(V2));
10511 assert(all_of(Mask,
10512 [=](int Idx) {
10513 return Idx < 2 * static_cast<int>(CommonVF);
10514 }) &&
10515 "All elements in mask must be less than 2 * CommonVF.");
10516 ExtraCost +=
10517 GetValueMinBWAffectedCost(V1) + GetValueMinBWAffectedCost(V2);
10518 if (V1->getType() != V2->getType()) {
10519 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10520 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10521 } else {
10522 if (cast<VectorType>(V1->getType())->getElementType() != ScalarTy)
10523 V1 = Constant::getNullValue(getWidenedType(ScalarTy, CommonVF));
10524 if (cast<VectorType>(V2->getType())->getElementType() != ScalarTy)
10525 V2 = getAllOnesValue(*R.DL, getWidenedType(ScalarTy, CommonVF));
10528 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
10529 assert(SLPReVec && "FixedVectorType is not expected.");
10530 transformScalarShuffleIndiciesToVector(VecTy->getNumElements(),
10531 CommonMask);
10533 InVectors.front() =
10534 Constant::getNullValue(getWidenedType(ScalarTy, CommonMask.size()));
10535 if (InVectors.size() == 2)
10536 InVectors.pop_back();
10537 return ExtraCost + BaseShuffleAnalysis::createShuffle<InstructionCost>(
10538 V1, V2, CommonMask, Builder);
10541 public:
10542 ShuffleCostEstimator(Type *ScalarTy, TargetTransformInfo &TTI,
10543 ArrayRef<Value *> VectorizedVals, BoUpSLP &R,
10544 SmallPtrSetImpl<Value *> &CheckedExtracts)
10545 : BaseShuffleAnalysis(ScalarTy), TTI(TTI),
10546 VectorizedVals(VectorizedVals.begin(), VectorizedVals.end()), R(R),
10547 CheckedExtracts(CheckedExtracts) {}
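/// Accounts for extractelements that become dead after vectorization:
/// credits their scalar cost back, remembers the common vector base and
/// reports via \p UseVecBaseAsInput whether the widened vector base has to
/// be used as the shuffle input instead of the original extract sources.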
10548 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
10549 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
10550 unsigned NumParts, bool &UseVecBaseAsInput) {
10551 UseVecBaseAsInput = false;
10552 if (Mask.empty())
10553 return nullptr;
10554 Value *VecBase = nullptr;
10555 SmallVector<Value *> VL(E->Scalars.begin(), E->Scalars.end());
10556 if (!E->ReorderIndices.empty()) {
10557 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
10558 E->ReorderIndices.end());
10559 reorderScalars(VL, ReorderMask);
10561 // Check if this node can be considered reused, i.e. the same
10562 // extractelements were already vectorized in a previous node.
10563 bool PrevNodeFound = any_of(
10564 ArrayRef(R.VectorizableTree).take_front(E->Idx),
10565 [&](const std::unique_ptr<TreeEntry> &TE) {
10566 return ((!TE->isAltShuffle() &&
10567 TE->getOpcode() == Instruction::ExtractElement) ||
10568 TE->isGather()) &&
10569 all_of(enumerate(TE->Scalars), [&](auto &&Data) {
10570 return VL.size() > Data.index() &&
10571 (Mask[Data.index()] == PoisonMaskElem ||
10572 isa<UndefValue>(VL[Data.index()]) ||
10573 Data.value() == VL[Data.index()]);
10576 SmallPtrSet<Value *, 4> UniqueBases;
10577 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
10578 for (unsigned Part : seq<unsigned>(NumParts)) {
10579 unsigned Limit = getNumElems(VL.size(), SliceSize, Part);
10580 ArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
10581 for (auto [I, V] :
10582 enumerate(ArrayRef(VL).slice(Part * SliceSize, Limit))) {
10583 // Ignore non-extractelement scalars.
10584 if (isa<UndefValue>(V) ||
10585 (!SubMask.empty() && SubMask[I] == PoisonMaskElem))
10586 continue;
10587 // If all users of the instruction are going to be vectorized and this
10588 // instruction itself is not going to be vectorized, consider the
10589 // instruction dead and remove its cost from the final cost of the
10590 // vectorized tree.
10591 // Also, avoid adjusting the cost for extractelements with multiple uses
10592 // in different graph entries.
10593 auto *EE = cast<ExtractElementInst>(V);
10594 VecBase = EE->getVectorOperand();
10595 UniqueBases.insert(VecBase);
10596 const TreeEntry *VE = R.getTreeEntry(V);
10597 if (!CheckedExtracts.insert(V).second ||
10598 !R.areAllUsersVectorized(cast<Instruction>(V), &VectorizedVals) ||
10599 any_of(EE->users(),
10600 [&](User *U) {
10601 return isa<GetElementPtrInst>(U) &&
10602 !R.areAllUsersVectorized(cast<Instruction>(U),
10603 &VectorizedVals);
10604 }) ||
10605 (VE && VE != E))
10606 continue;
10607 std::optional<unsigned> EEIdx = getExtractIndex(EE);
10608 if (!EEIdx)
10609 continue;
10610 unsigned Idx = *EEIdx;
10611 // Take credit for instruction that will become dead.
10612 if (EE->hasOneUse() || !PrevNodeFound) {
10613 Instruction *Ext = EE->user_back();
10614 if (isa<SExtInst, ZExtInst>(Ext) &&
10615 all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
10616 // Use getExtractWithExtendCost() to calculate the cost of
10617 // extractelement/ext pair.
10618 Cost -=
10619 TTI.getExtractWithExtendCost(Ext->getOpcode(), Ext->getType(),
10620 EE->getVectorOperandType(), Idx);
10621 // Add back the cost of s|zext which is subtracted separately.
10622 Cost += TTI.getCastInstrCost(
10623 Ext->getOpcode(), Ext->getType(), EE->getType(),
10624 TTI::getCastContextHint(Ext), CostKind, Ext);
10625 continue;
10628 Cost -= TTI.getVectorInstrCost(*EE, EE->getVectorOperandType(),
10629 CostKind, Idx);
10632 // Check that the gather of extractelements can be represented as just a
10633 // shuffle of the single/two vectors the scalars are extracted from.
10634 // We found a bunch of extractelement instructions that must be gathered
10635 // into a vector and can be represented as a permutation of elements from
10636 // a single input vector or from two input vectors. Already accounted for
10637 // in the reused case, i.e. if the same extractelements were vectorized.
10638 if (!PrevNodeFound)
10639 Cost += computeExtractCost(VL, Mask, ShuffleKinds, NumParts);
10640 InVectors.assign(1, E);
10641 CommonMask.assign(Mask.begin(), Mask.end());
10642 transformMaskAfterShuffle(CommonMask, CommonMask);
10643 SameNodesEstimated = false;
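// The extracts come from several different vector bases spread over
// multiple registers - model the input as a single widened vector base
// instead.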
10644 if (NumParts != 1 && UniqueBases.size() != 1) {
10645 UseVecBaseAsInput = true;
10646 VecBase =
10647 Constant::getNullValue(getWidenedType(ScalarTy, CommonMask.size()));
10649 return VecBase;
10651 /// Checks if the specified entry \p E needs to be delayed because of its
10652 /// dependency nodes.
10653 std::optional<InstructionCost>
10654 needToDelay(const TreeEntry *,
10655 ArrayRef<SmallVector<const TreeEntry *>>) const {
10656 // No need to delay the cost estimation during analysis.
10657 return std::nullopt;
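/// Adds two tree entries and the mask for their shuffling.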
10659 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) {
10660 if (&E1 == &E2) {
10661 assert(all_of(Mask,
10662 [&](int Idx) {
10663 return Idx < static_cast<int>(E1.getVectorFactor());
10664 }) &&
10665 "Expected single vector shuffle mask.");
10666 add(E1, Mask);
10667 return;
10669 if (InVectors.empty()) {
10670 CommonMask.assign(Mask.begin(), Mask.end());
10671 InVectors.assign({&E1, &E2});
10672 return;
10674 assert(!CommonMask.empty() && "Expected non-empty common mask.");
10675 auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
10676 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
10677 if (NumParts == 0 || NumParts >= Mask.size() ||
10678 MaskVecTy->getNumElements() % NumParts != 0 ||
10679 !hasFullVectorsOrPowerOf2(TTI, MaskVecTy->getElementType(),
10680 MaskVecTy->getNumElements() / NumParts))
10681 NumParts = 1;
10682 unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
10683 const auto *It =
10684 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
10685 unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
10686 estimateNodesPermuteCost(E1, &E2, Mask, Part, SliceSize);
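/// Adds a single tree entry and the mask for its shuffling.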
10688 void add(const TreeEntry &E1, ArrayRef<int> Mask) {
10689 if (InVectors.empty()) {
10690 CommonMask.assign(Mask.begin(), Mask.end());
10691 InVectors.assign(1, &E1);
10692 return;
10694 assert(!CommonMask.empty() && "Expected non-empty common mask.");
10695 auto *MaskVecTy = getWidenedType(ScalarTy, Mask.size());
10696 unsigned NumParts = TTI.getNumberOfParts(MaskVecTy);
10697 if (NumParts == 0 || NumParts >= Mask.size() ||
10698 MaskVecTy->getNumElements() % NumParts != 0 ||
10699 !hasFullVectorsOrPowerOf2(TTI, MaskVecTy->getElementType(),
10700 MaskVecTy->getNumElements() / NumParts))
10701 NumParts = 1;
10702 unsigned SliceSize = getPartNumElems(Mask.size(), NumParts);
10703 const auto *It =
10704 find_if(Mask, [](int Idx) { return Idx != PoisonMaskElem; });
10705 unsigned Part = std::distance(Mask.begin(), It) / SliceSize;
10706 estimateNodesPermuteCost(E1, nullptr, Mask, Part, SliceSize);
10707 if (!SameNodesEstimated && InVectors.size() == 1)
10708 InVectors.emplace_back(&E1);
10710 /// Adds 2 input vectors and the mask for their shuffling.
10711 void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
10712 // This may only occur when shuffling 2 vectors of extractelements, which
10713 // were already handled in adjustExtracts.
10714 assert(InVectors.size() == 1 &&
10715 all_of(enumerate(CommonMask),
10716 [&](auto P) {
10717 if (P.value() == PoisonMaskElem)
10718 return Mask[P.index()] == PoisonMaskElem;
10719 auto *EI = cast<ExtractElementInst>(
10720 cast<const TreeEntry *>(InVectors.front())
10721 ->getOrdered(P.index()));
10722 return EI->getVectorOperand() == V1 ||
10723 EI->getVectorOperand() == V2;
10724 }) &&
10725 "Expected extractelement vectors.");
10727 /// Adds one more input vector and the mask for the shuffling.
10728 void add(Value *V1, ArrayRef<int> Mask, bool ForExtracts = false) {
10729 if (InVectors.empty()) {
10730 assert(CommonMask.empty() && !ForExtracts &&
10731 "Expected empty input mask/vectors.");
10732 CommonMask.assign(Mask.begin(), Mask.end());
10733 InVectors.assign(1, V1);
10734 return;
10736 if (ForExtracts) {
10737 // No need to add vectors here; they were already handled in adjustExtracts.
10738 assert(
10739 InVectors.size() == 1 && isa<const TreeEntry *>(InVectors.front()) &&
10740 !CommonMask.empty() &&
10741 all_of(enumerate(CommonMask),
10742 [&](auto P) {
10743 Value *Scalar =
10744 InVectors.front().get<const TreeEntry *>()->getOrdered(
10745 P.index());
10746 if (P.value() == PoisonMaskElem)
10747 return P.value() == Mask[P.index()] ||
10748 isa<UndefValue>(Scalar);
10749 if (isa<Constant>(V1))
10750 return true;
10751 auto *EI = cast<ExtractElementInst>(Scalar);
10752 return EI->getVectorOperand() == V1;
10753 }) &&
10754 "Expected only tree entry for extractelement vectors.");
10755 return;
10757 assert(!InVectors.empty() && !CommonMask.empty() &&
10758 "Expected only tree entries from extracts/reused buildvectors.");
10759 unsigned VF = getVF(V1);
10760 if (InVectors.size() == 2) {
10761 Cost += createShuffle(InVectors.front(), InVectors.back(), CommonMask);
10762 transformMaskAfterShuffle(CommonMask, CommonMask);
10763 VF = std::max<unsigned>(VF, CommonMask.size());
10764 } else if (const auto *InTE =
10765 InVectors.front().dyn_cast<const TreeEntry *>()) {
10766 VF = std::max(VF, InTE->getVectorFactor());
10767 } else {
10768 VF = std::max(
10769 VF, cast<FixedVectorType>(cast<Value *>(InVectors.front())->getType())
10770 ->getNumElements());
10772 InVectors.push_back(V1);
10773 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10774 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
10775 CommonMask[Idx] = Mask[Idx] + VF;
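/// Adds the build-vector cost for the non-vectorized scalars \p VL and
/// returns a constant placeholder vector that only carries the type; the
/// actual element values do not matter for the cost estimation.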
10777 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
10778 Value *Root = nullptr) {
10779 Cost += getBuildVectorCost(VL, Root);
10780 if (!Root) {
10781 // FIXME: Need to find a way to avoid use of getNullValue here.
10782 SmallVector<Constant *> Vals;
10783 unsigned VF = VL.size();
10784 if (MaskVF != 0)
10785 VF = std::min(VF, MaskVF);
10786 for (Value *V : VL.take_front(VF)) {
10787 if (isa<UndefValue>(V)) {
10788 Vals.push_back(cast<Constant>(V));
10789 continue;
10791 Vals.push_back(Constant::getNullValue(V->getType()));
10793 if (auto *VecTy = dyn_cast<FixedVectorType>(Vals.front()->getType())) {
10794 assert(SLPReVec && "FixedVectorType is not expected.");
10795 // When REVEC is enabled, we need to expand vector types into scalar
10796 // types.
10797 unsigned VecTyNumElements = VecTy->getNumElements();
10798 SmallVector<Constant *> NewVals(VF * VecTyNumElements, nullptr);
10799 for (auto [I, V] : enumerate(Vals)) {
10800 Type *ScalarTy = V->getType()->getScalarType();
10801 Constant *NewVal;
10802 if (isa<PoisonValue>(V))
10803 NewVal = PoisonValue::get(ScalarTy);
10804 else if (isa<UndefValue>(V))
10805 NewVal = UndefValue::get(ScalarTy);
10806 else
10807 NewVal = Constant::getNullValue(ScalarTy);
10808 std::fill_n(NewVals.begin() + I * VecTyNumElements, VecTyNumElements,
10809 NewVal);
10811 Vals.swap(NewVals);
10813 return ConstantVector::get(Vals);
10815 return ConstantVector::getSplat(
10816 ElementCount::getFixed(
10817 cast<FixedVectorType>(Root->getType())->getNumElements()),
10818 getAllOnesValue(*R.DL, ScalarTy->getScalarType()));
10820 InstructionCost createFreeze(InstructionCost Cost) { return Cost; }
10821 /// Finalize emission of the shuffles.
10822 InstructionCost
10823 finalize(ArrayRef<int> ExtMask,
10824 ArrayRef<std::pair<const TreeEntry *, unsigned>> SubVectors,
10825 ArrayRef<int> SubVectorsMask, unsigned VF = 0,
10826 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
10827 IsFinalized = true;
10828 if (Action) {
10829 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front();
10830 if (InVectors.size() == 2)
10831 Cost += createShuffle(Vec, InVectors.back(), CommonMask);
10832 else
10833 Cost += createShuffle(Vec, nullptr, CommonMask);
10834 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10835 if (CommonMask[Idx] != PoisonMaskElem)
10836 CommonMask[Idx] = Idx;
10837 assert(VF > 0 &&
10838 "Expected vector length for the final value before action.");
10839 Value *V = cast<Value *>(Vec);
10840 Action(V, CommonMask);
10841 InVectors.front() = V;
10843 if (!SubVectors.empty()) {
10844 const PointerUnion<Value *, const TreeEntry *> &Vec = InVectors.front();
10845 if (InVectors.size() == 2)
10846 Cost += createShuffle(Vec, InVectors.back(), CommonMask);
10847 else
10848 Cost += createShuffle(Vec, nullptr, CommonMask);
10849 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
10850 if (CommonMask[Idx] != PoisonMaskElem)
10851 CommonMask[Idx] = Idx;
10852 // Add subvectors permutation cost.
10853 if (!SubVectorsMask.empty()) {
10854 assert(SubVectorsMask.size() <= CommonMask.size() &&
10855 "Expected same size of masks for subvectors and common mask.");
10856 SmallVector<int> SVMask(CommonMask.size(), PoisonMaskElem);
10857 copy(SubVectorsMask, SVMask.begin());
10858 for (auto [I1, I2] : zip(SVMask, CommonMask)) {
10859 if (I2 != PoisonMaskElem) {
10860 assert(I1 == PoisonMaskElem && "Expected unused subvectors mask");
10861 I1 = I2 + CommonMask.size();
10864 Cost += ::getShuffleCost(TTI, TTI::SK_PermuteTwoSrc,
10865 getWidenedType(ScalarTy, CommonMask.size()),
10866 SVMask, CostKind);
10868 for (auto [E, Idx] : SubVectors) {
10869 Type *EScalarTy = E->Scalars.front()->getType();
10870 bool IsSigned = true;
10871 if (auto It = R.MinBWs.find(E); It != R.MinBWs.end()) {
10872 EScalarTy =
10873 IntegerType::get(EScalarTy->getContext(), It->second.first);
10874 IsSigned = It->second.second;
10876 if (ScalarTy != EScalarTy) {
10877 unsigned CastOpcode = Instruction::Trunc;
10878 unsigned DstSz = R.DL->getTypeSizeInBits(ScalarTy);
10879 unsigned SrcSz = R.DL->getTypeSizeInBits(EScalarTy);
10880 if (DstSz > SrcSz)
10881 CastOpcode = IsSigned ? Instruction::SExt : Instruction::ZExt;
10882 Cost += TTI.getCastInstrCost(
10883 CastOpcode, getWidenedType(ScalarTy, E->getVectorFactor()),
10884 getWidenedType(EScalarTy, E->getVectorFactor()),
10885 TTI::CastContextHint::Normal, CostKind);
10887 Cost += ::getShuffleCost(
10888 TTI, TTI::SK_InsertSubvector,
10889 getWidenedType(ScalarTy, CommonMask.size()), {}, CostKind, Idx,
10890 getWidenedType(ScalarTy, E->getVectorFactor()));
10891 if (!CommonMask.empty()) {
10892 std::iota(std::next(CommonMask.begin(), Idx),
10893 std::next(CommonMask.begin(), Idx + E->getVectorFactor()),
10894 Idx);
10899 ::addMask(CommonMask, ExtMask, /*ExtendingManyInputs=*/true);
10900 if (CommonMask.empty()) {
10901 assert(InVectors.size() == 1 && "Expected only one vector with no mask");
10902 return Cost;
10904 return Cost +
10905 createShuffle(InVectors.front(),
10906 InVectors.size() == 2 ? InVectors.back() : nullptr,
10907 CommonMask);
10910 ~ShuffleCostEstimator() {
10911 assert((IsFinalized || CommonMask.empty()) &&
10912 "Shuffle construction must be finalized.");
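/// Returns the tree entry that defines operand \p Idx of entry \p E: either
/// the matching vectorized node or the gather node attached to that edge.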
10916 const BoUpSLP::TreeEntry *BoUpSLP::getOperandEntry(const TreeEntry *E,
10917 unsigned Idx) const {
10918 if (const TreeEntry *VE = getMatchedVectorizedOperand(E, Idx))
10919 return VE;
10920 const auto *It =
10921 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
10922 return TE->isGather() &&
10923 find_if(TE->UserTreeIndices, [&](const EdgeInfo &EI) {
10924 return EI.EdgeIdx == Idx && EI.UserTE == E;
10925 }) != TE->UserTreeIndices.end();
10927 assert(It != VectorizableTree.end() && "Expected vectorizable entry.");
10928 return It->get();
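/// Picks the cast context hint for \p TE: gather/scatter for strided or
/// scattered accesses, reversed/normal for vectorized loads depending on
/// their reorder mask, none otherwise.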
10931 TTI::CastContextHint BoUpSLP::getCastContextHint(const TreeEntry &TE) const {
10932 if (TE.State == TreeEntry::ScatterVectorize ||
10933 TE.State == TreeEntry::StridedVectorize)
10934 return TTI::CastContextHint::GatherScatter;
10935 if (TE.State == TreeEntry::Vectorize && TE.getOpcode() == Instruction::Load &&
10936 !TE.isAltShuffle()) {
10937 if (TE.ReorderIndices.empty())
10938 return TTI::CastContextHint::Normal;
10939 SmallVector<int> Mask;
10940 inversePermutation(TE.ReorderIndices, Mask);
10941 if (ShuffleVectorInst::isReverseMask(Mask, Mask.size()))
10942 return TTI::CastContextHint::Reversed;
10944 return TTI::CastContextHint::None;
10947 /// Builds the vector of argument types for the given call instruction with
10948 /// the given \p ID and the specified vector factor.
10949 static SmallVector<Type *> buildIntrinsicArgTypes(const CallInst *CI,
10950 const Intrinsic::ID ID,
10951 const unsigned VF,
10952 unsigned MinBW) {
10953 SmallVector<Type *> ArgTys;
10954 for (auto [Idx, Arg] : enumerate(CI->args())) {
10955 if (ID != Intrinsic::not_intrinsic) {
10956 if (isVectorIntrinsicWithScalarOpAtArg(ID, Idx)) {
10957 ArgTys.push_back(Arg->getType());
10958 continue;
10960 if (MinBW > 0) {
10961 ArgTys.push_back(
10962 getWidenedType(IntegerType::get(CI->getContext(), MinBW), VF));
10963 continue;
10966 ArgTys.push_back(getWidenedType(Arg->getType(), VF));
10968 return ArgTys;
10971 InstructionCost
10972 BoUpSLP::getEntryCost(const TreeEntry *E, ArrayRef<Value *> VectorizedVals,
10973 SmallPtrSetImpl<Value *> &CheckedExtracts) {
10974 ArrayRef<Value *> VL = E->Scalars;
10976 Type *ScalarTy = getValueType(VL[0]);
10977 if (!isValidElementType(ScalarTy))
10978 return InstructionCost::getInvalid();
10979 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
10981 // If we have computed a smaller type for the expression, update VecTy so
10982 // that the costs will be accurate.
10983 auto It = MinBWs.find(E);
10984 Type *OrigScalarTy = ScalarTy;
10985 if (It != MinBWs.end()) {
10986 auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy);
10987 ScalarTy = IntegerType::get(F->getContext(), It->second.first);
10988 if (VecTy)
10989 ScalarTy = getWidenedType(ScalarTy, VecTy->getNumElements());
10991 auto *VecTy = getWidenedType(ScalarTy, VL.size());
10992 unsigned EntryVF = E->getVectorFactor();
10993 auto *FinalVecTy = getWidenedType(ScalarTy, EntryVF);
10995 bool NeedToShuffleReuses = !E->ReuseShuffleIndices.empty();
10996 if (E->isGather()) {
10997 if (allConstant(VL))
10998 return 0;
10999 if (isa<InsertElementInst>(VL[0]))
11000 return InstructionCost::getInvalid();
11001 if (isa<CmpInst>(VL.front()))
11002 ScalarTy = VL.front()->getType();
11003 return processBuildVector<ShuffleCostEstimator, InstructionCost>(
11004 E, ScalarTy, *TTI, VectorizedVals, *this, CheckedExtracts);
11006 InstructionCost CommonCost = 0;
11007 SmallVector<int> Mask;
11008 bool IsReverseOrder = isReverseOrder(E->ReorderIndices);
11009 if (!E->ReorderIndices.empty() &&
11010 (E->State != TreeEntry::StridedVectorize || !IsReverseOrder)) {
11011 SmallVector<int> NewMask;
11012 if (E->getOpcode() == Instruction::Store) {
11013 // For stores the order is actually a mask.
11014 NewMask.resize(E->ReorderIndices.size());
11015 copy(E->ReorderIndices, NewMask.begin());
11016 } else {
11017 inversePermutation(E->ReorderIndices, NewMask);
11019 ::addMask(Mask, NewMask);
11021 if (NeedToShuffleReuses)
11022 ::addMask(Mask, E->ReuseShuffleIndices);
11023 if (!Mask.empty() && !ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
11024 CommonCost =
11025 ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, FinalVecTy, Mask);
11026 assert((E->State == TreeEntry::Vectorize ||
11027 E->State == TreeEntry::ScatterVectorize ||
11028 E->State == TreeEntry::StridedVectorize) &&
11029 "Unhandled state");
11030 assert(E->getOpcode() &&
11031 ((allSameType(VL) && allSameBlock(VL)) ||
11032 (E->getOpcode() == Instruction::GetElementPtr &&
11033 E->getMainOp()->getType()->isPointerTy())) &&
11034 "Invalid VL");
11035 Instruction *VL0 = E->getMainOp();
11036 unsigned ShuffleOrOp =
11037 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
11038 if (E->CombinedOp != TreeEntry::NotCombinedOp)
11039 ShuffleOrOp = E->CombinedOp;
11040 SmallSetVector<Value *, 16> UniqueValues(VL.begin(), VL.end());
11041 const unsigned Sz = UniqueValues.size();
11042 SmallBitVector UsedScalars(Sz, false);
11043 for (unsigned I = 0; I < Sz; ++I) {
11044 if (isa<Instruction>(UniqueValues[I]) && getTreeEntry(UniqueValues[I]) == E)
11045 continue;
11046 UsedScalars.set(I);
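// Picks a cast context hint for an operand value: reuses the hint of its
// vectorized tree entry if there is one, otherwise falls back to
// gather/scatter when the source operand is a bunch of scalar loads.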
11048 auto GetCastContextHint = [&](Value *V) {
11049 if (const TreeEntry *OpTE = getTreeEntry(V))
11050 return getCastContextHint(*OpTE);
11051 InstructionsState SrcState = getSameOpcode(E->getOperand(0), *TLI);
11052 if (SrcState.getOpcode() == Instruction::Load && !SrcState.isAltShuffle())
11053 return TTI::CastContextHint::GatherScatter;
11054 return TTI::CastContextHint::None;
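// Common helper: returns the vector cost produced by \p VectorCost (which
// sees CommonCost) minus the summed per-scalar costs from \p ScalarEltCost,
// additionally charging a trunc/ext when this node was demoted to a
// narrower type than its user expects.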
11056 auto GetCostDiff =
11057 [=](function_ref<InstructionCost(unsigned)> ScalarEltCost,
11058 function_ref<InstructionCost(InstructionCost)> VectorCost) {
11059 // Calculate the cost of this instruction.
11060 InstructionCost ScalarCost = 0;
11061 if (isa<CastInst, CallInst>(VL0)) {
11062 // For some of the instructions there is no need to calculate the cost
11063 // for each particular instruction; we can use the cost of a single
11064 // instruction x the total number of scalar instructions.
11065 ScalarCost = (Sz - UsedScalars.count()) * ScalarEltCost(0);
11066 } else {
11067 for (unsigned I = 0; I < Sz; ++I) {
11068 if (UsedScalars.test(I))
11069 continue;
11070 ScalarCost += ScalarEltCost(I);
11074 InstructionCost VecCost = VectorCost(CommonCost);
11075 // Check if the current node must be resized, if the parent node is not
11076 // resized.
11077 if (It != MinBWs.end() && !UnaryInstruction::isCast(E->getOpcode()) &&
11078 E->Idx != 0 &&
11079 (E->getOpcode() != Instruction::Load ||
11080 !E->UserTreeIndices.empty())) {
11081 const EdgeInfo &EI =
11082 *find_if(E->UserTreeIndices, [](const EdgeInfo &EI) {
11083 return !EI.UserTE->isGather() || EI.EdgeIdx != UINT_MAX;
11085 if (EI.UserTE->getOpcode() != Instruction::Select ||
11086 EI.EdgeIdx != 0) {
11087 auto UserBWIt = MinBWs.find(EI.UserTE);
11088 Type *UserScalarTy =
11089 EI.UserTE->getOperand(EI.EdgeIdx).front()->getType();
11090 if (UserBWIt != MinBWs.end())
11091 UserScalarTy = IntegerType::get(ScalarTy->getContext(),
11092 UserBWIt->second.first);
11093 if (ScalarTy != UserScalarTy) {
11094 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
11095 unsigned SrcBWSz = DL->getTypeSizeInBits(UserScalarTy);
11096 unsigned VecOpcode;
11097 auto *UserVecTy = getWidenedType(UserScalarTy, E->Scalars.size());
11098 if (BWSz > SrcBWSz)
11099 VecOpcode = Instruction::Trunc;
11100 else
11101 VecOpcode =
11102 It->second.second ? Instruction::SExt : Instruction::ZExt;
11103 TTI::CastContextHint CCH = GetCastContextHint(VL0);
11104 VecCost += TTI->getCastInstrCost(VecOpcode, UserVecTy, VecTy, CCH,
11105 CostKind);
11109 LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecCost - CommonCost,
11110 ScalarCost, "Calculated costs for Tree"));
11111 return VecCost - ScalarCost;
11113 // Calculate the cost difference from vectorizing a set of GEPs.
11114 // A negative value means vectorizing is profitable.
11115 auto GetGEPCostDiff = [=](ArrayRef<Value *> Ptrs, Value *BasePtr) {
11116 assert((E->State == TreeEntry::Vectorize ||
11117 E->State == TreeEntry::StridedVectorize) &&
11118 "Entry state expected to be Vectorize or StridedVectorize here.");
11119 InstructionCost ScalarCost = 0;
11120 InstructionCost VecCost = 0;
11121 std::tie(ScalarCost, VecCost) = getGEPCosts(
11122 *TTI, Ptrs, BasePtr, E->getOpcode(), CostKind, OrigScalarTy, VecTy);
11123 LLVM_DEBUG(dumpTreeCosts(E, 0, VecCost, ScalarCost,
11124 "Calculated GEPs cost for Tree"));
11126 return VecCost - ScalarCost;
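// Cost of replacing a cmp+select pair with the matching min/max intrinsic;
// used for both the scalar and the vector form of the pattern.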
11129 auto GetMinMaxCost = [&](Type *Ty, Instruction *VI = nullptr) {
11130 auto [MinMaxID, SelectOnly] = canConvertToMinOrMaxIntrinsic(VI ? VI : VL);
11131 if (MinMaxID == Intrinsic::not_intrinsic)
11132 return InstructionCost::getInvalid();
11133 Type *CanonicalType = Ty;
11134 if (CanonicalType->isPtrOrPtrVectorTy())
11135 CanonicalType = CanonicalType->getWithNewType(IntegerType::get(
11136 CanonicalType->getContext(),
11137 DL->getTypeSizeInBits(CanonicalType->getScalarType())));
11139 IntrinsicCostAttributes CostAttrs(MinMaxID, CanonicalType,
11140 {CanonicalType, CanonicalType});
11141 InstructionCost IntrinsicCost =
11142 TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
11143 // If the selects are the only uses of the compares, the compares will be
11144 // dead and we can adjust the cost by subtracting their cost.
11145 if (VI && SelectOnly) {
11146 assert((!Ty->isVectorTy() || SLPReVec) &&
11147 "Expected only for scalar type.");
11148 auto *CI = cast<CmpInst>(VI->getOperand(0));
11149 IntrinsicCost -= TTI->getCmpSelInstrCost(
11150 CI->getOpcode(), Ty, Builder.getInt1Ty(), CI->getPredicate(),
11151 CostKind, {TTI::OK_AnyValue, TTI::OP_None},
11152 {TTI::OK_AnyValue, TTI::OP_None}, CI);
11154 return IntrinsicCost;
11156 switch (ShuffleOrOp) {
11157 case Instruction::PHI: {
11158 // Count reused scalars.
11159 InstructionCost ScalarCost = 0;
11160 SmallPtrSet<const TreeEntry *, 4> CountedOps;
11161 for (Value *V : UniqueValues) {
11162 auto *PHI = dyn_cast<PHINode>(V);
11163 if (!PHI)
11164 continue;
11166 ValueList Operands(PHI->getNumIncomingValues(), nullptr);
11167 for (unsigned I = 0, N = PHI->getNumIncomingValues(); I < N; ++I) {
11168 Value *Op = PHI->getIncomingValue(I);
11169 Operands[I] = Op;
11171 if (const TreeEntry *OpTE = getTreeEntry(Operands.front()))
11172 if (OpTE->isSame(Operands) && CountedOps.insert(OpTE).second)
11173 if (!OpTE->ReuseShuffleIndices.empty())
11174 ScalarCost += TTI::TCC_Basic * (OpTE->ReuseShuffleIndices.size() -
11175 OpTE->Scalars.size());
11178 return CommonCost - ScalarCost;
11180 case Instruction::ExtractValue:
11181 case Instruction::ExtractElement: {
11182 auto GetScalarCost = [&](unsigned Idx) {
11183 if (isa<PoisonValue>(UniqueValues[Idx]))
11184 return InstructionCost(TTI::TCC_Free);
11186 auto *I = cast<Instruction>(UniqueValues[Idx]);
11187 VectorType *SrcVecTy;
11188 if (ShuffleOrOp == Instruction::ExtractElement) {
11189 auto *EE = cast<ExtractElementInst>(I);
11190 SrcVecTy = EE->getVectorOperandType();
11191 } else {
11192 auto *EV = cast<ExtractValueInst>(I);
11193 Type *AggregateTy = EV->getAggregateOperand()->getType();
11194 unsigned NumElts;
11195 if (auto *ATy = dyn_cast<ArrayType>(AggregateTy))
11196 NumElts = ATy->getNumElements();
11197 else
11198 NumElts = AggregateTy->getStructNumElements();
11199 SrcVecTy = getWidenedType(OrigScalarTy, NumElts);
11201 if (I->hasOneUse()) {
11202 Instruction *Ext = I->user_back();
11203 if ((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
11204 all_of(Ext->users(), IsaPred<GetElementPtrInst>)) {
11205 // Use getExtractWithExtendCost() to calculate the cost of
11206 // extractelement/ext pair.
11207 InstructionCost Cost = TTI->getExtractWithExtendCost(
11208 Ext->getOpcode(), Ext->getType(), SrcVecTy, *getExtractIndex(I));
11209 // Subtract the cost of s|zext which is subtracted separately.
11210 Cost -= TTI->getCastInstrCost(
11211 Ext->getOpcode(), Ext->getType(), I->getType(),
11212 TTI::getCastContextHint(Ext), CostKind, Ext);
11213 return Cost;
11216 return TTI->getVectorInstrCost(Instruction::ExtractElement, SrcVecTy,
11217 CostKind, *getExtractIndex(I));
11219 auto GetVectorCost = [](InstructionCost CommonCost) { return CommonCost; };
11220 return GetCostDiff(GetScalarCost, GetVectorCost);
11222 case Instruction::InsertElement: {
11223 assert(E->ReuseShuffleIndices.empty() &&
11224 "Unique insertelements only are expected.");
11225 auto *SrcVecTy = cast<FixedVectorType>(VL0->getType());
11226 unsigned const NumElts = SrcVecTy->getNumElements();
11227 unsigned const NumScalars = VL.size();
11229 unsigned NumOfParts = TTI->getNumberOfParts(SrcVecTy);
11231 SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
11232 unsigned OffsetBeg = *getElementIndex(VL.front());
11233 unsigned OffsetEnd = OffsetBeg;
11234 InsertMask[OffsetBeg] = 0;
11235 for (auto [I, V] : enumerate(VL.drop_front())) {
11236 unsigned Idx = *getElementIndex(V);
11237 if (OffsetBeg > Idx)
11238 OffsetBeg = Idx;
11239 else if (OffsetEnd < Idx)
11240 OffsetEnd = Idx;
11241 InsertMask[Idx] = I + 1;
11243 unsigned VecScalarsSz = PowerOf2Ceil(NumElts);
11244 if (NumOfParts > 0 && NumOfParts < NumElts)
11245 VecScalarsSz = PowerOf2Ceil((NumElts + NumOfParts - 1) / NumOfParts);
11246 unsigned VecSz = (1 + OffsetEnd / VecScalarsSz - OffsetBeg / VecScalarsSz) *
11247 VecScalarsSz;
11248 unsigned Offset = VecScalarsSz * (OffsetBeg / VecScalarsSz);
11249 unsigned InsertVecSz = std::min<unsigned>(
11250 PowerOf2Ceil(OffsetEnd - OffsetBeg + 1),
11251 ((OffsetEnd - OffsetBeg + VecScalarsSz) / VecScalarsSz) * VecScalarsSz);
11252 bool IsWholeSubvector =
11253 OffsetBeg == Offset && ((OffsetEnd + 1) % VecScalarsSz == 0);
11254 // Check if we can safely insert a subvector. If it is not possible, just
11255 // generate a whole-sized vector and shuffle the source vector and the new
11256 // subvector.
11257 if (OffsetBeg + InsertVecSz > VecSz) {
11258 // Align OffsetBeg to generate correct mask.
11259 OffsetBeg = alignDown(OffsetBeg, VecSz, Offset);
11260 InsertVecSz = VecSz;
11263 APInt DemandedElts = APInt::getZero(NumElts);
11264 // TODO: Add support for Instruction::InsertValue.
11265 SmallVector<int> Mask;
11266 if (!E->ReorderIndices.empty()) {
11267 inversePermutation(E->ReorderIndices, Mask);
11268 Mask.append(InsertVecSz - Mask.size(), PoisonMaskElem);
11269 } else {
11270 Mask.assign(VecSz, PoisonMaskElem);
11271 std::iota(Mask.begin(), std::next(Mask.begin(), InsertVecSz), 0);
11273 bool IsIdentity = true;
11274 SmallVector<int> PrevMask(InsertVecSz, PoisonMaskElem);
11275 Mask.swap(PrevMask);
11276 for (unsigned I = 0; I < NumScalars; ++I) {
11277 unsigned InsertIdx = *getElementIndex(VL[PrevMask[I]]);
11278 DemandedElts.setBit(InsertIdx);
11279 IsIdentity &= InsertIdx - OffsetBeg == I;
11280 Mask[InsertIdx - OffsetBeg] = I;
11282 assert(Offset < NumElts && "Failed to find vector index offset");
11284 InstructionCost Cost = 0;
11285 Cost -= TTI->getScalarizationOverhead(SrcVecTy, DemandedElts,
11286 /*Insert*/ true, /*Extract*/ false,
11287 CostKind);
11289 // First cost - resize to the actual vector size if it is not an identity
11290 // shuffle or we need to shift the vector.
11291 // Do not calculate the cost if the actual size is the register size and
11292 // we can merge this shuffle with the following SK_Select.
11293 auto *InsertVecTy = getWidenedType(ScalarTy, InsertVecSz);
11294 if (!IsIdentity)
11295 Cost += ::getShuffleCost(*TTI, TargetTransformInfo::SK_PermuteSingleSrc,
11296 InsertVecTy, Mask);
11297 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
11298 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
11299 }));
11300 // Second cost - the permutation with the subvector, if some elements come
11301 // from the initial vector, or the insertion of a subvector.
11302 // TODO: Implement the analysis of the FirstInsert->getOperand(0)
11303 // subvector of ActualVecTy.
11304 SmallBitVector InMask =
11305 isUndefVector(FirstInsert->getOperand(0),
11306 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask));
11307 if (!InMask.all() && NumScalars != NumElts && !IsWholeSubvector) {
11308 if (InsertVecSz != VecSz) {
11309 auto *ActualVecTy = getWidenedType(ScalarTy, VecSz);
11310 Cost += ::getShuffleCost(*TTI, TTI::SK_InsertSubvector, ActualVecTy, {},
11311 CostKind, OffsetBeg - Offset, InsertVecTy);
11312 } else {
11313 for (unsigned I = 0, End = OffsetBeg - Offset; I < End; ++I)
11314 Mask[I] = InMask.test(I) ? PoisonMaskElem : I;
11315 for (unsigned I = OffsetBeg - Offset, End = OffsetEnd - Offset;
11316 I <= End; ++I)
11317 if (Mask[I] != PoisonMaskElem)
11318 Mask[I] = I + VecSz;
11319 for (unsigned I = OffsetEnd + 1 - Offset; I < VecSz; ++I)
11320 Mask[I] =
11321 ((I >= InMask.size()) || InMask.test(I)) ? PoisonMaskElem : I;
11322 Cost +=
11323 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, InsertVecTy, Mask);
11326 return Cost;
11328 case Instruction::ZExt:
11329 case Instruction::SExt:
11330 case Instruction::FPToUI:
11331 case Instruction::FPToSI:
11332 case Instruction::FPExt:
11333 case Instruction::PtrToInt:
11334 case Instruction::IntToPtr:
11335 case Instruction::SIToFP:
11336 case Instruction::UIToFP:
11337 case Instruction::Trunc:
11338 case Instruction::FPTrunc:
11339 case Instruction::BitCast: {
11340 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
11341 Type *SrcScalarTy = VL0->getOperand(0)->getType();
11342 auto *SrcVecTy = getWidenedType(SrcScalarTy, VL.size());
11343 unsigned Opcode = ShuffleOrOp;
11344 unsigned VecOpcode = Opcode;
11345 if (!ScalarTy->isFPOrFPVectorTy() && !SrcScalarTy->isFPOrFPVectorTy() &&
11346 (SrcIt != MinBWs.end() || It != MinBWs.end())) {
11347 // Check if the values are candidates to demote.
11348 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy->getScalarType());
11349 if (SrcIt != MinBWs.end()) {
11350 SrcBWSz = SrcIt->second.first;
11351 unsigned SrcScalarTyNumElements = getNumElements(SrcScalarTy);
11352 SrcScalarTy = IntegerType::get(F->getContext(), SrcBWSz);
11353 SrcVecTy =
11354 getWidenedType(SrcScalarTy, VL.size() * SrcScalarTyNumElements);
11356 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy->getScalarType());
11357 if (BWSz == SrcBWSz) {
11358 VecOpcode = Instruction::BitCast;
11359 } else if (BWSz < SrcBWSz) {
11360 VecOpcode = Instruction::Trunc;
11361 } else if (It != MinBWs.end()) {
11362 assert(BWSz > SrcBWSz && "Invalid cast!");
11363 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
11364 } else if (SrcIt != MinBWs.end()) {
11365 assert(BWSz > SrcBWSz && "Invalid cast!");
11366 VecOpcode =
11367 SrcIt->second.second ? Instruction::SExt : Instruction::ZExt;
11369 } else if (VecOpcode == Instruction::SIToFP && SrcIt != MinBWs.end() &&
11370 !SrcIt->second.second) {
11371 VecOpcode = Instruction::UIToFP;
11373 auto GetScalarCost = [&](unsigned Idx) -> InstructionCost {
11374 assert(Idx == 0 && "Expected 0 index only");
11375 return TTI->getCastInstrCost(Opcode, VL0->getType(),
11376 VL0->getOperand(0)->getType(),
11377 TTI::getCastContextHint(VL0), CostKind, VL0);
11379 auto GetVectorCost = [=](InstructionCost CommonCost) {
11380 // Do not count the cost here if minimum bitwidth is in effect and the
11381 // cast is just a bitcast (a noop here).
11382 if (VecOpcode != Opcode && VecOpcode == Instruction::BitCast)
11383 return CommonCost;
11384 auto *VI = VL0->getOpcode() == Opcode ? VL0 : nullptr;
11385 TTI::CastContextHint CCH = GetCastContextHint(VL0->getOperand(0));
11387 bool IsArithmeticExtendedReduction =
11388 E->Idx == 0 && UserIgnoreList &&
11389 all_of(*UserIgnoreList, [](Value *V) {
11390 auto *I = cast<Instruction>(V);
11391 return is_contained({Instruction::Add, Instruction::FAdd,
11392 Instruction::Mul, Instruction::FMul,
11393 Instruction::And, Instruction::Or,
11394 Instruction::Xor},
11395 I->getOpcode());
11397 if (IsArithmeticExtendedReduction &&
11398 (VecOpcode == Instruction::ZExt || VecOpcode == Instruction::SExt))
11399 return CommonCost;
11400 return CommonCost +
11401 TTI->getCastInstrCost(VecOpcode, VecTy, SrcVecTy, CCH, CostKind,
11402 VecOpcode == Opcode ? VI : nullptr);
11404 return GetCostDiff(GetScalarCost, GetVectorCost);
11406 case Instruction::FCmp:
11407 case Instruction::ICmp:
11408 case Instruction::Select: {
11409 CmpInst::Predicate VecPred, SwappedVecPred;
11410 auto MatchCmp = m_Cmp(VecPred, m_Value(), m_Value());
11411 if (match(VL0, m_Select(MatchCmp, m_Value(), m_Value())) ||
11412 match(VL0, MatchCmp))
11413 SwappedVecPred = CmpInst::getSwappedPredicate(VecPred);
11414 else
11415 SwappedVecPred = VecPred = ScalarTy->isFloatingPointTy()
11416 ? CmpInst::BAD_FCMP_PREDICATE
11417 : CmpInst::BAD_ICMP_PREDICATE;
11418 auto GetScalarCost = [&](unsigned Idx) {
11419 if (isa<PoisonValue>(UniqueValues[Idx]))
11420 return InstructionCost(TTI::TCC_Free);
11422 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11423 CmpInst::Predicate CurrentPred = ScalarTy->isFloatingPointTy()
11424 ? CmpInst::BAD_FCMP_PREDICATE
11425 : CmpInst::BAD_ICMP_PREDICATE;
11426 auto MatchCmp = m_Cmp(CurrentPred, m_Value(), m_Value());
11427 if ((!match(VI, m_Select(MatchCmp, m_Value(), m_Value())) &&
11428 !match(VI, MatchCmp)) ||
11429 (CurrentPred != VecPred && CurrentPred != SwappedVecPred))
11430 VecPred = SwappedVecPred = ScalarTy->isFloatingPointTy()
11431 ? CmpInst::BAD_FCMP_PREDICATE
11432 : CmpInst::BAD_ICMP_PREDICATE;
11434 InstructionCost ScalarCost = TTI->getCmpSelInstrCost(
11435 E->getOpcode(), OrigScalarTy, Builder.getInt1Ty(), CurrentPred,
11436 CostKind, getOperandInfo(VI->getOperand(0)),
11437 getOperandInfo(VI->getOperand(1)), VI);
11438 InstructionCost IntrinsicCost = GetMinMaxCost(OrigScalarTy, VI);
11439 if (IntrinsicCost.isValid())
11440 ScalarCost = IntrinsicCost;
11442 return ScalarCost;
11444 auto GetVectorCost = [&](InstructionCost CommonCost) {
11445 auto *MaskTy = getWidenedType(Builder.getInt1Ty(), VL.size());
11447 InstructionCost VecCost =
11448 TTI->getCmpSelInstrCost(E->getOpcode(), VecTy, MaskTy, VecPred,
11449 CostKind, getOperandInfo(E->getOperand(0)),
11450 getOperandInfo(E->getOperand(1)), VL0);
11451 if (auto *SI = dyn_cast<SelectInst>(VL0)) {
11452 auto *CondType =
11453 getWidenedType(SI->getCondition()->getType(), VL.size());
11454 unsigned CondNumElements = CondType->getNumElements();
11455 unsigned VecTyNumElements = getNumElements(VecTy);
11456 assert(VecTyNumElements >= CondNumElements &&
11457 VecTyNumElements % CondNumElements == 0 &&
11458 "Cannot vectorize Instruction::Select");
11459 if (CondNumElements != VecTyNumElements) {
11460 // When the return type is i1 but the source is a fixed vector type, we
11461 // need to duplicate the condition value.
11462 VecCost += ::getShuffleCost(
11463 *TTI, TTI::SK_PermuteSingleSrc, CondType,
11464 createReplicatedMask(VecTyNumElements / CondNumElements,
11465 CondNumElements));
11468 return VecCost + CommonCost;
11470 return GetCostDiff(GetScalarCost, GetVectorCost);
11472 case TreeEntry::MinMax: {
11473 auto GetScalarCost = [&](unsigned Idx) {
11474 return GetMinMaxCost(OrigScalarTy);
11476 auto GetVectorCost = [&](InstructionCost CommonCost) {
11477 InstructionCost VecCost = GetMinMaxCost(VecTy);
11478 return VecCost + CommonCost;
11480 return GetCostDiff(GetScalarCost, GetVectorCost);
11482 case Instruction::FNeg:
11483 case Instruction::Add:
11484 case Instruction::FAdd:
11485 case Instruction::Sub:
11486 case Instruction::FSub:
11487 case Instruction::Mul:
11488 case Instruction::FMul:
11489 case Instruction::UDiv:
11490 case Instruction::SDiv:
11491 case Instruction::FDiv:
11492 case Instruction::URem:
11493 case Instruction::SRem:
11494 case Instruction::FRem:
11495 case Instruction::Shl:
11496 case Instruction::LShr:
11497 case Instruction::AShr:
11498 case Instruction::And:
11499 case Instruction::Or:
11500 case Instruction::Xor: {
11501 auto GetScalarCost = [&](unsigned Idx) {
11502 if (isa<PoisonValue>(UniqueValues[Idx]))
11503 return InstructionCost(TTI::TCC_Free);
11505 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11506 unsigned OpIdx = isa<UnaryOperator>(VI) ? 0 : 1;
11507 TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(VI->getOperand(0));
11508 TTI::OperandValueInfo Op2Info =
11509 TTI::getOperandInfo(VI->getOperand(OpIdx));
11510 SmallVector<const Value *> Operands(VI->operand_values());
11511 return TTI->getArithmeticInstrCost(ShuffleOrOp, OrigScalarTy, CostKind,
11512 Op1Info, Op2Info, Operands, VI);
11514 auto GetVectorCost = [=](InstructionCost CommonCost) {
11515 if (ShuffleOrOp == Instruction::And && It != MinBWs.end()) {
11516 for (unsigned I : seq<unsigned>(0, E->getNumOperands())) {
11517 ArrayRef<Value *> Ops = E->getOperand(I);
11518 if (all_of(Ops, [&](Value *Op) {
11519 auto *CI = dyn_cast<ConstantInt>(Op);
11520 return CI && CI->getValue().countr_one() >= It->second.first;
11522 return CommonCost;
11525 unsigned OpIdx = isa<UnaryOperator>(VL0) ? 0 : 1;
11526 TTI::OperandValueInfo Op1Info = getOperandInfo(E->getOperand(0));
11527 TTI::OperandValueInfo Op2Info = getOperandInfo(E->getOperand(OpIdx));
11528 return TTI->getArithmeticInstrCost(ShuffleOrOp, VecTy, CostKind, Op1Info,
11529 Op2Info, {}, nullptr, TLI) +
11530 CommonCost;
11532 return GetCostDiff(GetScalarCost, GetVectorCost);
11534 case Instruction::GetElementPtr: {
11535 return CommonCost + GetGEPCostDiff(VL, VL0);
11537 case Instruction::Load: {
11538 auto GetScalarCost = [&](unsigned Idx) {
11539 auto *VI = cast<LoadInst>(UniqueValues[Idx]);
11540 return TTI->getMemoryOpCost(Instruction::Load, OrigScalarTy,
11541 VI->getAlign(), VI->getPointerAddressSpace(),
11542 CostKind, TTI::OperandValueInfo(), VI);
11544 auto *LI0 = cast<LoadInst>(VL0);
11545 auto GetVectorCost = [&](InstructionCost CommonCost) {
11546 InstructionCost VecLdCost;
11547 switch (E->State) {
11548 case TreeEntry::Vectorize:
11549 if (unsigned Factor = E->getInterleaveFactor()) {
11550 VecLdCost = TTI->getInterleavedMemoryOpCost(
11551 Instruction::Load, VecTy, Factor, std::nullopt, LI0->getAlign(),
11552 LI0->getPointerAddressSpace(), CostKind);
11554 } else {
11555 VecLdCost = TTI->getMemoryOpCost(
11556 Instruction::Load, VecTy, LI0->getAlign(),
11557 LI0->getPointerAddressSpace(), CostKind, TTI::OperandValueInfo());
11559 break;
11560 case TreeEntry::StridedVectorize: {
11561 Align CommonAlignment =
11562 computeCommonAlignment<LoadInst>(UniqueValues.getArrayRef());
11563 VecLdCost = TTI->getStridedMemoryOpCost(
11564 Instruction::Load, VecTy, LI0->getPointerOperand(),
11565 /*VariableMask=*/false, CommonAlignment, CostKind);
11566 break;
11568 case TreeEntry::ScatterVectorize: {
11569 Align CommonAlignment =
11570 computeCommonAlignment<LoadInst>(UniqueValues.getArrayRef());
11571 VecLdCost = TTI->getGatherScatterOpCost(
11572 Instruction::Load, VecTy, LI0->getPointerOperand(),
11573 /*VariableMask=*/false, CommonAlignment, CostKind);
11574 break;
11576 case TreeEntry::CombinedVectorize:
11577 case TreeEntry::NeedToGather:
11578 llvm_unreachable("Unexpected vectorization state.");
11580 return VecLdCost + CommonCost;
11583 InstructionCost Cost = GetCostDiff(GetScalarCost, GetVectorCost);
11584 // If this node generates a masked gather load then it is not a terminal
11585 // node, hence the address operand cost is estimated separately.
11586 if (E->State == TreeEntry::ScatterVectorize)
11587 return Cost;
11589 // Estimate cost of GEPs since this tree node is a terminator.
11590 SmallVector<Value *> PointerOps(VL.size());
11591 for (auto [I, V] : enumerate(VL))
11592 PointerOps[I] = cast<LoadInst>(V)->getPointerOperand();
11593 return Cost + GetGEPCostDiff(PointerOps, LI0->getPointerOperand());
11595 case Instruction::Store: {
11596 bool IsReorder = !E->ReorderIndices.empty();
11597 auto GetScalarCost = [=](unsigned Idx) {
11598 auto *VI = cast<StoreInst>(VL[Idx]);
11599 TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(VI->getValueOperand());
11600 return TTI->getMemoryOpCost(Instruction::Store, OrigScalarTy,
11601 VI->getAlign(), VI->getPointerAddressSpace(),
11602 CostKind, OpInfo, VI);
11604 auto *BaseSI =
11605 cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
11606 auto GetVectorCost = [=](InstructionCost CommonCost) {
11607 // We know that we can merge the stores. Calculate the cost.
11608 InstructionCost VecStCost;
11609 if (E->State == TreeEntry::StridedVectorize) {
11610 Align CommonAlignment =
11611 computeCommonAlignment<StoreInst>(UniqueValues.getArrayRef());
11612 VecStCost = TTI->getStridedMemoryOpCost(
11613 Instruction::Store, VecTy, BaseSI->getPointerOperand(),
11614 /*VariableMask=*/false, CommonAlignment, CostKind);
11615 } else {
11616 assert(E->State == TreeEntry::Vectorize &&
11617 "Expected either strided or consecutive stores.");
11618 if (unsigned Factor = E->getInterleaveFactor()) {
11619 assert(E->ReuseShuffleIndices.empty() && !E->ReorderIndices.empty() &&
11620 "No reused shuffles expected");
11621 CommonCost = 0;
11622 VecStCost = TTI->getInterleavedMemoryOpCost(
11623 Instruction::Store, VecTy, Factor, std::nullopt,
11624 BaseSI->getAlign(), BaseSI->getPointerAddressSpace(), CostKind);
11625 } else {
11626 TTI::OperandValueInfo OpInfo = getOperandInfo(E->getOperand(0));
11627 VecStCost = TTI->getMemoryOpCost(
11628 Instruction::Store, VecTy, BaseSI->getAlign(),
11629 BaseSI->getPointerAddressSpace(), CostKind, OpInfo);
11632 return VecStCost + CommonCost;
11634 SmallVector<Value *> PointerOps(VL.size());
11635 for (auto [I, V] : enumerate(VL)) {
11636 unsigned Idx = IsReorder ? E->ReorderIndices[I] : I;
11637 PointerOps[Idx] = cast<StoreInst>(V)->getPointerOperand();
11640 return GetCostDiff(GetScalarCost, GetVectorCost) +
11641 GetGEPCostDiff(PointerOps, BaseSI->getPointerOperand());
11643 case Instruction::Call: {
11644 auto GetScalarCost = [&](unsigned Idx) {
11645 auto *CI = cast<CallInst>(UniqueValues[Idx]);
11646 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
11647 if (ID != Intrinsic::not_intrinsic) {
11648 IntrinsicCostAttributes CostAttrs(ID, *CI, 1);
11649 return TTI->getIntrinsicInstrCost(CostAttrs, CostKind);
11651 return TTI->getCallInstrCost(CI->getCalledFunction(),
11652 CI->getFunctionType()->getReturnType(),
11653 CI->getFunctionType()->params(), CostKind);
11655 auto GetVectorCost = [=](InstructionCost CommonCost) {
11656 auto *CI = cast<CallInst>(VL0);
11657 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
11658 SmallVector<Type *> ArgTys =
11659 buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
11660 It != MinBWs.end() ? It->second.first : 0);
11661 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
11662 return std::min(VecCallCosts.first, VecCallCosts.second) + CommonCost;
11664 return GetCostDiff(GetScalarCost, GetVectorCost);
11666 case Instruction::ShuffleVector: {
11667 if (!SLPReVec || E->isAltShuffle())
11668 assert(E->isAltShuffle() &&
11669 ((Instruction::isBinaryOp(E->getOpcode()) &&
11670 Instruction::isBinaryOp(E->getAltOpcode())) ||
11671 (Instruction::isCast(E->getOpcode()) &&
11672 Instruction::isCast(E->getAltOpcode())) ||
11673 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
11674 "Invalid Shuffle Vector Operand");
11675 // Try to find the previous shuffle node with the same operands and same
11676 // main/alternate ops.
11677 auto TryFindNodeWithEqualOperands = [=]() {
11678 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
11679 if (TE.get() == E)
11680 break;
11681 if (TE->isAltShuffle() &&
11682 ((TE->getOpcode() == E->getOpcode() &&
11683 TE->getAltOpcode() == E->getAltOpcode()) ||
11684 (TE->getOpcode() == E->getAltOpcode() &&
11685 TE->getAltOpcode() == E->getOpcode())) &&
11686 TE->hasEqualOperands(*E))
11687 return true;
11689 return false;
11691 auto GetScalarCost = [&](unsigned Idx) {
11692 if (isa<PoisonValue>(UniqueValues[Idx]))
11693 return InstructionCost(TTI::TCC_Free);
11695 auto *VI = cast<Instruction>(UniqueValues[Idx]);
11696 assert(E->isOpcodeOrAlt(VI) && "Unexpected main/alternate opcode");
11697 (void)E;
11698 return TTI->getInstructionCost(VI, CostKind);
11700 // Need to clear CommonCost since the final shuffle cost is included in the
11701 // vector cost.
11702 auto GetVectorCost = [&, &TTIRef = *TTI](InstructionCost) {
11703 // VecCost is equal to the sum of the cost of creating 2 vectors
11704 // and the cost of creating the shuffle.
11705 InstructionCost VecCost = 0;
11706 if (TryFindNodeWithEqualOperands()) {
11707 LLVM_DEBUG({
11708 dbgs() << "SLP: diamond match for alternate node found.\n";
11709 E->dump();
11711 // No need to add new vector costs here since we're going to reuse the
11712 // same main/alternate vector ops, just do different shuffling.
11713 } else if (Instruction::isBinaryOp(E->getOpcode())) {
11714 VecCost =
11715 TTIRef.getArithmeticInstrCost(E->getOpcode(), VecTy, CostKind);
11716 VecCost +=
11717 TTIRef.getArithmeticInstrCost(E->getAltOpcode(), VecTy, CostKind);
11718 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
11719 auto *MaskTy = getWidenedType(Builder.getInt1Ty(), VL.size());
11720 VecCost = TTIRef.getCmpSelInstrCost(
11721 E->getOpcode(), VecTy, MaskTy, CI0->getPredicate(), CostKind,
11722 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
11723 VL0);
11724 VecCost += TTIRef.getCmpSelInstrCost(
11725 E->getOpcode(), VecTy, MaskTy,
11726 cast<CmpInst>(E->getAltOp())->getPredicate(), CostKind,
11727 {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
11728 E->getAltOp());
11729 } else {
11730 Type *SrcSclTy = E->getMainOp()->getOperand(0)->getType();
11731 auto *SrcTy = getWidenedType(SrcSclTy, VL.size());
11732 if (SrcSclTy->isIntegerTy() && ScalarTy->isIntegerTy()) {
11733 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
11734 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
11735 unsigned SrcBWSz =
11736 DL->getTypeSizeInBits(E->getMainOp()->getOperand(0)->getType());
11737 if (SrcIt != MinBWs.end()) {
11738 SrcBWSz = SrcIt->second.first;
11739 SrcSclTy = IntegerType::get(SrcSclTy->getContext(), SrcBWSz);
11740 SrcTy = getWidenedType(SrcSclTy, VL.size());
11742 if (BWSz <= SrcBWSz) {
11743 if (BWSz < SrcBWSz)
11744 VecCost =
11745 TTIRef.getCastInstrCost(Instruction::Trunc, VecTy, SrcTy,
11746 TTI::CastContextHint::None, CostKind);
11747 LLVM_DEBUG({
11748 dbgs()
11749 << "SLP: alternate extension, which should be truncated.\n";
11750 E->dump();
11752 return VecCost;
11755 VecCost = TTIRef.getCastInstrCost(E->getOpcode(), VecTy, SrcTy,
11756 TTI::CastContextHint::None, CostKind);
11757 VecCost +=
11758 TTIRef.getCastInstrCost(E->getAltOpcode(), VecTy, SrcTy,
11759 TTI::CastContextHint::None, CostKind);
11761 SmallVector<int> Mask;
11762 E->buildAltOpShuffleMask(
11763 [&](Instruction *I) {
11764 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
11765 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(),
11766 *TLI);
11768 Mask);
11769 VecCost += ::getShuffleCost(TTIRef, TargetTransformInfo::SK_PermuteTwoSrc,
11770 FinalVecTy, Mask, CostKind);
11771 // Patterns like [fadd,fsub] can be combined into a single instruction
11772 // on x86. Reordering them into [fsub,fadd] blocks this pattern. So we
11773 // need to take into account their order when looking for the most used
11774 // order.
11775 unsigned Opcode0 = E->getOpcode();
11776 unsigned Opcode1 = E->getAltOpcode();
11777 SmallBitVector OpcodeMask(getAltInstrMask(E->Scalars, Opcode0, Opcode1));
11778 // If this pattern is supported by the target then we consider the
11779 // order.
11780 if (TTIRef.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask)) {
11781 InstructionCost AltVecCost = TTIRef.getAltInstrCost(
11782 VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
11783 return AltVecCost < VecCost ? AltVecCost : VecCost;
11785 // TODO: Check the reverse order too.
11786 return VecCost;
11788 if (SLPReVec && !E->isAltShuffle())
11789 return GetCostDiff(
11790 GetScalarCost, [&](InstructionCost) -> InstructionCost {
11791 // If a group uses its mask in order, the shufflevector can be
11792 // eliminated by instcombine. Then the cost is 0.
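    // Concretely, the loop below checks that every shufflevector in each
    // group is an extract-subvector mask and that the extracted subvectors
    // appear at increasing, in-order offsets; if all groups match, the
    // shuffles are free, otherwise a single-source permute cost is charged
    // for calculateShufflevectorMask(E->Scalars).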
11793 assert(isa<ShuffleVectorInst>(VL.front()) &&
11794 "Not supported shufflevector usage.");
11795 auto *SV = cast<ShuffleVectorInst>(VL.front());
11796 unsigned SVNumElements =
11797 cast<FixedVectorType>(SV->getOperand(0)->getType())
11798 ->getNumElements();
11799 unsigned GroupSize = SVNumElements / SV->getShuffleMask().size();
11800 for (size_t I = 0, End = VL.size(); I != End; I += GroupSize) {
11801 ArrayRef<Value *> Group = VL.slice(I, GroupSize);
11802 int NextIndex = 0;
11803 if (!all_of(Group, [&](Value *V) {
11804 assert(isa<ShuffleVectorInst>(V) &&
11805 "Not supported shufflevector usage.");
11806 auto *SV = cast<ShuffleVectorInst>(V);
11807 int Index;
11808 [[maybe_unused]] bool IsExtractSubvectorMask =
11809 SV->isExtractSubvectorMask(Index);
11810 assert(IsExtractSubvectorMask &&
11811 "Not supported shufflevector usage.");
11812 if (NextIndex != Index)
11813 return false;
11814 NextIndex += SV->getShuffleMask().size();
11815 return true;
11817 return ::getShuffleCost(
11818 *TTI, TargetTransformInfo::SK_PermuteSingleSrc, VecTy,
11819 calculateShufflevectorMask(E->Scalars));
11821 return TTI::TCC_Free;
11823 return GetCostDiff(GetScalarCost, GetVectorCost);
11825 case Instruction::Freeze:
11826 return CommonCost;
11827 default:
11828 llvm_unreachable("Unknown instruction");
11832 bool BoUpSLP::isFullyVectorizableTinyTree(bool ForReduction) const {
11833 LLVM_DEBUG(dbgs() << "SLP: Check whether the tree with height "
11834 << VectorizableTree.size() << " is fully vectorizable.\n");
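  // The lambda below treats a gather node as vectorizable if it contains no
  // ephemeral values and is all-constant, a splat, smaller than the given
  // limit, a shuffle of extractelements, a (non-alternate) load bundle, or
  // contains loads.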
11836 auto &&AreVectorizableGathers = [this](const TreeEntry *TE, unsigned Limit) {
11837 SmallVector<int> Mask;
11838 return TE->isGather() &&
11839 !any_of(TE->Scalars,
11840 [this](Value *V) { return EphValues.contains(V); }) &&
11841 (allConstant(TE->Scalars) || isSplat(TE->Scalars) ||
11842 TE->Scalars.size() < Limit ||
11843 ((TE->getOpcode() == Instruction::ExtractElement ||
11844 all_of(TE->Scalars, IsaPred<ExtractElementInst, UndefValue>)) &&
11845 isFixedVectorShuffle(TE->Scalars, Mask)) ||
11846 (TE->getOpcode() == Instruction::Load && !TE->isAltShuffle()) ||
11847 any_of(TE->Scalars, IsaPred<LoadInst>));
11850 // We only handle trees of heights 1 and 2.
11851 if (VectorizableTree.size() == 1 &&
11852 (VectorizableTree[0]->State == TreeEntry::Vectorize ||
11853 VectorizableTree[0]->State == TreeEntry::StridedVectorize ||
11854 (ForReduction &&
11855 AreVectorizableGathers(VectorizableTree[0].get(),
11856 VectorizableTree[0]->Scalars.size()) &&
11857 VectorizableTree[0]->getVectorFactor() > 2)))
11858 return true;
11860 if (VectorizableTree.size() != 2)
11861 return false;
11863 // Handle splat and all-constants stores. Also try to vectorize tiny trees
11864 // with the second gather nodes if they have fewer scalar operands than the
11865 // initial tree element (it may be profitable to shuffle the second gather)
11866 // or they are extractelements, which form a shuffle.
11867 SmallVector<int> Mask;
11868 if (VectorizableTree[0]->State == TreeEntry::Vectorize &&
11869 AreVectorizableGathers(VectorizableTree[1].get(),
11870 VectorizableTree[0]->Scalars.size()))
11871 return true;
11873 // Gathering cost would be too much for tiny trees.
11874 if (VectorizableTree[0]->isGather() ||
11875 (VectorizableTree[1]->isGather() &&
11876 VectorizableTree[0]->State != TreeEntry::ScatterVectorize &&
11877 VectorizableTree[0]->State != TreeEntry::StridedVectorize))
11878 return false;
11880 return true;
11883 static bool isLoadCombineCandidateImpl(Value *Root, unsigned NumElts,
11884 TargetTransformInfo *TTI,
11885 bool MustMatchOrInst) {
11886 // Look past the root to find a source value. Arbitrarily follow the
11887 // path through operand 0 of any 'or'. Also, peek through optional
11888 // shift-left-by-multiple-of-8-bits.
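  // For example (hypothetical IR), the walk peels a pattern such as:
  //   %b0 = load i8, ptr %p0
  //   %b1 = load i8, ptr %p1
  //   %z0 = zext i8 %b0 to i32
  //   %z1 = zext i8 %b1 to i32
  //   %s1 = shl i32 %z1, 8
  //   %or = or i32 %s1, %z0
  // Following operand 0 of the 'or'/'shl' chain ends at a zext'ed load.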
11889 Value *ZextLoad = Root;
11890 const APInt *ShAmtC;
11891 bool FoundOr = false;
11892 while (!isa<ConstantExpr>(ZextLoad) &&
11893 (match(ZextLoad, m_Or(m_Value(), m_Value())) ||
11894 (match(ZextLoad, m_Shl(m_Value(), m_APInt(ShAmtC))) &&
11895 ShAmtC->urem(8) == 0))) {
11896 auto *BinOp = cast<BinaryOperator>(ZextLoad);
11897 ZextLoad = BinOp->getOperand(0);
11898 if (BinOp->getOpcode() == Instruction::Or)
11899 FoundOr = true;
11901 // Check if the input is an extended load of the required or/shift expression.
11902 Value *Load;
11903 if ((MustMatchOrInst && !FoundOr) || ZextLoad == Root ||
11904 !match(ZextLoad, m_ZExt(m_Value(Load))) || !isa<LoadInst>(Load))
11905 return false;
11907 // Require that the total load bit width is a legal integer type.
11908 // For example, <8 x i8> --> i64 is a legal integer on a 64-bit target.
11909 // But <16 x i8> --> i128 is not, so the backend probably can't reduce it.
11910 Type *SrcTy = Load->getType();
11911 unsigned LoadBitWidth = SrcTy->getIntegerBitWidth() * NumElts;
11912 if (!TTI->isTypeLegal(IntegerType::get(Root->getContext(), LoadBitWidth)))
11913 return false;
11915 // Everything matched - assume that we can fold the whole sequence using
11916 // load combining.
11917 LLVM_DEBUG(dbgs() << "SLP: Assume load combining for tree starting at "
11918 << *(cast<Instruction>(Root)) << "\n");
11920 return true;
11923 bool BoUpSLP::isLoadCombineReductionCandidate(RecurKind RdxKind) const {
11924 if (RdxKind != RecurKind::Or)
11925 return false;
11927 unsigned NumElts = VectorizableTree[0]->Scalars.size();
11928 Value *FirstReduced = VectorizableTree[0]->Scalars[0];
11929 return isLoadCombineCandidateImpl(FirstReduced, NumElts, TTI,
11930 /* MatchOr */ false);
11933 bool BoUpSLP::isLoadCombineCandidate(ArrayRef<Value *> Stores) const {
11934 // Peek through a final sequence of stores and check if all operations are
11935 // likely to be load-combined.
11936 unsigned NumElts = Stores.size();
11937 for (Value *Scalar : Stores) {
11938 Value *X;
11939 if (!match(Scalar, m_Store(m_Value(X), m_Value())) ||
11940 !isLoadCombineCandidateImpl(X, NumElts, TTI, /* MatchOr */ true))
11941 return false;
11943 return true;
11946 bool BoUpSLP::isTreeTinyAndNotFullyVectorizable(bool ForReduction) const {
11947 if (!DebugCounter::shouldExecute(VectorizedGraphs))
11948 return true;
11950 // Graph is empty - do nothing.
11951 if (VectorizableTree.empty()) {
11952 assert(ExternalUses.empty() && "We shouldn't have any external users");
11954 return true;
11957 // No need to vectorize inserts of gathered values.
11958 if (VectorizableTree.size() == 2 &&
11959 isa<InsertElementInst>(VectorizableTree[0]->Scalars[0]) &&
11960 VectorizableTree[1]->isGather() &&
11961 (VectorizableTree[1]->getVectorFactor() <= 2 ||
11962 !(isSplat(VectorizableTree[1]->Scalars) ||
11963 allConstant(VectorizableTree[1]->Scalars))))
11964 return true;
11966 // If the graph includes only PHI nodes and gathers, it is definitely not
11967 // profitable for vectorization and we can skip it, if the cost threshold is
11968 // the default. The cost of vectorized PHI nodes is almost always 0 plus the
11969 // cost of gathers/buildvectors.
11970 constexpr int Limit = 4;
11971 if (!ForReduction && !SLPCostThreshold.getNumOccurrences() &&
11972 !VectorizableTree.empty() &&
11973 all_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
11974 return (TE->isGather() &&
11975 TE->getOpcode() != Instruction::ExtractElement &&
11976 count_if(TE->Scalars, IsaPred<ExtractElementInst>) <= Limit) ||
11977 TE->getOpcode() == Instruction::PHI;
11979 return true;
11981 // We can vectorize the tree if its size is greater than or equal to the
11982 // minimum size specified by the MinTreeSize command line option.
11983 if (VectorizableTree.size() >= MinTreeSize)
11984 return false;
11986 // If we have a tiny tree (a tree whose size is less than MinTreeSize), we
11987 // can vectorize it if we can prove it fully vectorizable.
11988 if (isFullyVectorizableTinyTree(ForReduction))
11989 return false;
11991 // Check if any of the gather node forms an insertelement buildvector
11992 // somewhere.
11993 bool IsAllowedSingleBVNode =
11994 VectorizableTree.size() > 1 ||
11995 (VectorizableTree.size() == 1 && VectorizableTree.front()->getOpcode() &&
11996 !VectorizableTree.front()->isAltShuffle() &&
11997 VectorizableTree.front()->getOpcode() != Instruction::PHI &&
11998 VectorizableTree.front()->getOpcode() != Instruction::GetElementPtr &&
11999 allSameBlock(VectorizableTree.front()->Scalars));
12000 if (any_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
12001 return TE->isGather() && all_of(TE->Scalars, [&](Value *V) {
12002 return isa<ExtractElementInst, UndefValue>(V) ||
12003 (IsAllowedSingleBVNode &&
12004 !V->hasNUsesOrMore(UsesLimit) &&
12005 any_of(V->users(), IsaPred<InsertElementInst>));
12008 return false;
12010 if (VectorizableTree.back()->isGather() &&
12011 VectorizableTree.back()->isAltShuffle() &&
12012 VectorizableTree.back()->getVectorFactor() > 2 &&
12013 allSameBlock(VectorizableTree.back()->Scalars) &&
12014 !VectorizableTree.back()->Scalars.front()->getType()->isVectorTy() &&
12015 TTI->getScalarizationOverhead(
12016 getWidenedType(VectorizableTree.back()->Scalars.front()->getType(),
12017 VectorizableTree.back()->getVectorFactor()),
12018 APInt::getAllOnes(VectorizableTree.back()->getVectorFactor()),
12019 /*Insert=*/true, /*Extract=*/false,
12020 TTI::TCK_RecipThroughput) > -SLPCostThreshold)
12021 return false;
12023 // Otherwise, we can't vectorize the tree. It is both tiny and not fully
12024 // vectorizable.
12025 return true;
12028 bool BoUpSLP::isTreeNotExtendable() const {
12029 if (getCanonicalGraphSize() != getTreeSize()) {
12030 constexpr unsigned SmallTree = 3;
12031 if (VectorizableTree.front()->isNonPowOf2Vec() &&
12032 getCanonicalGraphSize() <= SmallTree &&
12033 count_if(ArrayRef(VectorizableTree).drop_front(getCanonicalGraphSize()),
12034 [](const std::unique_ptr<TreeEntry> &TE) {
12035 return TE->isGather() &&
12036 TE->getOpcode() == Instruction::Load &&
12037 !allSameBlock(TE->Scalars);
12038 }) == 1)
12039 return true;
12040 return false;
12042 bool Res = false;
12043 for (unsigned Idx : seq<unsigned>(getTreeSize())) {
12044 TreeEntry &E = *VectorizableTree[Idx];
12045 if (!E.isGather())
12046 continue;
12047 if (E.getOpcode() && E.getOpcode() != Instruction::Load)
12048 return false;
12049 if (isSplat(E.Scalars) || allConstant(E.Scalars))
12050 continue;
12051 Res = true;
12053 return Res;
12056 InstructionCost BoUpSLP::getSpillCost() const {
12057 // Walk from the bottom of the tree to the top, tracking which values are
12058 // live. When we see a call instruction that is not part of our tree,
12059 // query TTI to see if there is a cost to keeping values live over it
12060 // (for example, if spills and fills are required).
12061 unsigned BundleWidth = VectorizableTree.front()->Scalars.size();
12062 InstructionCost Cost = 0;
12064 SmallPtrSet<Instruction *, 4> LiveValues;
12065 Instruction *PrevInst = nullptr;
12067 // The entries in VectorizableTree are not necessarily ordered by their
12068 // position in basic blocks. Collect them and order them by dominance so later
12069 // instructions are guaranteed to be visited first. For instructions in
12070 // different basic blocks, we only scan to the beginning of the block, so
12071 // their order does not matter, as long as all instructions in a basic block
12072 // are grouped together. Using dominance ensures a deterministic order.
12073 SmallVector<Instruction *, 16> OrderedScalars;
12074 for (const auto &TEPtr : VectorizableTree) {
12075 if (TEPtr->State != TreeEntry::Vectorize)
12076 continue;
12077 Instruction *Inst = dyn_cast<Instruction>(TEPtr->Scalars[0]);
12078 if (!Inst)
12079 continue;
12080 OrderedScalars.push_back(Inst);
12082 llvm::sort(OrderedScalars, [&](Instruction *A, Instruction *B) {
12083 auto *NodeA = DT->getNode(A->getParent());
12084 auto *NodeB = DT->getNode(B->getParent());
12085 assert(NodeA && "Should only process reachable instructions");
12086 assert(NodeB && "Should only process reachable instructions");
12087 assert((NodeA == NodeB) == (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
12088 "Different nodes should have different DFS numbers");
12089 if (NodeA != NodeB)
12090 return NodeA->getDFSNumIn() > NodeB->getDFSNumIn();
12091 return B->comesBefore(A);
12094 for (Instruction *Inst : OrderedScalars) {
12095 if (!PrevInst) {
12096 PrevInst = Inst;
12097 continue;
12100 // Update LiveValues.
12101 LiveValues.erase(PrevInst);
12102 for (auto &J : PrevInst->operands()) {
12103 if (isa<Instruction>(&*J) && getTreeEntry(&*J))
12104 LiveValues.insert(cast<Instruction>(&*J));
12107 LLVM_DEBUG({
12108 dbgs() << "SLP: #LV: " << LiveValues.size();
12109 for (auto *X : LiveValues)
12110 dbgs() << " " << X->getName();
12111 dbgs() << ", Looking at ";
12112 Inst->dump();
12115 // Now find the sequence of instructions between PrevInst and Inst.
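  // Count the calls in that range; assume-like intrinsics and intrinsics
  // that are cheaper than a libcall (per NoCallIntrinsic below) are ignored.
  // Each counted call later contributes TTI->getCostOfKeepingLiveOverCall
  // for the currently live values.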
12116 unsigned NumCalls = 0;
12117 BasicBlock::reverse_iterator InstIt = ++Inst->getIterator().getReverse(),
12118 PrevInstIt =
12119 PrevInst->getIterator().getReverse();
12120 while (InstIt != PrevInstIt) {
12121 if (PrevInstIt == PrevInst->getParent()->rend()) {
12122 PrevInstIt = Inst->getParent()->rbegin();
12123 continue;
12126 auto NoCallIntrinsic = [this](Instruction *I) {
12127 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
12128 if (II->isAssumeLikeIntrinsic())
12129 return true;
12130 FastMathFlags FMF;
12131 SmallVector<Type *, 4> Tys;
12132 for (auto &ArgOp : II->args())
12133 Tys.push_back(ArgOp->getType());
12134 if (auto *FPMO = dyn_cast<FPMathOperator>(II))
12135 FMF = FPMO->getFastMathFlags();
12136 IntrinsicCostAttributes ICA(II->getIntrinsicID(), II->getType(), Tys,
12137 FMF);
12138 InstructionCost IntrCost =
12139 TTI->getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput);
12140 InstructionCost CallCost = TTI->getCallInstrCost(
12141 nullptr, II->getType(), Tys, TTI::TCK_RecipThroughput);
12142 if (IntrCost < CallCost)
12143 return true;
12145 return false;
12148 // Debug information does not impact spill cost.
12149 if (isa<CallBase>(&*PrevInstIt) && !NoCallIntrinsic(&*PrevInstIt) &&
12150 &*PrevInstIt != PrevInst)
12151 NumCalls++;
12153 ++PrevInstIt;
12156 if (NumCalls) {
12157 SmallVector<Type *, 4> V;
12158 for (auto *II : LiveValues) {
12159 auto *ScalarTy = II->getType();
12160 if (auto *VectorTy = dyn_cast<FixedVectorType>(ScalarTy))
12161 ScalarTy = VectorTy->getElementType();
12162 V.push_back(getWidenedType(ScalarTy, BundleWidth));
12164 Cost += NumCalls * TTI->getCostOfKeepingLiveOverCall(V);
12167 PrevInst = Inst;
12170 return Cost;
12173 /// Checks if the \p IE1 instruction is followed by the \p IE2 instruction in
12174 /// the buildvector sequence.
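/// For example (hypothetical values), given the chain
///   %v1 = insertelement <2 x i32> poison, i32 %a, i32 0
///   %v2 = insertelement <2 x i32> %v1, i32 %b, i32 1
/// isFirstInsertElement(%v1, %v2) returns true: walking up the operand-0
/// chain from %v2 reaches %v1.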
12175 static bool isFirstInsertElement(const InsertElementInst *IE1,
12176 const InsertElementInst *IE2) {
12177 if (IE1 == IE2)
12178 return false;
12179 const auto *I1 = IE1;
12180 const auto *I2 = IE2;
12181 const InsertElementInst *PrevI1;
12182 const InsertElementInst *PrevI2;
12183 unsigned Idx1 = *getElementIndex(IE1);
12184 unsigned Idx2 = *getElementIndex(IE2);
12185 do {
12186 if (I2 == IE1)
12187 return true;
12188 if (I1 == IE2)
12189 return false;
12190 PrevI1 = I1;
12191 PrevI2 = I2;
12192 if (I1 && (I1 == IE1 || I1->hasOneUse()) &&
12193 getElementIndex(I1).value_or(Idx2) != Idx2)
12194 I1 = dyn_cast<InsertElementInst>(I1->getOperand(0));
12195 if (I2 && ((I2 == IE2 || I2->hasOneUse())) &&
12196 getElementIndex(I2).value_or(Idx1) != Idx1)
12197 I2 = dyn_cast<InsertElementInst>(I2->getOperand(0));
12198 } while ((I1 && PrevI1 != I1) || (I2 && PrevI2 != I2));
12199 llvm_unreachable("Two different buildvectors not expected.");
12202 namespace {
12203 /// Returns the incoming Value * if the requested type is Value * too, or a
12204 /// default value otherwise.
12205 struct ValueSelect {
12206 template <typename U>
12207 static std::enable_if_t<std::is_same_v<Value *, U>, Value *> get(Value *V) {
12208 return V;
12210 template <typename U>
12211 static std::enable_if_t<!std::is_same_v<Value *, U>, U> get(Value *) {
12212 return U();
12215 } // namespace
12217 /// Does the analysis of the provided shuffle masks and performs the requested
12218 /// actions on the vectors with the given shuffle masks. It tries to do it in
12219 /// several steps.
12220 /// 1. If the Base vector is not an undef vector, resize the very first mask to
12221 /// have a common VF and perform the action for 2 input vectors (including the
12222 /// non-undef Base). Other shuffle masks are combined with the result of the
12223 /// first stage and processed as a shuffle of 2 elements.
12224 /// 2. If the Base is an undef vector and there is only 1 shuffle mask, perform
12225 /// the action only for 1 vector with the given mask, if it is not the identity
12226 /// mask.
12227 /// 3. If > 2 masks are used, perform the remaining shuffle actions for 2
12228 /// vectors, combining the masks properly between the steps.
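///
/// As a sketch of the two-vector case (assuming two inputs V1 and V2 with
/// VF 2 each): masks {0, 1, poison, poison} over V1 and
/// {poison, poison, 0, 1} over V2 are merged into the two-source mask
/// {0, 1, VF + 0, VF + 1}, and Action is invoked once on {V1, V2}.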
12229 template <typename T>
12230 static T *performExtractsShuffleAction(
12231 MutableArrayRef<std::pair<T *, SmallVector<int>>> ShuffleMask, Value *Base,
12232 function_ref<unsigned(T *)> GetVF,
12233 function_ref<std::pair<T *, bool>(T *, ArrayRef<int>, bool)> ResizeAction,
12234 function_ref<T *(ArrayRef<int>, ArrayRef<T *>)> Action) {
12235 assert(!ShuffleMask.empty() && "Empty list of shuffles for inserts.");
12236 SmallVector<int> Mask(ShuffleMask.begin()->second);
12237 auto VMIt = std::next(ShuffleMask.begin());
12238 T *Prev = nullptr;
12239 SmallBitVector UseMask =
12240 buildUseMask(Mask.size(), Mask, UseMask::UndefsAsMask);
12241 SmallBitVector IsBaseUndef = isUndefVector(Base, UseMask);
12242 if (!IsBaseUndef.all()) {
12243 // Base is not undef, need to combine it with the next subvectors.
12244 std::pair<T *, bool> Res =
12245 ResizeAction(ShuffleMask.begin()->first, Mask, /*ForSingleMask=*/false);
12246 SmallBitVector IsBasePoison = isUndefVector<true>(Base, UseMask);
12247 for (unsigned Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
12248 if (Mask[Idx] == PoisonMaskElem)
12249 Mask[Idx] = IsBasePoison.test(Idx) ? PoisonMaskElem : Idx;
12250 else
12251 Mask[Idx] = (Res.second ? Idx : Mask[Idx]) + VF;
12253 auto *V = ValueSelect::get<T *>(Base);
12254 (void)V;
12255 assert((!V || GetVF(V) == Mask.size()) &&
12256 "Expected base vector of VF number of elements.");
12257 Prev = Action(Mask, {nullptr, Res.first});
12258 } else if (ShuffleMask.size() == 1) {
12259 // Base is undef and only 1 vector is shuffled - perform the action only for
12260 // a single vector, if the mask is not the identity mask.
12261 std::pair<T *, bool> Res = ResizeAction(ShuffleMask.begin()->first, Mask,
12262 /*ForSingleMask=*/true);
12263 if (Res.second)
12264 // Identity mask is found.
12265 Prev = Res.first;
12266 else
12267 Prev = Action(Mask, {ShuffleMask.begin()->first});
12268 } else {
12269 // Base is undef and at least 2 input vectors are shuffled - perform 2-vector
12270 // shuffles step by step, combining the shuffles between the steps.
12271 unsigned Vec1VF = GetVF(ShuffleMask.begin()->first);
12272 unsigned Vec2VF = GetVF(VMIt->first);
12273 if (Vec1VF == Vec2VF) {
12274 // No need to resize the input vectors since they are of the same size, we
12275 // can shuffle them directly.
12276 ArrayRef<int> SecMask = VMIt->second;
12277 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12278 if (SecMask[I] != PoisonMaskElem) {
12279 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12280 Mask[I] = SecMask[I] + Vec1VF;
12283 Prev = Action(Mask, {ShuffleMask.begin()->first, VMIt->first});
12284 } else {
12285 // Vectors of different sizes - resize and reshuffle.
12286 std::pair<T *, bool> Res1 = ResizeAction(ShuffleMask.begin()->first, Mask,
12287 /*ForSingleMask=*/false);
12288 std::pair<T *, bool> Res2 =
12289 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
12290 ArrayRef<int> SecMask = VMIt->second;
12291 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12292 if (Mask[I] != PoisonMaskElem) {
12293 assert(SecMask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12294 if (Res1.second)
12295 Mask[I] = I;
12296 } else if (SecMask[I] != PoisonMaskElem) {
12297 assert(Mask[I] == PoisonMaskElem && "Multiple uses of scalars.");
12298 Mask[I] = (Res2.second ? I : SecMask[I]) + VF;
12301 Prev = Action(Mask, {Res1.first, Res2.first});
12303 VMIt = std::next(VMIt);
12305 bool IsBaseNotUndef = !IsBaseUndef.all();
12306 (void)IsBaseNotUndef;
12307 // Perform requested actions for the remaining masks/vectors.
12308 for (auto E = ShuffleMask.end(); VMIt != E; ++VMIt) {
12309 // Shuffle other input vectors, if any.
12310 std::pair<T *, bool> Res =
12311 ResizeAction(VMIt->first, VMIt->second, /*ForSingleMask=*/false);
12312 ArrayRef<int> SecMask = VMIt->second;
12313 for (unsigned I = 0, VF = Mask.size(); I < VF; ++I) {
12314 if (SecMask[I] != PoisonMaskElem) {
12315 assert((Mask[I] == PoisonMaskElem || IsBaseNotUndef) &&
12316 "Multiple uses of scalars.");
12317 Mask[I] = (Res.second ? I : SecMask[I]) + VF;
12318 } else if (Mask[I] != PoisonMaskElem) {
12319 Mask[I] = I;
12322 Prev = Action(Mask, {Prev, Res.first});
12324 return Prev;
12327 namespace {
12328 /// Data type for handling buildvector sequences with the reused scalars from
12329 /// other tree entries.
12330 template <typename T> struct ShuffledInsertData {
12331 /// List of insertelements to be replaced by shuffles.
12332 SmallVector<InsertElementInst *> InsertElements;
12333 /// The parent vectors and shuffle mask for the given list of inserts.
12334 MapVector<T, SmallVector<int>> ValueMasks;
12336 } // namespace
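// The total cost assembled below is roughly: the sum of the per-entry costs,
// plus the spill cost and the cost of extracting externally used scalars,
// minus the insertelement scalarization overhead saved when buildvector
// sequences are rewritten as shuffles, plus a possible final resize cost for
// reductions vectorized with a minimized bitwidth.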
12338 InstructionCost BoUpSLP::getTreeCost(ArrayRef<Value *> VectorizedVals) {
12339 InstructionCost Cost = 0;
12340 LLVM_DEBUG(dbgs() << "SLP: Calculating cost for tree of size "
12341 << VectorizableTree.size() << ".\n");
12343 unsigned BundleWidth = VectorizableTree[0]->Scalars.size();
12345 SmallPtrSet<Value *, 4> CheckedExtracts;
12346 for (unsigned I = 0, E = VectorizableTree.size(); I < E; ++I) {
12347 TreeEntry &TE = *VectorizableTree[I];
12348 // No need to count the cost for combined entries: they are combined, so
12349 // just skip their cost.
12350 if (TE.State == TreeEntry::CombinedVectorize) {
12351 LLVM_DEBUG(
12352 dbgs() << "SLP: Skipping cost for combined node that starts with "
12353 << *TE.Scalars[0] << ".\n";
12354 TE.dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
12355 continue;
12357 if (TE.isGather()) {
12358 if (const TreeEntry *E = getTreeEntry(TE.getMainOp());
12359 E && E->getVectorFactor() == TE.getVectorFactor() &&
12360 E->isSame(TE.Scalars)) {
12361 // Some gather nodes might be exactly the same as some vectorizable
12362 // nodes after reordering; we need to handle that case.
12363 LLVM_DEBUG(dbgs() << "SLP: Adding cost 0 for bundle "
12364 << shortBundleName(TE.Scalars, TE.Idx) << ".\n"
12365 << "SLP: Current total cost = " << Cost << "\n");
12366 continue;
12370 // Exclude cost of gather loads nodes which are not used. These nodes were
12371 // built as part of the final attempt to vectorize gathered loads.
12372 assert((!TE.isGather() || TE.Idx == 0 || !TE.UserTreeIndices.empty()) &&
12373 "Expected gather nodes with users only.");
12375 InstructionCost C = getEntryCost(&TE, VectorizedVals, CheckedExtracts);
12376 Cost += C;
12377 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle "
12378 << shortBundleName(TE.Scalars, TE.Idx) << ".\n"
12379 << "SLP: Current total cost = " << Cost << "\n");
12382 SmallPtrSet<Value *, 16> ExtractCostCalculated;
12383 InstructionCost ExtractCost = 0;
12384 SmallVector<ShuffledInsertData<const TreeEntry *>> ShuffledInserts;
12385 SmallVector<APInt> DemandedElts;
12386 SmallDenseSet<Value *, 4> UsedInserts;
12387 DenseSet<std::pair<const TreeEntry *, Type *>> VectorCasts;
12388 std::optional<DenseMap<Value *, unsigned>> ValueToExtUses;
12389 DenseMap<const TreeEntry *, DenseSet<Value *>> ExtractsCount;
12390 SmallPtrSet<Value *, 4> ScalarOpsFromCasts;
12391 // Keep track of the {Scalar, Index, User} tuple.
12392 // On AArch64, this helps in fusing a mov instruction, associated with
12393 // extractelement, with fmul in the backend so that extractelement is free.
12394 SmallVector<std::tuple<Value *, User *, int>, 4> ScalarUserAndIdx;
12395 for (ExternalUser &EU : ExternalUses) {
12396 ScalarUserAndIdx.emplace_back(EU.Scalar, EU.User, EU.Lane);
12398 for (ExternalUser &EU : ExternalUses) {
12399 // Uses by ephemeral values are free (because the ephemeral value will be
12400 // removed prior to code generation, and so the extraction will be
12401 // removed as well).
12402 if (EphValues.count(EU.User))
12403 continue;
12405 // Skip scalars used in unreachable blocks or in EH pads (rarely executed),
12406 // or in blocks terminated with an unreachable instruction.
12407 if (BasicBlock *UserParent =
12408 EU.User ? cast<Instruction>(EU.User)->getParent() : nullptr;
12409 UserParent &&
12410 (!DT->isReachableFromEntry(UserParent) || UserParent->isEHPad() ||
12411 isa_and_present<UnreachableInst>(UserParent->getTerminator())))
12412 continue;
12414 // We only add extract cost once for the same scalar.
12415 if (!isa_and_nonnull<InsertElementInst>(EU.User) &&
12416 !ExtractCostCalculated.insert(EU.Scalar).second)
12417 continue;
12419 // No extract cost for vector "scalar"
12420 if (isa<FixedVectorType>(EU.Scalar->getType()))
12421 continue;
12423 // If the found user is an insertelement, do not calculate the extract cost
12424 // but try to detect it as a final shuffled/identity match.
12425 if (auto *VU = dyn_cast_or_null<InsertElementInst>(EU.User);
12426 VU && VU->getOperand(1) == EU.Scalar) {
12427 if (auto *FTy = dyn_cast<FixedVectorType>(VU->getType())) {
12428 if (!UsedInserts.insert(VU).second)
12429 continue;
12430 std::optional<unsigned> InsertIdx = getElementIndex(VU);
12431 if (InsertIdx) {
12432 const TreeEntry *ScalarTE = getTreeEntry(EU.Scalar);
12433 auto *It = find_if(
12434 ShuffledInserts,
12435 [this, VU](const ShuffledInsertData<const TreeEntry *> &Data) {
12436 // Checks if 2 insertelements are from the same buildvector.
12437 InsertElementInst *VecInsert = Data.InsertElements.front();
12438 return areTwoInsertFromSameBuildVector(
12439 VU, VecInsert, [this](InsertElementInst *II) -> Value * {
12440 Value *Op0 = II->getOperand(0);
12441 if (getTreeEntry(II) && !getTreeEntry(Op0))
12442 return nullptr;
12443 return Op0;
12446 int VecId = -1;
12447 if (It == ShuffledInserts.end()) {
12448 auto &Data = ShuffledInserts.emplace_back();
12449 Data.InsertElements.emplace_back(VU);
12450 DemandedElts.push_back(APInt::getZero(FTy->getNumElements()));
12451 VecId = ShuffledInserts.size() - 1;
12452 auto It = MinBWs.find(ScalarTE);
12453 if (It != MinBWs.end() &&
12454 VectorCasts
12455 .insert(std::make_pair(ScalarTE, FTy->getElementType()))
12456 .second) {
12457 unsigned BWSz = It->second.first;
12458 unsigned DstBWSz = DL->getTypeSizeInBits(FTy->getElementType());
12459 unsigned VecOpcode;
12460 if (DstBWSz < BWSz)
12461 VecOpcode = Instruction::Trunc;
12462 else
12463 VecOpcode =
12464 It->second.second ? Instruction::SExt : Instruction::ZExt;
12465 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
12466 InstructionCost C = TTI->getCastInstrCost(
12467 VecOpcode, FTy,
12468 getWidenedType(IntegerType::get(FTy->getContext(), BWSz),
12469 FTy->getNumElements()),
12470 TTI::CastContextHint::None, CostKind);
12471 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12472 << " for extending externally used vector with "
12473 "non-equal minimum bitwidth.\n");
12474 Cost += C;
12476 } else {
12477 if (isFirstInsertElement(VU, It->InsertElements.front()))
12478 It->InsertElements.front() = VU;
12479 VecId = std::distance(ShuffledInserts.begin(), It);
12481 int InIdx = *InsertIdx;
12482 SmallVectorImpl<int> &Mask =
12483 ShuffledInserts[VecId].ValueMasks[ScalarTE];
12484 if (Mask.empty())
12485 Mask.assign(FTy->getNumElements(), PoisonMaskElem);
12486 Mask[InIdx] = EU.Lane;
12487 DemandedElts[VecId].setBit(InIdx);
12488 continue;
12493 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
12494 // If we plan to rewrite the tree in a smaller type, we will need to sign
12495 // extend the extracted value back to the original type. Here, we account
12496 // for the extract and the added cost of the sign extend if needed.
12497 InstructionCost ExtraCost = TTI::TCC_Free;
12498 auto *VecTy = getWidenedType(EU.Scalar->getType(), BundleWidth);
12499 const TreeEntry *Entry = getTreeEntry(EU.Scalar);
12500 auto It = MinBWs.find(Entry);
12501 if (It != MinBWs.end()) {
12502 auto *MinTy = IntegerType::get(F->getContext(), It->second.first);
12503 unsigned Extend = isKnownNonNegative(EU.Scalar, SimplifyQuery(*DL))
12504 ? Instruction::ZExt
12505 : Instruction::SExt;
12506 VecTy = getWidenedType(MinTy, BundleWidth);
12507 ExtraCost = TTI->getExtractWithExtendCost(Extend, EU.Scalar->getType(),
12508 VecTy, EU.Lane);
12509 } else {
12510 ExtraCost =
12511 TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, CostKind,
12512 EU.Lane, EU.Scalar, ScalarUserAndIdx);
12514 // Leave the scalar instructions as is if they are cheaper than extracts.
12515 if (Entry->Idx != 0 || Entry->getOpcode() == Instruction::GetElementPtr ||
12516 Entry->getOpcode() == Instruction::Load) {
12517 // Checks if the user of the external scalar is a phi in the loop body.
12518 auto IsPhiInLoop = [&](const ExternalUser &U) {
12519 if (auto *Phi = dyn_cast_if_present<PHINode>(U.User)) {
12520 auto *I = cast<Instruction>(U.Scalar);
12521 const Loop *L = LI->getLoopFor(Phi->getParent());
12522 return L && (Phi->getParent() == I->getParent() ||
12523 L == LI->getLoopFor(I->getParent()));
12525 return false;
12527 if (!ValueToExtUses) {
12528 ValueToExtUses.emplace();
12529 for_each(enumerate(ExternalUses), [&](const auto &P) {
12530 // Ignore phis in loops.
12531 if (IsPhiInLoop(P.value()))
12532 return;
12534 ValueToExtUses->try_emplace(P.value().Scalar, P.index());
12537 // Can use the original instruction if no operands are vectorized or they
12538 // are already marked as externally used.
12539 auto *Inst = cast<Instruction>(EU.Scalar);
12540 InstructionCost ScalarCost = TTI->getInstructionCost(Inst, CostKind);
12541 auto OperandIsScalar = [&](Value *V) {
12542 if (!getTreeEntry(V)) {
12543 // Some extractelements might not be vectorized but instead
12544 // transformed into a shuffle and removed from the function;
12545 // consider that here.
12546 if (auto *EE = dyn_cast<ExtractElementInst>(V))
12547 return !EE->hasOneUse() || !MustGather.contains(EE);
12548 return true;
12550 return ValueToExtUses->contains(V);
12552 bool CanBeUsedAsScalar = all_of(Inst->operands(), OperandIsScalar);
12553 bool CanBeUsedAsScalarCast = false;
12554 if (auto *CI = dyn_cast<CastInst>(Inst); CI && !CanBeUsedAsScalar) {
12555 if (auto *Op = dyn_cast<Instruction>(CI->getOperand(0));
12556 Op && all_of(Op->operands(), OperandIsScalar)) {
12557 InstructionCost OpCost =
12558 (getTreeEntry(Op) && !ValueToExtUses->contains(Op))
12559 ? TTI->getInstructionCost(Op, CostKind)
12560 : 0;
12561 if (ScalarCost + OpCost <= ExtraCost) {
12562 CanBeUsedAsScalar = CanBeUsedAsScalarCast = true;
12563 ScalarCost += OpCost;
12567 if (CanBeUsedAsScalar) {
12568 bool KeepScalar = ScalarCost <= ExtraCost;
12569 // Try to keep the original scalar if the user is a phi node from the same
12570 // block as the root phis currently being vectorized. It allows keeping
12571 // better ordering info for the PHIs being vectorized.
12572 bool IsProfitablePHIUser =
12573 (KeepScalar || (ScalarCost - ExtraCost <= TTI::TCC_Basic &&
12574 VectorizableTree.front()->Scalars.size() > 2)) &&
12575 VectorizableTree.front()->getOpcode() == Instruction::PHI &&
12576 !Inst->hasNUsesOrMore(UsesLimit) &&
12577 none_of(Inst->users(),
12578 [&](User *U) {
12579 auto *PHIUser = dyn_cast<PHINode>(U);
12580 return (!PHIUser ||
12581 PHIUser->getParent() !=
12582 cast<Instruction>(
12583 VectorizableTree.front()->getMainOp())
12584 ->getParent()) &&
12585 !getTreeEntry(U);
12586 }) &&
12587 count_if(Entry->Scalars, [&](Value *V) {
12588 return ValueToExtUses->contains(V);
12589 }) <= 2;
12590 if (IsProfitablePHIUser) {
12591 KeepScalar = true;
12592 } else if (KeepScalar && ScalarCost != TTI::TCC_Free &&
12593 ExtraCost - ScalarCost <= TTI::TCC_Basic &&
12594 (!GatheredLoadsEntriesFirst.has_value() ||
12595 Entry->Idx < *GatheredLoadsEntriesFirst)) {
12596 unsigned ScalarUsesCount = count_if(Entry->Scalars, [&](Value *V) {
12597 return ValueToExtUses->contains(V);
12599 auto It = ExtractsCount.find(Entry);
12600 if (It != ExtractsCount.end()) {
12601 assert(ScalarUsesCount >= It->getSecond().size() &&
12602 "Expected total number of external uses not less than "
12603 "number of scalar uses.");
12604 ScalarUsesCount -= It->getSecond().size();
12606 // Keep the original scalar if the number of externally used instructions
12607 // in the same entry is not a power of 2. It may help to do some extra
12608 // vectorization for now.
12609 KeepScalar = ScalarUsesCount <= 1 || !has_single_bit(ScalarUsesCount);
12611 if (KeepScalar) {
12612 ExternalUsesAsOriginalScalar.insert(EU.Scalar);
12613 for_each(Inst->operands(), [&](Value *V) {
12614 auto It = ValueToExtUses->find(V);
12615 if (It != ValueToExtUses->end()) {
12616 // Replace all uses to avoid compiler crash.
12617 ExternalUses[It->second].User = nullptr;
12620 ExtraCost = ScalarCost;
12621 if (!IsPhiInLoop(EU))
12622 ExtractsCount[Entry].insert(Inst);
12623 if (CanBeUsedAsScalarCast) {
12624 ScalarOpsFromCasts.insert(Inst->getOperand(0));
12625 // Update the users of the operands of the cast operand to avoid
12626 // compiler crash.
12627 if (auto *IOp = dyn_cast<Instruction>(Inst->getOperand(0))) {
12628 for_each(IOp->operands(), [&](Value *V) {
12629 auto It = ValueToExtUses->find(V);
12630 if (It != ValueToExtUses->end()) {
12631 // Replace all uses to avoid compiler crash.
12632 ExternalUses[It->second].User = nullptr;
12641 ExtractCost += ExtraCost;
12643 // Insert externals for extracts of operands of casts that are to be emitted
12644 // as scalars instead of extractelements.
12645 for (Value *V : ScalarOpsFromCasts) {
12646 ExternalUsesAsOriginalScalar.insert(V);
12647 if (const TreeEntry *E = getTreeEntry(V)) {
12648 ExternalUses.emplace_back(V, nullptr, E->findLaneForValue(V));
12651 // Add reduced value cost, if resized.
12652 if (!VectorizedVals.empty()) {
12653 const TreeEntry &Root = *VectorizableTree.front();
12654 auto BWIt = MinBWs.find(&Root);
12655 if (BWIt != MinBWs.end()) {
12656 Type *DstTy = Root.Scalars.front()->getType();
12657 unsigned OriginalSz = DL->getTypeSizeInBits(DstTy->getScalarType());
12658 unsigned SrcSz =
12659 ReductionBitWidth == 0 ? BWIt->second.first : ReductionBitWidth;
12660 if (OriginalSz != SrcSz) {
12661 unsigned Opcode = Instruction::Trunc;
12662 if (OriginalSz > SrcSz)
12663 Opcode = BWIt->second.second ? Instruction::SExt : Instruction::ZExt;
12664 Type *SrcTy = IntegerType::get(DstTy->getContext(), SrcSz);
12665 if (auto *VecTy = dyn_cast<FixedVectorType>(DstTy)) {
12666 assert(SLPReVec && "Only supported by REVEC.");
12667 SrcTy = getWidenedType(SrcTy, VecTy->getNumElements());
12669 Cost += TTI->getCastInstrCost(Opcode, DstTy, SrcTy,
12670 TTI::CastContextHint::None,
12671 TTI::TCK_RecipThroughput);
12676 InstructionCost SpillCost = getSpillCost();
12677 Cost += SpillCost + ExtractCost;
12678 auto &&ResizeToVF = [this, &Cost](const TreeEntry *TE, ArrayRef<int> Mask,
12679 bool) {
12680 InstructionCost C = 0;
12681 unsigned VF = Mask.size();
12682 unsigned VecVF = TE->getVectorFactor();
12683 if (VF != VecVF &&
12684 (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); }) ||
12685 !ShuffleVectorInst::isIdentityMask(Mask, VF))) {
12686 SmallVector<int> OrigMask(VecVF, PoisonMaskElem);
12687 std::copy(Mask.begin(), std::next(Mask.begin(), std::min(VF, VecVF)),
12688 OrigMask.begin());
12689 C = ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc,
12690 getWidenedType(TE->getMainOp()->getType(), VecVF),
12691 OrigMask);
12692 LLVM_DEBUG(
12693 dbgs() << "SLP: Adding cost " << C
12694 << " for final shuffle of insertelement external users.\n";
12695 TE->dump(); dbgs() << "SLP: Current total cost = " << Cost << "\n");
12696 Cost += C;
12697 return std::make_pair(TE, true);
12699 return std::make_pair(TE, false);
12701 // Calculate the cost of the reshuffled vectors, if any.
12702 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
12703 Value *Base = ShuffledInserts[I].InsertElements.front()->getOperand(0);
12704 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
12705 unsigned VF = 0;
12706 auto EstimateShufflesCost = [&](ArrayRef<int> Mask,
12707 ArrayRef<const TreeEntry *> TEs) {
12708 assert((TEs.size() == 1 || TEs.size() == 2) &&
12709 "Expected exactly 1 or 2 tree entries.");
12710 if (TEs.size() == 1) {
12711 if (VF == 0)
12712 VF = TEs.front()->getVectorFactor();
12713 auto *FTy = getWidenedType(TEs.back()->Scalars.front()->getType(), VF);
12714 if (!ShuffleVectorInst::isIdentityMask(Mask, VF) &&
12715 !all_of(enumerate(Mask), [=](const auto &Data) {
12716 return Data.value() == PoisonMaskElem ||
12717 (Data.index() < VF &&
12718 static_cast<int>(Data.index()) == Data.value());
12719 })) {
12720 InstructionCost C =
12721 ::getShuffleCost(*TTI, TTI::SK_PermuteSingleSrc, FTy, Mask);
12722 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12723 << " for final shuffle of insertelement "
12724 "external users.\n";
12725 TEs.front()->dump();
12726 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12727 Cost += C;
12729 } else {
12730 if (VF == 0) {
12731 if (TEs.front() &&
12732 TEs.front()->getVectorFactor() == TEs.back()->getVectorFactor())
12733 VF = TEs.front()->getVectorFactor();
12734 else
12735 VF = Mask.size();
12737 auto *FTy = getWidenedType(TEs.back()->Scalars.front()->getType(), VF);
12738 InstructionCost C =
12739 ::getShuffleCost(*TTI, TTI::SK_PermuteTwoSrc, FTy, Mask);
12740 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << C
12741 << " for final shuffle of vector node and external "
12742 "insertelement users.\n";
12743 if (TEs.front()) { TEs.front()->dump(); } TEs.back()->dump();
12744 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12745 Cost += C;
12747 VF = Mask.size();
12748 return TEs.back();
12750 (void)performExtractsShuffleAction<const TreeEntry>(
12751 MutableArrayRef(Vector.data(), Vector.size()), Base,
12752 [](const TreeEntry *E) { return E->getVectorFactor(); }, ResizeToVF,
12753 EstimateShufflesCost);
12754 InstructionCost InsertCost = TTI->getScalarizationOverhead(
12755 cast<FixedVectorType>(
12756 ShuffledInserts[I].InsertElements.front()->getType()),
12757 DemandedElts[I],
12758 /*Insert*/ true, /*Extract*/ false, TTI::TCK_RecipThroughput);
12759 Cost -= InsertCost;
12762 // Add the cost for reduced value resize (if required).
12763 if (ReductionBitWidth != 0) {
12764 assert(UserIgnoreList && "Expected reduction tree.");
12765 const TreeEntry &E = *VectorizableTree.front();
12766 auto It = MinBWs.find(&E);
12767 if (It != MinBWs.end() && It->second.first != ReductionBitWidth) {
12768 unsigned SrcSize = It->second.first;
12769 unsigned DstSize = ReductionBitWidth;
12770 unsigned Opcode = Instruction::Trunc;
12771 if (SrcSize < DstSize) {
12772 bool IsArithmeticExtendedReduction =
12773 all_of(*UserIgnoreList, [](Value *V) {
12774 auto *I = cast<Instruction>(V);
12775 return is_contained({Instruction::Add, Instruction::FAdd,
12776 Instruction::Mul, Instruction::FMul,
12777 Instruction::And, Instruction::Or,
12778 Instruction::Xor},
12779 I->getOpcode());
12781 if (IsArithmeticExtendedReduction)
12782 Opcode =
12783 Instruction::BitCast; // Handle it by getExtendedReductionCost
12784 else
12785 Opcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
12787 if (Opcode != Instruction::BitCast) {
12788 auto *SrcVecTy =
12789 getWidenedType(Builder.getIntNTy(SrcSize), E.getVectorFactor());
12790 auto *DstVecTy =
12791 getWidenedType(Builder.getIntNTy(DstSize), E.getVectorFactor());
12792 TTI::CastContextHint CCH = getCastContextHint(E);
12793 InstructionCost CastCost;
12794 switch (E.getOpcode()) {
12795 case Instruction::SExt:
12796 case Instruction::ZExt:
12797 case Instruction::Trunc: {
12798 const TreeEntry *OpTE = getOperandEntry(&E, 0);
12799 CCH = getCastContextHint(*OpTE);
12800 break;
12802 default:
12803 break;
12805 CastCost += TTI->getCastInstrCost(Opcode, DstVecTy, SrcVecTy, CCH,
12806 TTI::TCK_RecipThroughput);
12807 Cost += CastCost;
12808 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << CastCost
12809 << " for final resize for reduction from " << SrcVecTy
12810 << " to " << DstVecTy << "\n";
12811 dbgs() << "SLP: Current total cost = " << Cost << "\n");
12816 #ifndef NDEBUG
12817 SmallString<256> Str;
12819 raw_svector_ostream OS(Str);
12820 OS << "SLP: Spill Cost = " << SpillCost << ".\n"
12821 << "SLP: Extract Cost = " << ExtractCost << ".\n"
12822 << "SLP: Total Cost = " << Cost << ".\n";
12824 LLVM_DEBUG(dbgs() << Str);
12825 if (ViewSLPTree)
12826 ViewGraph(this, "SLP" + F->getName(), false, Str);
12827 #endif
12829 return Cost;
12832 /// Tries to find extractelement instructions with constant indices from a fixed
12833 /// vector type and gathers such instructions into a group, which most likely
12834 /// can be detected as a shuffle of 1 or 2 input vectors. If this attempt was
12835 /// successful, the matched scalars are replaced by poison values in \p VL for
12836 /// future analysis.
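/// For example (hypothetical values), for
///   VL = {extractelement %v, 0; extractelement %v, 1; %x; extractelement %v, 3}
/// the extracts are moved out, VL becomes {poison, poison, %x, poison} and
/// Mask is likely {0, 1, PoisonMaskElem, 3}, i.e. a single-source shuffle
/// of %v.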
12837 std::optional<TTI::ShuffleKind>
12838 BoUpSLP::tryToGatherSingleRegisterExtractElements(
12839 MutableArrayRef<Value *> VL, SmallVectorImpl<int> &Mask) const {
12840 // Scan list of gathered scalars for extractelements that can be represented
12841 // as shuffles.
12842 MapVector<Value *, SmallVector<int>> VectorOpToIdx;
12843 SmallVector<int> UndefVectorExtracts;
12844 for (int I = 0, E = VL.size(); I < E; ++I) {
12845 auto *EI = dyn_cast<ExtractElementInst>(VL[I]);
12846 if (!EI) {
12847 if (isa<UndefValue>(VL[I]))
12848 UndefVectorExtracts.push_back(I);
12849 continue;
12851 auto *VecTy = dyn_cast<FixedVectorType>(EI->getVectorOperandType());
12852 if (!VecTy || !isa<ConstantInt, UndefValue>(EI->getIndexOperand()))
12853 continue;
12854 std::optional<unsigned> Idx = getExtractIndex(EI);
12855 // Undefined index.
12856 if (!Idx) {
12857 UndefVectorExtracts.push_back(I);
12858 continue;
12860 if (Idx >= VecTy->getNumElements()) {
12861 UndefVectorExtracts.push_back(I);
12862 continue;
12864 SmallBitVector ExtractMask(VecTy->getNumElements(), true);
12865 ExtractMask.reset(*Idx);
12866 if (isUndefVector(EI->getVectorOperand(), ExtractMask).all()) {
12867 UndefVectorExtracts.push_back(I);
12868 continue;
12870 VectorOpToIdx[EI->getVectorOperand()].push_back(I);
12872 // Sort the vector operands by the maximum number of uses in extractelements.
12873 SmallVector<std::pair<Value *, SmallVector<int>>> Vectors =
12874 VectorOpToIdx.takeVector();
12875 stable_sort(Vectors, [](const auto &P1, const auto &P2) {
12876 return P1.second.size() > P2.second.size();
12878 // Find the best pair of the vectors or a single vector.
12879 const int UndefSz = UndefVectorExtracts.size();
12880 unsigned SingleMax = 0;
12881 unsigned PairMax = 0;
12882 if (!Vectors.empty()) {
12883 SingleMax = Vectors.front().second.size() + UndefSz;
12884 if (Vectors.size() > 1) {
12885 auto *ItNext = std::next(Vectors.begin());
12886 PairMax = SingleMax + ItNext->second.size();
12889 if (SingleMax == 0 && PairMax == 0 && UndefSz == 0)
12890 return std::nullopt;
12891 // Check if it is better to perform a shuffle of 2 vectors or just of a
12892 // single vector.
12893 SmallVector<Value *> SavedVL(VL.begin(), VL.end());
12894 SmallVector<Value *> GatheredExtracts(
12895 VL.size(), PoisonValue::get(VL.front()->getType()));
12896 if (SingleMax >= PairMax && SingleMax) {
12897 for (int Idx : Vectors.front().second)
12898 std::swap(GatheredExtracts[Idx], VL[Idx]);
12899 } else if (!Vectors.empty()) {
12900 for (unsigned Idx : {0, 1})
12901 for (int Idx : Vectors[Idx].second)
12902 std::swap(GatheredExtracts[Idx], VL[Idx]);
12904 // Add extracts from undefs too.
12905 for (int Idx : UndefVectorExtracts)
12906 std::swap(GatheredExtracts[Idx], VL[Idx]);
12907 // Check that the gather of extractelements can be represented as just a
12908 // shuffle of one or two vectors from which the scalars are extracted.
12909 std::optional<TTI::ShuffleKind> Res =
12910 isFixedVectorShuffle(GatheredExtracts, Mask);
12911 if (!Res || all_of(Mask, [](int Idx) { return Idx == PoisonMaskElem; })) {
12912 // TODO: try to check other subsets if possible.
12913 // Restore the original VL if attempt was not successful.
12914 copy(SavedVL, VL.begin());
12915 return std::nullopt;
12917 // Restore unused scalars from the mask, if some of the extractelements were
12918 // not selected for the shuffle.
12919 for (int I = 0, E = GatheredExtracts.size(); I < E; ++I) {
12920 if (Mask[I] == PoisonMaskElem && !isa<PoisonValue>(GatheredExtracts[I]) &&
12921 isa<UndefValue>(GatheredExtracts[I])) {
12922 std::swap(VL[I], GatheredExtracts[I]);
12923 continue;
12925 auto *EI = dyn_cast<ExtractElementInst>(VL[I]);
12926 if (!EI || !isa<FixedVectorType>(EI->getVectorOperandType()) ||
12927 !isa<ConstantInt, UndefValue>(EI->getIndexOperand()) ||
12928 is_contained(UndefVectorExtracts, I))
12929 continue;
12931 return Res;
12934 /// Tries to find extractelement instructions with constant indices from a fixed
12935 /// vector type and gathers such instructions into a group, which most likely
12936 /// can be detected as a shuffle of 1 or 2 input vectors. If this attempt was
12937 /// successful, the matched scalars are replaced by poison values in \p VL for
12938 /// future analysis.
12939 SmallVector<std::optional<TTI::ShuffleKind>>
12940 BoUpSLP::tryToGatherExtractElements(SmallVectorImpl<Value *> &VL,
12941 SmallVectorImpl<int> &Mask,
12942 unsigned NumParts) const {
12943 assert(NumParts > 0 && "NumParts expected to be greater than or equal to 1.");
12944 SmallVector<std::optional<TTI::ShuffleKind>> ShufflesRes(NumParts);
12945 Mask.assign(VL.size(), PoisonMaskElem);
12946 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
12947 for (unsigned Part : seq<unsigned>(NumParts)) {
12948 // Scan list of gathered scalars for extractelements that can be represented
12949 // as shuffles.
12950 MutableArrayRef<Value *> SubVL = MutableArrayRef(VL).slice(
12951 Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
12952 SmallVector<int> SubMask;
12953 std::optional<TTI::ShuffleKind> Res =
12954 tryToGatherSingleRegisterExtractElements(SubVL, SubMask);
12955 ShufflesRes[Part] = Res;
12956 copy(SubMask, std::next(Mask.begin(), Part * SliceSize));
12958 if (none_of(ShufflesRes, [](const std::optional<TTI::ShuffleKind> &Res) {
12959 return Res.has_value();
12961 ShufflesRes.clear();
12962 return ShufflesRes;
12965 std::optional<TargetTransformInfo::ShuffleKind>
12966 BoUpSLP::isGatherShuffledSingleRegisterEntry(
12967 const TreeEntry *TE, ArrayRef<Value *> VL, MutableArrayRef<int> Mask,
12968 SmallVectorImpl<const TreeEntry *> &Entries, unsigned Part, bool ForOrder) {
12969 Entries.clear();
12970 // TODO: currently checking only for Scalars in the tree entry, need to count
12971 // reused elements too for better cost estimation.
12972 const EdgeInfo &TEUseEI = TE == VectorizableTree.front().get()
12973 ? EdgeInfo(const_cast<TreeEntry *>(TE), 0)
12974 : TE->UserTreeIndices.front();
12975 const Instruction *TEInsertPt = &getLastInstructionInBundle(TEUseEI.UserTE);
12976 const BasicBlock *TEInsertBlock = nullptr;
12977 // Main node of PHI entries keeps the correct order of operands/incoming
12978 // blocks.
12979 if (auto *PHI = dyn_cast<PHINode>(TEUseEI.UserTE->getMainOp())) {
12980 TEInsertBlock = PHI->getIncomingBlock(TEUseEI.EdgeIdx);
12981 TEInsertPt = TEInsertBlock->getTerminator();
12982 } else {
12983 TEInsertBlock = TEInsertPt->getParent();
12985 if (!DT->isReachableFromEntry(TEInsertBlock))
12986 return std::nullopt;
12987 auto *NodeUI = DT->getNode(TEInsertBlock);
12988 assert(NodeUI && "Should only process reachable instructions");
12989 SmallPtrSet<Value *, 4> GatheredScalars(VL.begin(), VL.end());
12990 auto CheckOrdering = [&](const Instruction *InsertPt) {
12991 // Argument InsertPt is an instruction where vector code for some other
12992 // tree entry (one that shares one or more scalars with TE) is going to be
12993 // generated. This lambda returns true if the insertion point of vector code
12994 // for the TE dominates that point (otherwise the dependency is the other way
12995 // around). The other node is not limited to be of a gather kind. Gather
12996 // nodes are not scheduled and their vector code is inserted before their
12997 // first user. If the user is a PHI, it is supposed to be at the end of a
12998 // predecessor block. Otherwise it is the last instruction among scalars of
12999 // the user node. So, instead of checking dependency between instructions
13000 // themselves, we check dependency between their insertion points for vector
13001 // code (since each scalar instruction ends up as a lane of a vector
13002 // instruction).
13003 const BasicBlock *InsertBlock = InsertPt->getParent();
13004 auto *NodeEUI = DT->getNode(InsertBlock);
13005 if (!NodeEUI)
13006 return false;
13007 assert((NodeUI == NodeEUI) ==
13008 (NodeUI->getDFSNumIn() == NodeEUI->getDFSNumIn()) &&
13009 "Different nodes should have different DFS numbers");
13010 // Check the order of the gather nodes' users.
13011 if (TEInsertPt->getParent() != InsertBlock &&
13012 (DT->dominates(NodeUI, NodeEUI) || !DT->dominates(NodeEUI, NodeUI)))
13013 return false;
13014 if (TEInsertPt->getParent() == InsertBlock &&
13015 TEInsertPt->comesBefore(InsertPt))
13016 return false;
13017 return true;
13019 // Find all tree entries used by the gathered values. If no common entries
13020 // are found - not a shuffle.
13021 // Here we build a set of tree nodes for each gathered value and try to
13022 // find the intersection between these sets. If we have at least one common
13023 // tree node for each gathered value - we have just a permutation of a
13024 // single vector. If we have 2 different sets, we're in a situation where we
13025 // have a permutation of 2 input vectors.
13026 SmallVector<SmallPtrSet<const TreeEntry *, 4>> UsedTEs;
13027 DenseMap<Value *, int> UsedValuesEntry;
13028 for (Value *V : VL) {
13029 if (isConstant(V))
13030 continue;
13031 // Build a list of tree entries where V is used.
13032 SmallPtrSet<const TreeEntry *, 4> VToTEs;
13033 for (const TreeEntry *TEPtr : ValueToGatherNodes.find(V)->second) {
13034 if (TEPtr == TE || TEPtr->Idx == 0)
13035 continue;
13036 assert(any_of(TEPtr->Scalars,
13037 [&](Value *V) { return GatheredScalars.contains(V); }) &&
13038 "Must contain at least single gathered value.");
13039 assert(TEPtr->UserTreeIndices.size() == 1 &&
13040 "Expected only single user of a gather node.");
13041 const EdgeInfo &UseEI = TEPtr->UserTreeIndices.front();
13043 PHINode *UserPHI = dyn_cast<PHINode>(UseEI.UserTE->getMainOp());
13044 const Instruction *InsertPt =
13045 UserPHI ? UserPHI->getIncomingBlock(UseEI.EdgeIdx)->getTerminator()
13046 : &getLastInstructionInBundle(UseEI.UserTE);
13047 if (TEInsertPt == InsertPt) {
13048 // If 2 gathers are operands of the same entry (regardless of whether the
13049 // user is a PHI or not), compare the operand indices and use the earlier
13050 // one as the base.
13051 if (TEUseEI.UserTE == UseEI.UserTE && TEUseEI.EdgeIdx < UseEI.EdgeIdx)
13052 continue;
13053 // If the user instruction is used for some reason in different
13054 // vectorized nodes - make it depend on index.
13055 if (TEUseEI.UserTE != UseEI.UserTE &&
13056 TEUseEI.UserTE->Idx < UseEI.UserTE->Idx)
13057 continue;
13060 // Check if the user node of the TE comes after the user node of TEPtr;
13061 // otherwise TEPtr depends on TE.
13062 if ((TEInsertBlock != InsertPt->getParent() ||
13063 TEUseEI.EdgeIdx < UseEI.EdgeIdx || TEUseEI.UserTE != UseEI.UserTE) &&
13064 !CheckOrdering(InsertPt))
13065 continue;
13066 VToTEs.insert(TEPtr);
13068 if (const TreeEntry *VTE = getTreeEntry(V)) {
13069 if (ForOrder && VTE->Idx < GatheredLoadsEntriesFirst.value_or(0)) {
13070 if (VTE->State != TreeEntry::Vectorize) {
13071 auto It = MultiNodeScalars.find(V);
13072 if (It == MultiNodeScalars.end())
13073 continue;
13074 VTE = *It->getSecond().begin();
13075 // Iterate through all vectorized nodes.
13076 auto *MIt = find_if(It->getSecond(), [](const TreeEntry *MTE) {
13077 return MTE->State == TreeEntry::Vectorize;
13079 if (MIt == It->getSecond().end())
13080 continue;
13081 VTE = *MIt;
13084 Instruction &LastBundleInst = getLastInstructionInBundle(VTE);
13085 if (&LastBundleInst == TEInsertPt || !CheckOrdering(&LastBundleInst))
13086 continue;
13087 VToTEs.insert(VTE);
13089 if (VToTEs.empty())
13090 continue;
13091 if (UsedTEs.empty()) {
13092 // The first iteration, just insert the list of nodes to vector.
13093 UsedTEs.push_back(VToTEs);
13094 UsedValuesEntry.try_emplace(V, 0);
13095 } else {
13096 // Need to check if there are any previously used tree nodes which use V.
13097 // If there are no such nodes, consider that we have yet another input
13098 // vector.
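      // E.g., if the first scalar was found in entries {N1, N2} and the
      // current one in {N2, N3}, the intersection {N2} keeps both scalars
      // attributable to a single source vector; an empty intersection opens a
      // second set of possible source vectors.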
13099 SmallPtrSet<const TreeEntry *, 4> SavedVToTEs(VToTEs);
13100 unsigned Idx = 0;
13101 for (SmallPtrSet<const TreeEntry *, 4> &Set : UsedTEs) {
13102 // Do we have a non-empty intersection of previously listed tree entries
13103 // and tree entries using current V?
13104 set_intersect(VToTEs, Set);
13105 if (!VToTEs.empty()) {
13106 // Yes, write the new subset and continue analysis for the next
13107 // scalar.
13108 Set.swap(VToTEs);
13109 break;
13111 VToTEs = SavedVToTEs;
13112 ++Idx;
13114 // No non-empty intersection found - need to add a second set of possible
13115 // source vectors.
13116 if (Idx == UsedTEs.size()) {
13117 // If the number of input vectors is greater than 2, this is not a
13118 // permutation; fall back to the regular gather.
13119 // TODO: support multiple reshuffled nodes.
13120 if (UsedTEs.size() == 2)
13121 continue;
13122 UsedTEs.push_back(SavedVToTEs);
13123 Idx = UsedTEs.size() - 1;
13125 UsedValuesEntry.try_emplace(V, Idx);
13129 if (UsedTEs.empty()) {
13130 Entries.clear();
13131 return std::nullopt;
13134 unsigned VF = 0;
13135 if (UsedTEs.size() == 1) {
13136 // Keep the order to avoid non-determinism.
13137 SmallVector<const TreeEntry *> FirstEntries(UsedTEs.front().begin(),
13138 UsedTEs.front().end());
13139 sort(FirstEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
13140 return TE1->Idx < TE2->Idx;
13142 // First, try to find a perfect match in another gather node.
13143 auto *It = find_if(FirstEntries, [=](const TreeEntry *EntryPtr) {
13144 return EntryPtr->isSame(VL) || EntryPtr->isSame(TE->Scalars);
13146 if (It != FirstEntries.end() &&
13147 ((*It)->getVectorFactor() == VL.size() ||
13148 ((*It)->getVectorFactor() == TE->Scalars.size() &&
13149 TE->ReuseShuffleIndices.size() == VL.size() &&
13150 (*It)->isSame(TE->Scalars)))) {
13151 Entries.push_back(*It);
13152 if ((*It)->getVectorFactor() == VL.size()) {
13153 std::iota(std::next(Mask.begin(), Part * VL.size()),
13154 std::next(Mask.begin(), (Part + 1) * VL.size()), 0);
13155 } else {
13156 SmallVector<int> CommonMask = TE->getCommonMask();
13157 copy(CommonMask, Mask.begin());
13159 // Clear undef scalars.
13160 for (unsigned I : seq<unsigned>(VL.size()))
13161 if (isa<PoisonValue>(VL[I]))
13162 Mask[Part * VL.size() + I] = PoisonMaskElem;
13163 return TargetTransformInfo::SK_PermuteSingleSrc;
13165 // No perfect match, just shuffle, so choose the first tree node from the
13166 // tree.
13167 Entries.push_back(FirstEntries.front());
13168 } else {
13169 // Try to find nodes with the same vector factor.
13170 assert(UsedTEs.size() == 2 && "Expected at max 2 permuted entries.");
13171 // Keep the order of tree nodes to avoid non-determinism.
13172 DenseMap<int, const TreeEntry *> VFToTE;
13173 for (const TreeEntry *TE : UsedTEs.front()) {
13174 unsigned VF = TE->getVectorFactor();
13175 auto It = VFToTE.find(VF);
13176 if (It != VFToTE.end()) {
13177 if (It->second->Idx > TE->Idx)
13178 It->getSecond() = TE;
13179 continue;
13181 VFToTE.try_emplace(VF, TE);
13183 // Same, keep the order to avoid non-determinism.
13184 SmallVector<const TreeEntry *> SecondEntries(UsedTEs.back().begin(),
13185 UsedTEs.back().end());
13186 sort(SecondEntries, [](const TreeEntry *TE1, const TreeEntry *TE2) {
13187 return TE1->Idx < TE2->Idx;
13189 for (const TreeEntry *TE : SecondEntries) {
13190 auto It = VFToTE.find(TE->getVectorFactor());
13191 if (It != VFToTE.end()) {
13192 VF = It->first;
13193 Entries.push_back(It->second);
13194 Entries.push_back(TE);
13195 break;
13198 // No 2 source vectors with the same vector factor - just choose 2 with max
13199 // index.
13200 if (Entries.empty()) {
13201 Entries.push_back(*llvm::max_element(
13202 UsedTEs.front(), [](const TreeEntry *TE1, const TreeEntry *TE2) {
13203 return TE1->Idx < TE2->Idx;
13204 }));
13205 Entries.push_back(SecondEntries.front());
13206 VF = std::max(Entries.front()->getVectorFactor(),
13207 Entries.back()->getVectorFactor());
13211 bool IsSplatOrUndefs = isSplat(VL) || all_of(VL, IsaPred<UndefValue>);
13212 // Checks if the 2 PHIs are compatible, i.e. have a high chance of being
13213 // vectorized together.
13214 auto AreCompatiblePHIs = [&](Value *V, Value *V1) {
13215 auto *PHI = cast<PHINode>(V);
13216 auto *PHI1 = cast<PHINode>(V1);
13217 // Check that all incoming values are compatible/from the same parent (if
13218 // they are instructions).
13219 // The incoming values are compatible if they all are constants, or
13220 // instructions with the same/alternate opcodes from the same basic block.
13221 for (int I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
13222 Value *In = PHI->getIncomingValue(I);
13223 Value *In1 = PHI1->getIncomingValue(I);
13224 if (isConstant(In) && isConstant(In1))
13225 continue;
13226 if (!getSameOpcode({In, In1}, *TLI).getOpcode())
13227 return false;
13228 if (cast<Instruction>(In)->getParent() !=
13229 cast<Instruction>(In1)->getParent())
13230 return false;
13232 return true;
13234 // Check if the value can be ignored during analysis for shuffled gathers.
13235 // We suppose it is better to ignore instructions which do not form splats,
13236 // are not vectorized/not extractelements (these instructions will be handled
13237 // by extractelements processing) or may form a vector node in the future.
13238 auto MightBeIgnored = [=](Value *V) {
13239 auto *I = dyn_cast<Instruction>(V);
13240 return I && !IsSplatOrUndefs && !ScalarToTreeEntry.count(I) &&
13241 !isVectorLikeInstWithConstOps(I) &&
13242 !areAllUsersVectorized(I, UserIgnoreList) && isSimple(I);
13244 // Check that the neighbor instruction may form a full vector node with the
13245 // current instruction V. It is possible if they have the same/alternate
13246 // opcode and the same parent basic block.
13247 auto NeighborMightBeIgnored = [&](Value *V, int Idx) {
13248 Value *V1 = VL[Idx];
13249 bool UsedInSameVTE = false;
13250 auto It = UsedValuesEntry.find(V1);
13251 if (It != UsedValuesEntry.end())
13252 UsedInSameVTE = It->second == UsedValuesEntry.find(V)->second;
13253 return V != V1 && MightBeIgnored(V1) && !UsedInSameVTE &&
13254 getSameOpcode({V, V1}, *TLI).getOpcode() &&
13255 cast<Instruction>(V)->getParent() ==
13256 cast<Instruction>(V1)->getParent() &&
13257 (!isa<PHINode>(V1) || AreCompatiblePHIs(V, V1));
13259 // Build a shuffle mask for better cost estimation and vector emission.
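  // EntryLanes records, for each selected scalar, a (source entry index, lane
  // in VL) pair; UsedIdxs tracks which of the candidate entries contribute at
  // least one lane.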
13260 SmallBitVector UsedIdxs(Entries.size());
13261 SmallVector<std::pair<unsigned, int>> EntryLanes;
13262 for (int I = 0, E = VL.size(); I < E; ++I) {
13263 Value *V = VL[I];
13264 auto It = UsedValuesEntry.find(V);
13265 if (It == UsedValuesEntry.end())
13266 continue;
13267 // Do not try to shuffle scalars if they are constants or instructions
13268 // that may be vectorized later as part of the subsequent vector build
13269 // vectorization.
13270 if (isConstant(V) || (MightBeIgnored(V) &&
13271 ((I > 0 && NeighborMightBeIgnored(V, I - 1)) ||
13272 (I != E - 1 && NeighborMightBeIgnored(V, I + 1)))))
13273 continue;
13274 unsigned Idx = It->second;
13275 EntryLanes.emplace_back(Idx, I);
13276 UsedIdxs.set(Idx);
13278 // Iterate through all shuffled scalars and select entries which can be used
13279 // for the final shuffle.
13280 SmallVector<const TreeEntry *> TempEntries;
13281 for (unsigned I = 0, Sz = Entries.size(); I < Sz; ++I) {
13282 if (!UsedIdxs.test(I))
13283 continue;
13284 // Fix the entry number for the given scalar. If it is the first entry, set
13285 // Pair.first to 0, otherwise to 1 (we currently select at most 2 nodes).
13286 // These indices are used as the vector offset when calculating the final
13287 // shuffle mask.
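    // E.g., if only the second of the two candidate entries is actually used,
    // its lanes are renumbered to vector offset 0 so the final shuffle
    // becomes single-source.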
13288 for (std::pair<unsigned, int> &Pair : EntryLanes)
13289 if (Pair.first == I)
13290 Pair.first = TempEntries.size();
13291 TempEntries.push_back(Entries[I]);
13293 Entries.swap(TempEntries);
13294 if (EntryLanes.size() == Entries.size() &&
13295 !VL.equals(ArrayRef(TE->Scalars)
13296 .slice(Part * VL.size(),
13297 std::min<int>(VL.size(), TE->Scalars.size())))) {
13298 // We may have only 1 or 2 entries here. If the number of scalars is equal
13299 // to the number of entries, there is no need to do the analysis, it is not
13300 // very profitable. Since VL is not the same as TE->Scalars, we already
13301 // have some shuffles before this point. Cut off the unprofitable case.
13302 Entries.clear();
13303 return std::nullopt;
13305 // Build the final mask, check for the identity shuffle, if possible.
13306 bool IsIdentity = Entries.size() == 1;
13307 // Pair.first is the offset into the vector, while Pair.second is the index
13308 // of the scalar in the list.
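  // E.g., with VF == 4, a lane taken from the second entry (Pair.first == 1)
  // gets a mask value of 4 + lane, addressing the notional concatenation of
  // the two source vectors.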
13309 for (const std::pair<unsigned, int> &Pair : EntryLanes) {
13310 unsigned Idx = Part * VL.size() + Pair.second;
13311 Mask[Idx] =
13312 Pair.first * VF +
13313 (ForOrder ? std::distance(
13314 Entries[Pair.first]->Scalars.begin(),
13315 find(Entries[Pair.first]->Scalars, VL[Pair.second]))
13316 : Entries[Pair.first]->findLaneForValue(VL[Pair.second]));
13317 IsIdentity &= Mask[Idx] == Pair.second;
13319 switch (Entries.size()) {
13320 case 1:
13321 if (IsIdentity || EntryLanes.size() > 1 || VL.size() <= 2)
13322 return TargetTransformInfo::SK_PermuteSingleSrc;
13323 break;
13324 case 2:
13325 if (EntryLanes.size() > 2 || VL.size() <= 2)
13326 return TargetTransformInfo::SK_PermuteTwoSrc;
13327 break;
13328 default:
13329 break;
13331 Entries.clear();
13332 // Clear the corresponding mask elements.
13333 std::fill(std::next(Mask.begin(), Part * VL.size()),
13334 std::next(Mask.begin(), (Part + 1) * VL.size()), PoisonMaskElem);
13335 return std::nullopt;
13338 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>>
13339 BoUpSLP::isGatherShuffledEntry(
13340 const TreeEntry *TE, ArrayRef<Value *> VL, SmallVectorImpl<int> &Mask,
13341 SmallVectorImpl<SmallVector<const TreeEntry *>> &Entries, unsigned NumParts,
13342 bool ForOrder) {
13343 assert(NumParts > 0 && NumParts < VL.size() &&
13344 "Expected positive number of registers.");
13345 Entries.clear();
13346 // No need to check for the topmost gather node.
13347 if (TE == VectorizableTree.front().get() &&
13348 (!GatheredLoadsEntriesFirst.has_value() ||
13349 none_of(ArrayRef(VectorizableTree).drop_front(),
13350 [](const std::unique_ptr<TreeEntry> &TE) {
13351 return !TE->isGather();
13352 })))
13353 return {};
13354 // FIXME: Gathering for non-power-of-2 nodes not implemented yet.
13355 if (TE->isNonPowOf2Vec())
13356 return {};
13357 Mask.assign(VL.size(), PoisonMaskElem);
13358 assert((TE->UserTreeIndices.size() == 1 ||
13359 TE == VectorizableTree.front().get()) &&
13360 "Expected only single user of the gather node.");
13361 assert(VL.size() % NumParts == 0 &&
13362 "Number of scalars must be divisible by NumParts.");
13363 if (!TE->UserTreeIndices.empty() &&
13364 TE->UserTreeIndices.front().UserTE->isGather() &&
13365 TE->UserTreeIndices.front().EdgeIdx == UINT_MAX) {
13366 assert((TE->Idx == 0 || TE->getOpcode() == Instruction::ExtractElement ||
13367 isSplat(TE->Scalars)) &&
13368 "Expected splat or extractelements only node.");
13369 return {};
13371 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
13372 SmallVector<std::optional<TTI::ShuffleKind>> Res;
13373 for (unsigned Part : seq<unsigned>(NumParts)) {
13374 ArrayRef<Value *> SubVL =
13375 VL.slice(Part * SliceSize, getNumElems(VL.size(), SliceSize, Part));
13376 SmallVectorImpl<const TreeEntry *> &SubEntries = Entries.emplace_back();
13377 std::optional<TTI::ShuffleKind> SubRes =
13378 isGatherShuffledSingleRegisterEntry(TE, SubVL, Mask, SubEntries, Part,
13379 ForOrder);
13380 if (!SubRes)
13381 SubEntries.clear();
13382 Res.push_back(SubRes);
13383 if (SubEntries.size() == 1 && *SubRes == TTI::SK_PermuteSingleSrc &&
13384 SubEntries.front()->getVectorFactor() == VL.size() &&
13385 (SubEntries.front()->isSame(TE->Scalars) ||
13386 SubEntries.front()->isSame(VL))) {
13387 SmallVector<const TreeEntry *> LocalSubEntries;
13388 LocalSubEntries.swap(SubEntries);
13389 Entries.clear();
13390 Res.clear();
13391 std::iota(Mask.begin(), Mask.end(), 0);
13392 // Clear undef scalars.
13393 for (int I = 0, Sz = VL.size(); I < Sz; ++I)
13394 if (isa<PoisonValue>(VL[I]))
13395 Mask[I] = PoisonMaskElem;
13396 Entries.emplace_back(1, LocalSubEntries.front());
13397 Res.push_back(TargetTransformInfo::SK_PermuteSingleSrc);
13398 return Res;
13401 if (all_of(Res,
13402 [](const std::optional<TTI::ShuffleKind> &SK) { return !SK; })) {
13403 Entries.clear();
13404 return {};
13406 return Res;
13409 InstructionCost BoUpSLP::getGatherCost(ArrayRef<Value *> VL, bool ForPoisonSrc,
13410 Type *ScalarTy) const {
13411 auto *VecTy = getWidenedType(ScalarTy, VL.size());
13412 bool DuplicateNonConst = false;
13413 // Find the cost of inserting/extracting values from the vector.
13414 // Check if the same elements are inserted several times and count them as
13415 // shuffle candidates.
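  // E.g., for VL = {a, b, a, c} only lanes 0, 1 and 3 are counted for
  // insertion; lane 2 reuses lane 0 through the final single-source shuffle
  // with mask {0, 1, 0, 3}.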
13416 APInt ShuffledElements = APInt::getZero(VL.size());
13417 DenseMap<Value *, unsigned> UniqueElements;
13418 constexpr TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
13419 InstructionCost Cost;
13420 auto EstimateInsertCost = [&](unsigned I, Value *V) {
13421 if (V->getType() != ScalarTy) {
13422 Cost += TTI->getCastInstrCost(Instruction::Trunc, ScalarTy, V->getType(),
13423 TTI::CastContextHint::None, CostKind);
13424 V = nullptr;
13426 if (!ForPoisonSrc)
13427 Cost +=
13428 TTI->getVectorInstrCost(Instruction::InsertElement, VecTy, CostKind,
13429 I, Constant::getNullValue(VecTy), V);
13431 SmallVector<int> ShuffleMask(VL.size(), PoisonMaskElem);
13432 for (unsigned I = 0, E = VL.size(); I < E; ++I) {
13433 Value *V = VL[I];
13434 // No need to shuffle duplicates for constants.
13435 if ((ForPoisonSrc && isConstant(V)) || isa<UndefValue>(V)) {
13436 ShuffledElements.setBit(I);
13437 ShuffleMask[I] = isa<PoisonValue>(V) ? PoisonMaskElem : I;
13438 continue;
13441 auto Res = UniqueElements.try_emplace(V, I);
13442 if (Res.second) {
13443 EstimateInsertCost(I, V);
13444 ShuffleMask[I] = I;
13445 continue;
13448 DuplicateNonConst = true;
13449 ShuffledElements.setBit(I);
13450 ShuffleMask[I] = Res.first->second;
13452 if (ForPoisonSrc) {
13453 if (isa<FixedVectorType>(ScalarTy)) {
13454 assert(SLPReVec && "Only supported by REVEC.");
13455 // We don't need to insert elements one by one. Instead, we can insert the
13456 // entire vector into the destination.
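      // E.g., for ScalarTy == <2 x i32> and 4 lanes, each non-shuffled lane
      // is costed as inserting a <2 x i32> subvector at offset 0, 2, 4 or 6
      // of the widened vector.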
13457 Cost = 0;
13458 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
13459 for (unsigned I : seq<unsigned>(VL.size()))
13460 if (!ShuffledElements[I])
13461 Cost += TTI->getShuffleCost(
13462 TTI::SK_InsertSubvector, VecTy, std::nullopt, CostKind,
13463 I * ScalarTyNumElements, cast<FixedVectorType>(ScalarTy));
13464 } else {
13465 Cost = TTI->getScalarizationOverhead(VecTy,
13466 /*DemandedElts*/ ~ShuffledElements,
13467 /*Insert*/ true,
13468 /*Extract*/ false, CostKind, VL);
13471 if (DuplicateNonConst)
13472 Cost += ::getShuffleCost(*TTI, TargetTransformInfo::SK_PermuteSingleSrc,
13473 VecTy, ShuffleMask);
13474 return Cost;
13477 Instruction &BoUpSLP::getLastInstructionInBundle(const TreeEntry *E) {
13478 auto &Res = EntryToLastInstruction.try_emplace(E).first->second;
13479 if (Res)
13480 return *Res;
13481 // Get the basic block this bundle is in. All instructions in the bundle
13482 // should be in this block (except for extractelement-like instructions with
13483 // constant indices or gathered loads).
13484 auto *Front = E->getMainOp();
13485 auto *BB = Front->getParent();
13486 assert(((GatheredLoadsEntriesFirst.has_value() &&
13487 E->getOpcode() == Instruction::Load && E->isGather() &&
13488 E->Idx < *GatheredLoadsEntriesFirst) ||
13489 all_of(E->Scalars,
13490 [=](Value *V) -> bool {
13491 if (E->getOpcode() == Instruction::GetElementPtr &&
13492 !isa<GetElementPtrInst>(V))
13493 return true;
13494 auto *I = dyn_cast<Instruction>(V);
13495 return !I || !E->isOpcodeOrAlt(I) || I->getParent() == BB ||
13496 isVectorLikeInstWithConstOps(I);
13497 })) &&
13498 "Expected gathered loads or GEPs or instructions from same basic "
13499 "block.");
13501 auto FindLastInst = [&]() {
13502 Instruction *LastInst = Front;
13503 for (Value *V : E->Scalars) {
13504 auto *I = dyn_cast<Instruction>(V);
13505 if (!I)
13506 continue;
13507 if (LastInst->getParent() == I->getParent()) {
13508 if (LastInst->comesBefore(I))
13509 LastInst = I;
13510 continue;
13512 assert(((E->getOpcode() == Instruction::GetElementPtr &&
13513 !isa<GetElementPtrInst>(I)) ||
13514 (isVectorLikeInstWithConstOps(LastInst) &&
13515 isVectorLikeInstWithConstOps(I)) ||
13516 (GatheredLoadsEntriesFirst.has_value() &&
13517 E->getOpcode() == Instruction::Load && E->isGather() &&
13518 E->Idx < *GatheredLoadsEntriesFirst)) &&
13519 "Expected vector-like or non-GEP in GEP node insts only.");
13520 if (!DT->isReachableFromEntry(LastInst->getParent())) {
13521 LastInst = I;
13522 continue;
13524 if (!DT->isReachableFromEntry(I->getParent()))
13525 continue;
13526 auto *NodeA = DT->getNode(LastInst->getParent());
13527 auto *NodeB = DT->getNode(I->getParent());
13528 assert(NodeA && "Should only process reachable instructions");
13529 assert(NodeB && "Should only process reachable instructions");
13530 assert((NodeA == NodeB) ==
13531 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
13532 "Different nodes should have different DFS numbers");
13533 if (NodeA->getDFSNumIn() < NodeB->getDFSNumIn())
13534 LastInst = I;
13536 BB = LastInst->getParent();
13537 return LastInst;
13540 auto FindFirstInst = [&]() {
13541 Instruction *FirstInst = Front;
13542 for (Value *V : E->Scalars) {
13543 auto *I = dyn_cast<Instruction>(V);
13544 if (!I)
13545 continue;
13546 if (FirstInst->getParent() == I->getParent()) {
13547 if (I->comesBefore(FirstInst))
13548 FirstInst = I;
13549 continue;
13551 assert(((E->getOpcode() == Instruction::GetElementPtr &&
13552 !isa<GetElementPtrInst>(I)) ||
13553 (isVectorLikeInstWithConstOps(FirstInst) &&
13554 isVectorLikeInstWithConstOps(I))) &&
13555 "Expected vector-like or non-GEP in GEP node insts only.");
13556 if (!DT->isReachableFromEntry(FirstInst->getParent())) {
13557 FirstInst = I;
13558 continue;
13560 if (!DT->isReachableFromEntry(I->getParent()))
13561 continue;
13562 auto *NodeA = DT->getNode(FirstInst->getParent());
13563 auto *NodeB = DT->getNode(I->getParent());
13564 assert(NodeA && "Should only process reachable instructions");
13565 assert(NodeB && "Should only process reachable instructions");
13566 assert((NodeA == NodeB) ==
13567 (NodeA->getDFSNumIn() == NodeB->getDFSNumIn()) &&
13568 "Different nodes should have different DFS numbers");
13569 if (NodeA->getDFSNumIn() > NodeB->getDFSNumIn())
13570 FirstInst = I;
13572 return FirstInst;
13575 // Set the insert point for gathered loads to the very first load.
13576 if (GatheredLoadsEntriesFirst.has_value() &&
13577 E->Idx >= *GatheredLoadsEntriesFirst && !E->isGather() &&
13578 E->getOpcode() == Instruction::Load) {
13579 Res = FindFirstInst();
13580 return *Res;
13583 // Set the insert point to the beginning of the basic block if the entry
13584 // should not be scheduled.
13585 if (doesNotNeedToSchedule(E->Scalars) ||
13586 (!E->isGather() && all_of(E->Scalars, isVectorLikeInstWithConstOps))) {
13587 if ((E->getOpcode() == Instruction::GetElementPtr &&
13588 any_of(E->Scalars,
13589 [](Value *V) {
13590 return !isa<GetElementPtrInst>(V) && isa<Instruction>(V);
13591 })) ||
13592 all_of(E->Scalars,
13593 [](Value *V) {
13594 return isa<PoisonValue>(V) ||
13595 (!isVectorLikeInstWithConstOps(V) &&
13596 isUsedOutsideBlock(V));
13597 }) ||
13598 (E->isGather() && E->Idx == 0 && all_of(E->Scalars, [](Value *V) {
13599 return isa<ExtractElementInst, UndefValue>(V) ||
13600 areAllOperandsNonInsts(V);
13601 })))
13602 Res = FindLastInst();
13603 else
13604 Res = FindFirstInst();
13605 return *Res;
13608 // Find the last instruction. The common case should be that BB has been
13609 // scheduled, and the last instruction is VL.back(). So we start with
13610 // VL.back() and iterate over schedule data until we reach the end of the
13611 // bundle. The end of the bundle is marked by null ScheduleData.
13612 if (BlocksSchedules.count(BB) && !E->isGather()) {
13613 Value *V = E->isOneOf(E->Scalars.back());
13614 if (doesNotNeedToBeScheduled(V))
13615 V = *find_if_not(E->Scalars, doesNotNeedToBeScheduled);
13616 auto *Bundle = BlocksSchedules[BB]->getScheduleData(V);
13617 if (Bundle && Bundle->isPartOfBundle())
13618 for (; Bundle; Bundle = Bundle->NextInBundle)
13619 Res = Bundle->Inst;
13622 // LastInst can still be null at this point if there's either not an entry
13623 // for BB in BlocksSchedules or there's no ScheduleData available for
13624 // VL.back(). This can be the case if buildTree_rec aborts for various
13625 // reasons (e.g., the maximum recursion depth is reached, the maximum region
13626 // size is reached, etc.). ScheduleData is initialized in the scheduling
13627 // "dry-run".
13629 // If this happens, we can still find the last instruction by brute force. We
13630 // iterate forwards from Front (inclusive) until we either see all
13631 // instructions in the bundle or reach the end of the block. If Front is the
13632 // last instruction in program order, LastInst will be set to Front, and we
13633 // will visit all the remaining instructions in the block.
13635 // One of the reasons we exit early from buildTree_rec is to place an upper
13636 // bound on compile-time. Thus, taking an additional compile-time hit here is
13637 // not ideal. However, this should be exceedingly rare since it requires that
13638 // we both exit early from buildTree_rec and that the bundle be out-of-order
13639 // (causing us to iterate all the way to the end of the block).
13640 if (!Res)
13641 Res = FindLastInst();
13642 assert(Res && "Failed to find last instruction in bundle");
13643 return *Res;
13646 void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
13647 auto *Front = E->getMainOp();
13648 Instruction *LastInst = &getLastInstructionInBundle(E);
13649 assert(LastInst && "Failed to find last instruction in bundle");
13650 BasicBlock::iterator LastInstIt = LastInst->getIterator();
13651 // If the instruction is a PHI, set the insert point after all the PHIs.
13652 bool IsPHI = isa<PHINode>(LastInst);
13653 if (IsPHI)
13654 LastInstIt = LastInst->getParent()->getFirstNonPHIIt();
13655 if (IsPHI || (!E->isGather() && doesNotNeedToSchedule(E->Scalars))) {
13656 Builder.SetInsertPoint(LastInst->getParent(), LastInstIt);
13657 } else {
13658 // Set the insertion point after the last instruction in the bundle. Set the
13659 // debug location to Front.
13660 Builder.SetInsertPoint(
13661 LastInst->getParent(),
13662 LastInst->getNextNonDebugInstruction()->getIterator());
13664 Builder.SetCurrentDebugLocation(Front->getDebugLoc());
13667 Value *BoUpSLP::gather(
13668 ArrayRef<Value *> VL, Value *Root, Type *ScalarTy,
13669 function_ref<Value *(Value *, Value *, ArrayRef<int>)> CreateShuffle) {
13670 // List of instructions/lanes from the current block and/or the blocks which
13671 // are part of the current loop. These instructions will be inserted at the
13672 // end to make it possible to optimize loops and hoist invariant instructions
13673 // out of the loop's body with better chances for success.
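  // E.g., when gathering inside a loop, loop-invariant lanes are inserted
  // first and the loop-varying lanes are appended afterwards, so LICM may
  // later hoist the invariant part of the buildvector sequence out of the
  // loop.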
13674 SmallVector<std::pair<Value *, unsigned>, 4> PostponedInsts;
13675 SmallSet<int, 4> PostponedIndices;
13676 Loop *L = LI->getLoopFor(Builder.GetInsertBlock());
13677 auto &&CheckPredecessor = [](BasicBlock *InstBB, BasicBlock *InsertBB) {
13678 SmallPtrSet<BasicBlock *, 4> Visited;
13679 while (InsertBB && InsertBB != InstBB && Visited.insert(InsertBB).second)
13680 InsertBB = InsertBB->getSinglePredecessor();
13681 return InsertBB && InsertBB == InstBB;
13683 for (int I = 0, E = VL.size(); I < E; ++I) {
13684 if (auto *Inst = dyn_cast<Instruction>(VL[I]))
13685 if ((CheckPredecessor(Inst->getParent(), Builder.GetInsertBlock()) ||
13686 getTreeEntry(Inst) ||
13687 (L && (!Root || L->isLoopInvariant(Root)) && L->contains(Inst))) &&
13688 PostponedIndices.insert(I).second)
13689 PostponedInsts.emplace_back(Inst, I);
13692 auto &&CreateInsertElement = [this](Value *Vec, Value *V, unsigned Pos,
13693 Type *Ty) {
13694 Value *Scalar = V;
13695 if (Scalar->getType() != Ty) {
13696 assert(Scalar->getType()->isIntOrIntVectorTy() &&
13697 Ty->isIntOrIntVectorTy() && "Expected integer types only.");
13698 Value *V = Scalar;
13699 if (auto *CI = dyn_cast<CastInst>(Scalar);
13700 isa_and_nonnull<SExtInst, ZExtInst>(CI)) {
13701 Value *Op = CI->getOperand(0);
13702 if (auto *IOp = dyn_cast<Instruction>(Op);
13703 !IOp || !(isDeleted(IOp) || getTreeEntry(IOp)))
13704 V = Op;
13706 Scalar = Builder.CreateIntCast(
13707 V, Ty, !isKnownNonNegative(Scalar, SimplifyQuery(*DL)));
13710 Instruction *InsElt;
13711 if (auto *VecTy = dyn_cast<FixedVectorType>(Scalar->getType())) {
13712 assert(SLPReVec && "FixedVectorType is not expected.");
13713 Vec = InsElt = Builder.CreateInsertVector(
13714 Vec->getType(), Vec, Scalar,
13715 Builder.getInt64(Pos * VecTy->getNumElements()));
13716 auto *II = dyn_cast<IntrinsicInst>(InsElt);
13717 if (!II || II->getIntrinsicID() != Intrinsic::vector_insert)
13718 return Vec;
13719 } else {
13720 Vec = Builder.CreateInsertElement(Vec, Scalar, Builder.getInt32(Pos));
13721 InsElt = dyn_cast<InsertElementInst>(Vec);
13722 if (!InsElt)
13723 return Vec;
13725 GatherShuffleExtractSeq.insert(InsElt);
13726 CSEBlocks.insert(InsElt->getParent());
13727 // Add to our 'need-to-extract' list.
13728 if (isa<Instruction>(V)) {
13729 if (TreeEntry *Entry = getTreeEntry(V)) {
13730 // Find which lane we need to extract.
13731 User *UserOp = nullptr;
13732 if (Scalar != V) {
13733 if (auto *SI = dyn_cast<Instruction>(Scalar))
13734 UserOp = SI;
13735 } else {
13736 UserOp = InsElt;
13738 if (UserOp) {
13739 unsigned FoundLane = Entry->findLaneForValue(V);
13740 ExternalUses.emplace_back(V, UserOp, FoundLane);
13744 return Vec;
13746 auto *VecTy = getWidenedType(ScalarTy, VL.size());
13747 Value *Vec = PoisonValue::get(VecTy);
13748 SmallVector<int> NonConsts;
13749 SmallVector<int> Mask(VL.size());
13750 std::iota(Mask.begin(), Mask.end(), 0);
13751 Value *OriginalRoot = Root;
13752 if (auto *SV = dyn_cast_or_null<ShuffleVectorInst>(Root);
13753 SV && isa<PoisonValue>(SV->getOperand(1)) &&
13754 SV->getOperand(0)->getType() == VecTy) {
13755 Root = SV->getOperand(0);
13756 Mask.assign(SV->getShuffleMask().begin(), SV->getShuffleMask().end());
13758 // Insert constant values first.
13759 for (int I = 0, E = VL.size(); I < E; ++I) {
13760 if (PostponedIndices.contains(I))
13761 continue;
13762 if (!isConstant(VL[I])) {
13763 NonConsts.push_back(I);
13764 continue;
13766 if (isa<PoisonValue>(VL[I]))
13767 continue;
13768 Vec = CreateInsertElement(Vec, VL[I], I, ScalarTy);
13769 Mask[I] = I + E;
13771 if (Root) {
13772 if (isa<PoisonValue>(Vec)) {
13773 Vec = OriginalRoot;
13774 } else {
13775 Vec = CreateShuffle(Root, Vec, Mask);
13776 if (auto *OI = dyn_cast<Instruction>(OriginalRoot);
13777 OI && OI->hasNUses(0) &&
13778 none_of(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
13779 return TE->VectorizedValue == OI;
13781 eraseInstruction(OI);
13784 // Insert non-constant values.
13785 for (int I : NonConsts)
13786 Vec = CreateInsertElement(Vec, VL[I], I, ScalarTy);
13787 // Append instructions which are/may be part of the loop at the end, to make
13788 // it possible to hoist non-loop-based instructions.
13789 for (const std::pair<Value *, unsigned> &Pair : PostponedInsts)
13790 Vec = CreateInsertElement(Vec, Pair.first, Pair.second, ScalarTy);
13792 return Vec;
13795 /// Merges shuffle masks and emits the final shuffle instruction, if required.
13796 /// It supports shuffling of 2 input vectors. It implements lazy shuffle
13797 /// emission: the actual shuffle instruction is generated only if it is really
13798 /// required. Otherwise, the shuffle instruction emission is delayed till the
13799 /// end of the process, to reduce the number of emitted instructions and to
13800 /// ease further analysis/transformations.
13801 /// The class will also look through the previously emitted shuffle
13802 /// instructions and properly mark indices in the mask as undef.
13803 /// For example, given the code
13804 /// \code
13805 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0>
13806 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0>
13807 /// \endcode
13808 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 3, 2>, it
13809 /// will look through %s1 and %s2 and emit
13810 /// \code
13811 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
13812 /// \endcode
13813 /// instead.
13814 /// If the 2 operands are of different sizes, the smaller one will be resized
13815 /// and the mask recalculated properly.
13816 /// For example, given the code
13817 /// \code
13818 /// %s1 = shufflevector <2 x ty> %0, poison, <1, 0, 1, 0>
13819 /// %s2 = shufflevector <2 x ty> %1, poison, <1, 0, 1, 0>
13820 /// \endcode
13821 /// and if we need to emit a shuffle of %s1 and %s2 with mask <1, 0, 5, 4>, it
13822 /// will look through %s1 and %s2 and emit
13823 /// \code
13824 /// %res = shufflevector <2 x ty> %0, %1, <0, 1, 2, 3>
13825 /// \endcode
13826 /// instead.
13827 class BoUpSLP::ShuffleInstructionBuilder final : public BaseShuffleAnalysis {
13828 bool IsFinalized = false;
13829 /// Combined mask for all applied operands and masks. It is built during
13830 /// analysis and actual emission of shuffle vector instructions.
13831 SmallVector<int> CommonMask;
13832 /// List of operands for the shuffle vector instruction. It holds at most 2
13833 /// operands; if a 3rd one is about to be added, the first 2 are combined into
13834 /// a shuffle with the \p CommonMask mask, the first operand is set to the
13835 /// resulting shuffle and the second operand is set to the newly added
13836 /// operand. The \p CommonMask is transformed accordingly after that.
13837 SmallVector<Value *, 2> InVectors;
13838 IRBuilderBase &Builder;
13839 BoUpSLP &R;
13841 class ShuffleIRBuilder {
13842 IRBuilderBase &Builder;
13843 /// Holds all of the instructions that we gathered.
13844 SetVector<Instruction *> &GatherShuffleExtractSeq;
13845 /// A list of blocks that we are going to CSE.
13846 DenseSet<BasicBlock *> &CSEBlocks;
13847 /// Data layout.
13848 const DataLayout &DL;
13850 public:
13851 ShuffleIRBuilder(IRBuilderBase &Builder,
13852 SetVector<Instruction *> &GatherShuffleExtractSeq,
13853 DenseSet<BasicBlock *> &CSEBlocks, const DataLayout &DL)
13854 : Builder(Builder), GatherShuffleExtractSeq(GatherShuffleExtractSeq),
13855 CSEBlocks(CSEBlocks), DL(DL) {}
13856 ~ShuffleIRBuilder() = default;
13857 /// Creates shufflevector for the 2 operands with the given mask.
13858 Value *createShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask) {
13859 if (V1->getType() != V2->getType()) {
13860 assert(V1->getType()->isIntOrIntVectorTy() &&
13861 V2->getType()->isIntOrIntVectorTy() &&
13862 "Expected integer vector types only.");
13863 if (V1->getType() != V2->getType()) {
13864 if (cast<VectorType>(V2->getType())
13865 ->getElementType()
13866 ->getIntegerBitWidth() < cast<VectorType>(V1->getType())
13867 ->getElementType()
13868 ->getIntegerBitWidth())
13869 V2 = Builder.CreateIntCast(
13870 V2, V1->getType(), !isKnownNonNegative(V2, SimplifyQuery(DL)));
13871 else
13872 V1 = Builder.CreateIntCast(
13873 V1, V2->getType(), !isKnownNonNegative(V1, SimplifyQuery(DL)));
13876 Value *Vec = Builder.CreateShuffleVector(V1, V2, Mask);
13877 if (auto *I = dyn_cast<Instruction>(Vec)) {
13878 GatherShuffleExtractSeq.insert(I);
13879 CSEBlocks.insert(I->getParent());
13881 return Vec;
13883 /// Creates a permutation of the single vector operand with the given mask,
13884 /// if it is not an identity mask.
13885 Value *createShuffleVector(Value *V1, ArrayRef<int> Mask) {
13886 if (Mask.empty())
13887 return V1;
13888 unsigned VF = Mask.size();
13889 unsigned LocalVF = cast<FixedVectorType>(V1->getType())->getNumElements();
13890 if (VF == LocalVF && ShuffleVectorInst::isIdentityMask(Mask, VF))
13891 return V1;
13892 Value *Vec = Builder.CreateShuffleVector(V1, Mask);
13893 if (auto *I = dyn_cast<Instruction>(Vec)) {
13894 GatherShuffleExtractSeq.insert(I);
13895 CSEBlocks.insert(I->getParent());
13897 return Vec;
13899 Value *createIdentity(Value *V) { return V; }
13900 Value *createPoison(Type *Ty, unsigned VF) {
13901 return PoisonValue::get(getWidenedType(Ty, VF));
13903 /// Resizes 2 input vectors to match their sizes, if they are not equal
13904 /// yet. The smaller vector is resized to the size of the larger vector.
13905 void resizeToMatch(Value *&V1, Value *&V2) {
13906 if (V1->getType() == V2->getType())
13907 return;
13908 int V1VF = cast<FixedVectorType>(V1->getType())->getNumElements();
13909 int V2VF = cast<FixedVectorType>(V2->getType())->getNumElements();
13910 int VF = std::max(V1VF, V2VF);
13911 int MinVF = std::min(V1VF, V2VF);
13912 SmallVector<int> IdentityMask(VF, PoisonMaskElem);
13913 std::iota(IdentityMask.begin(), std::next(IdentityMask.begin(), MinVF),
13915 Value *&Op = MinVF == V1VF ? V1 : V2;
13916 Op = Builder.CreateShuffleVector(Op, IdentityMask);
13917 if (auto *I = dyn_cast<Instruction>(Op)) {
13918 GatherShuffleExtractSeq.insert(I);
13919 CSEBlocks.insert(I->getParent());
13921 if (MinVF == V1VF)
13922 V1 = Op;
13923 else
13924 V2 = Op;
13928 /// Smart shuffle instruction emission, walks through shuffle trees and
13929 /// tries to find the best matching vector for the actual shuffle
13930 /// instruction.
13931 Value *createShuffle(Value *V1, Value *V2, ArrayRef<int> Mask) {
13932 assert(V1 && "Expected at least one vector value.");
13933 ShuffleIRBuilder ShuffleBuilder(Builder, R.GatherShuffleExtractSeq,
13934 R.CSEBlocks, *R.DL);
13935 return BaseShuffleAnalysis::createShuffle<Value *>(V1, V2, Mask,
13936 ShuffleBuilder);
13939 /// Transforms the mask \p CommonMask per the given \p Mask to produce the
13940 /// proper combined mask after shuffle emission.
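  /// After the shuffle has been emitted, the lanes selected by \p Mask
  /// already sit at their final positions in the produced vector, so
  /// \p CommonMask becomes the identity for those lanes.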
13941 static void transformMaskAfterShuffle(MutableArrayRef<int> CommonMask,
13942 ArrayRef<int> Mask) {
13943 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
13944 if (Mask[Idx] != PoisonMaskElem)
13945 CommonMask[Idx] = Idx;
13948 /// Cast value \p V to the vector type with the same number of elements, but
13949 /// the base type \p ScalarTy.
13950 Value *castToScalarTyElem(Value *V,
13951 std::optional<bool> IsSigned = std::nullopt) {
13952 auto *VecTy = cast<VectorType>(V->getType());
13953 assert(getNumElements(VecTy) % getNumElements(ScalarTy) == 0);
13954 if (VecTy->getElementType() == ScalarTy->getScalarType())
13955 return V;
13956 return Builder.CreateIntCast(
13957 V, VectorType::get(ScalarTy->getScalarType(), VecTy->getElementCount()),
13958 IsSigned.value_or(!isKnownNonNegative(V, SimplifyQuery(*R.DL))));
13961 public:
13962 ShuffleInstructionBuilder(Type *ScalarTy, IRBuilderBase &Builder, BoUpSLP &R)
13963 : BaseShuffleAnalysis(ScalarTy), Builder(Builder), R(R) {}
13965 /// Adjusts extractelements after reusing them.
13966 Value *adjustExtracts(const TreeEntry *E, MutableArrayRef<int> Mask,
13967 ArrayRef<std::optional<TTI::ShuffleKind>> ShuffleKinds,
13968 unsigned NumParts, bool &UseVecBaseAsInput) {
13969 UseVecBaseAsInput = false;
13970 SmallPtrSet<Value *, 4> UniqueBases;
13971 Value *VecBase = nullptr;
13972 SmallVector<Value *> VL(E->Scalars.begin(), E->Scalars.end());
13973 if (!E->ReorderIndices.empty()) {
13974 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
13975 E->ReorderIndices.end());
13976 reorderScalars(VL, ReorderMask);
13978 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
13979 int Idx = Mask[I];
13980 if (Idx == PoisonMaskElem)
13981 continue;
13982 auto *EI = cast<ExtractElementInst>(VL[I]);
13983 VecBase = EI->getVectorOperand();
13984 if (const TreeEntry *TE = R.getTreeEntry(VecBase))
13985 VecBase = TE->VectorizedValue;
13986 assert(VecBase && "Expected vectorized value.");
13987 UniqueBases.insert(VecBase);
13988 // If the only use is vectorized, the extractelement itself can be
13989 // deleted.
13990 if (!EI->hasOneUse() || R.ExternalUsesAsOriginalScalar.contains(EI) ||
13991 (NumParts != 1 && count(VL, EI) > 1) ||
13992 any_of(EI->users(), [&](User *U) {
13993 const TreeEntry *UTE = R.getTreeEntry(U);
13994 return !UTE || R.MultiNodeScalars.contains(U) ||
13995 (isa<GetElementPtrInst>(U) &&
13996 !R.areAllUsersVectorized(cast<Instruction>(U))) ||
13997 count_if(R.VectorizableTree,
13998 [&](const std::unique_ptr<TreeEntry> &TE) {
13999 return any_of(TE->UserTreeIndices,
14000 [&](const EdgeInfo &Edge) {
14001 return Edge.UserTE == UTE;
14002 }) &&
14003 is_contained(VL, EI);
14004 }) != 1;
14006 continue;
14007 R.eraseInstruction(EI);
14009 if (NumParts == 1 || UniqueBases.size() == 1) {
14010 assert(VecBase && "Expected vectorized value.");
14011 return castToScalarTyElem(VecBase);
14013 UseVecBaseAsInput = true;
14014 auto TransformToIdentity = [](MutableArrayRef<int> Mask) {
14015 for (auto [I, Idx] : enumerate(Mask))
14016 if (Idx != PoisonMaskElem)
14017 Idx = I;
14019 // Perform a multi-register vector shuffle, joining the parts into a single
14020 // long virtual vector.
14021 // Need to shuffle each part independently and then insert all these parts
14022 // into a long virtual vector register, forming the original vector.
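    // E.g., with 2 parts of 4 lanes each, part 0 is shuffled from its (at
    // most 2) extract bases, part 1 likewise, and the resulting sub-vectors
    // are then combined into the final 8-lane vector via VecMask.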
14023 Value *Vec = nullptr;
14024 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
14025 unsigned SliceSize = getPartNumElems(VL.size(), NumParts);
14026 for (unsigned Part : seq<unsigned>(NumParts)) {
14027 unsigned Limit = getNumElems(VL.size(), SliceSize, Part);
14028 ArrayRef<Value *> SubVL = ArrayRef(VL).slice(Part * SliceSize, Limit);
14029 MutableArrayRef<int> SubMask = Mask.slice(Part * SliceSize, Limit);
14030 constexpr int MaxBases = 2;
14031 SmallVector<Value *, MaxBases> Bases(MaxBases);
14032 auto VLMask = zip(SubVL, SubMask);
14033 const unsigned VF = std::accumulate(
14034 VLMask.begin(), VLMask.end(), 0U, [&](unsigned S, const auto &D) {
14035 if (std::get<1>(D) == PoisonMaskElem)
14036 return S;
14037 Value *VecOp =
14038 cast<ExtractElementInst>(std::get<0>(D))->getVectorOperand();
14039 if (const TreeEntry *TE = R.getTreeEntry(VecOp))
14040 VecOp = TE->VectorizedValue;
14041 assert(VecOp && "Expected vectorized value.");
14042 const unsigned Size =
14043 cast<FixedVectorType>(VecOp->getType())->getNumElements();
14044 return std::max(S, Size);
14046 for (const auto [V, I] : VLMask) {
14047 if (I == PoisonMaskElem)
14048 continue;
14049 Value *VecOp = cast<ExtractElementInst>(V)->getVectorOperand();
14050 if (const TreeEntry *TE = R.getTreeEntry(VecOp))
14051 VecOp = TE->VectorizedValue;
14052 assert(VecOp && "Expected vectorized value.");
14053 VecOp = castToScalarTyElem(VecOp);
14054 Bases[I / VF] = VecOp;
14056 if (!Bases.front())
14057 continue;
14058 Value *SubVec;
14059 if (Bases.back()) {
14060 SubVec = createShuffle(Bases.front(), Bases.back(), SubMask);
14061 TransformToIdentity(SubMask);
14062 } else {
14063 SubVec = Bases.front();
14065 if (!Vec) {
14066 Vec = SubVec;
14067 assert((Part == 0 || all_of(seq<unsigned>(0, Part),
14068 [&](unsigned P) {
14069 ArrayRef<int> SubMask =
14070 Mask.slice(P * SliceSize,
14071 getNumElems(Mask.size(),
14072 SliceSize, P));
14073 return all_of(SubMask, [](int Idx) {
14074 return Idx == PoisonMaskElem;
14076 })) &&
14077 "Expected first part or all previous parts masked.");
14078 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize));
14079 } else {
14080 unsigned NewVF =
14081 cast<FixedVectorType>(Vec->getType())->getNumElements();
14082 if (Vec->getType() != SubVec->getType()) {
14083 unsigned SubVecVF =
14084 cast<FixedVectorType>(SubVec->getType())->getNumElements();
14085 NewVF = std::max(NewVF, SubVecVF);
14087 // Adjust SubMask.
14088 for (int &Idx : SubMask)
14089 if (Idx != PoisonMaskElem)
14090 Idx += NewVF;
14091 copy(SubMask, std::next(VecMask.begin(), Part * SliceSize));
14092 Vec = createShuffle(Vec, SubVec, VecMask);
14093 TransformToIdentity(VecMask);
14096 copy(VecMask, Mask.begin());
14097 return Vec;
14099 /// Checks if the specified entry \p E needs to be delayed because of its
14100 /// dependency nodes.
14101 std::optional<Value *>
14102 needToDelay(const TreeEntry *E,
14103 ArrayRef<SmallVector<const TreeEntry *>> Deps) const {
14104 // No need to delay emission if all deps are ready.
14105 if (all_of(Deps, [](ArrayRef<const TreeEntry *> TEs) {
14106 return all_of(
14107 TEs, [](const TreeEntry *TE) { return TE->VectorizedValue; });
14109 return std::nullopt;
14110 // Postpone the gather emission; it will be emitted after the end of the
14111 // process to keep the correct order.
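    // Note: the aligned load from a poison pointer below only serves as a
    // typed placeholder of the right vector width; it is replaced later, when
    // the postponed gather is actually emitted.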
14112 auto *ResVecTy = getWidenedType(ScalarTy, E->getVectorFactor());
14113 return Builder.CreateAlignedLoad(
14114 ResVecTy,
14115 PoisonValue::get(PointerType::getUnqual(ScalarTy->getContext())),
14116 MaybeAlign());
14118 /// Adds 2 input vectors (in the form of tree entries) and the mask for their
14119 /// shuffling.
14120 void add(const TreeEntry &E1, const TreeEntry &E2, ArrayRef<int> Mask) {
14121 Value *V1 = E1.VectorizedValue;
14122 if (V1->getType()->isIntOrIntVectorTy())
14123 V1 = castToScalarTyElem(V1, any_of(E1.Scalars, [&](Value *V) {
14124 if (isa<PoisonValue>(V))
14125 return false;
14126 return !isKnownNonNegative(
14127 V, SimplifyQuery(*R.DL));
14128 }));
14129 Value *V2 = E2.VectorizedValue;
14130 if (V2->getType()->isIntOrIntVectorTy())
14131 V2 = castToScalarTyElem(V2, any_of(E2.Scalars, [&](Value *V) {
14132 if (isa<PoisonValue>(V))
14133 return false;
14134 return !isKnownNonNegative(
14135 V, SimplifyQuery(*R.DL));
14136 }));
14137 add(V1, V2, Mask);
14139 /// Adds a single input vector (in the form of a tree entry) and the mask
14140 /// for its shuffling.
14141 void add(const TreeEntry &E1, ArrayRef<int> Mask) {
14142 Value *V1 = E1.VectorizedValue;
14143 if (V1->getType()->isIntOrIntVectorTy())
14144 V1 = castToScalarTyElem(V1, any_of(E1.Scalars, [&](Value *V) {
14145 if (isa<PoisonValue>(V))
14146 return false;
14147 return !isKnownNonNegative(
14148 V, SimplifyQuery(*R.DL));
14149 }));
14150 add(V1, Mask);
14152 /// Adds 2 input vectors and the mask for their shuffling.
14153 void add(Value *V1, Value *V2, ArrayRef<int> Mask) {
14154 assert(V1 && V2 && !Mask.empty() && "Expected non-empty input vectors.");
14155 assert(isa<FixedVectorType>(V1->getType()) &&
14156 isa<FixedVectorType>(V2->getType()) &&
14157 "castToScalarTyElem expects V1 and V2 to be FixedVectorType");
14158 V1 = castToScalarTyElem(V1);
14159 V2 = castToScalarTyElem(V2);
14160 if (InVectors.empty()) {
14161 InVectors.push_back(V1);
14162 InVectors.push_back(V2);
14163 CommonMask.assign(Mask.begin(), Mask.end());
14164 return;
14166 Value *Vec = InVectors.front();
14167 if (InVectors.size() == 2) {
14168 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14169 transformMaskAfterShuffle(CommonMask, CommonMask);
14170 } else if (cast<FixedVectorType>(Vec->getType())->getNumElements() !=
14171 Mask.size()) {
14172 Vec = createShuffle(Vec, nullptr, CommonMask);
14173 transformMaskAfterShuffle(CommonMask, CommonMask);
14175 V1 = createShuffle(V1, V2, Mask);
14176 unsigned VF = std::max(getVF(V1), getVF(Vec));
14177 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14178 if (Mask[Idx] != PoisonMaskElem)
14179 CommonMask[Idx] = Idx + VF;
14180 InVectors.front() = Vec;
14181 if (InVectors.size() == 2)
14182 InVectors.back() = V1;
14183 else
14184 InVectors.push_back(V1);
14186 /// Adds one more input vector and the mask for its shuffling.
14187 void add(Value *V1, ArrayRef<int> Mask, bool = false) {
14188 assert(isa<FixedVectorType>(V1->getType()) &&
14189 "castToScalarTyElem expects V1 to be FixedVectorType");
14190 V1 = castToScalarTyElem(V1);
14191 if (InVectors.empty()) {
14192 InVectors.push_back(V1);
14193 CommonMask.assign(Mask.begin(), Mask.end());
14194 return;
14196 const auto *It = find(InVectors, V1);
14197 if (It == InVectors.end()) {
14198 if (InVectors.size() == 2 ||
14199 InVectors.front()->getType() != V1->getType()) {
14200 Value *V = InVectors.front();
14201 if (InVectors.size() == 2) {
14202 V = createShuffle(InVectors.front(), InVectors.back(), CommonMask);
14203 transformMaskAfterShuffle(CommonMask, CommonMask);
14204 } else if (cast<FixedVectorType>(V->getType())->getNumElements() !=
14205 CommonMask.size()) {
14206 V = createShuffle(InVectors.front(), nullptr, CommonMask);
14207 transformMaskAfterShuffle(CommonMask, CommonMask);
14209 unsigned VF = std::max(CommonMask.size(), Mask.size());
14210 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14211 if (CommonMask[Idx] == PoisonMaskElem && Mask[Idx] != PoisonMaskElem)
14212 CommonMask[Idx] =
14213 V->getType() != V1->getType()
14214 ? Idx + VF
14215 : Mask[Idx] + cast<FixedVectorType>(V1->getType())
14216 ->getNumElements();
14217 if (V->getType() != V1->getType())
14218 V1 = createShuffle(V1, nullptr, Mask);
14219 InVectors.front() = V;
14220 if (InVectors.size() == 2)
14221 InVectors.back() = V1;
14222 else
14223 InVectors.push_back(V1);
14224 return;
14226 // Check if the second vector is required, or whether all the used elements
14227 // are already covered by the first one.
14228 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14229 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem) {
14230 InVectors.push_back(V1);
14231 break;
14234 int VF = getVF(V1);
14235 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14236 if (Mask[Idx] != PoisonMaskElem && CommonMask[Idx] == PoisonMaskElem)
14237 CommonMask[Idx] = Mask[Idx] + (It == InVectors.begin() ? 0 : VF);
14239 /// Adds one more input vector and the mask for its shuffling.
14240 void addOrdered(Value *V1, ArrayRef<unsigned> Order) {
14241 SmallVector<int> NewMask;
14242 inversePermutation(Order, NewMask);
14243 add(V1, NewMask);
14245 Value *gather(ArrayRef<Value *> VL, unsigned MaskVF = 0,
14246 Value *Root = nullptr) {
14247 return R.gather(VL, Root, ScalarTy,
14248 [&](Value *V1, Value *V2, ArrayRef<int> Mask) {
14249 return createShuffle(V1, V2, Mask);
14252 Value *createFreeze(Value *V) { return Builder.CreateFreeze(V); }
14253 /// Finalize emission of the shuffles.
14254 /// \param Action the action (if any) to be performed before the final
14255 /// application of the \p ExtMask mask.
14256 Value *
14257 finalize(ArrayRef<int> ExtMask,
14258 ArrayRef<std::pair<const TreeEntry *, unsigned>> SubVectors,
14259 ArrayRef<int> SubVectorsMask, unsigned VF = 0,
14260 function_ref<void(Value *&, SmallVectorImpl<int> &)> Action = {}) {
14261 IsFinalized = true;
14262 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
14263 SmallVector<int> NewExtMask(ExtMask);
14264 if (ScalarTyNumElements != 1) {
14265 assert(SLPReVec && "FixedVectorType is not expected.");
14266 transformScalarShuffleIndiciesToVector(ScalarTyNumElements, CommonMask);
14267 transformScalarShuffleIndiciesToVector(ScalarTyNumElements, NewExtMask);
14268 ExtMask = NewExtMask;
14270 if (Action) {
14271 Value *Vec = InVectors.front();
14272 if (InVectors.size() == 2) {
14273 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14274 InVectors.pop_back();
14275 } else {
14276 Vec = createShuffle(Vec, nullptr, CommonMask);
14278 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14279 if (CommonMask[Idx] != PoisonMaskElem)
14280 CommonMask[Idx] = Idx;
14281 assert(VF > 0 &&
14282 "Expected vector length for the final value before action.");
14283 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
14284 if (VecVF < VF) {
14285 SmallVector<int> ResizeMask(VF, PoisonMaskElem);
14286 std::iota(ResizeMask.begin(), std::next(ResizeMask.begin(), VecVF), 0);
14287 Vec = createShuffle(Vec, nullptr, ResizeMask);
14289 Action(Vec, CommonMask);
14290 InVectors.front() = Vec;
14292 if (!SubVectors.empty()) {
14293 Value *Vec = InVectors.front();
14294 if (InVectors.size() == 2) {
14295 Vec = createShuffle(Vec, InVectors.back(), CommonMask);
14296 InVectors.pop_back();
14297 } else {
14298 Vec = createShuffle(Vec, nullptr, CommonMask);
14300 for (unsigned Idx = 0, Sz = CommonMask.size(); Idx < Sz; ++Idx)
14301 if (CommonMask[Idx] != PoisonMaskElem)
14302 CommonMask[Idx] = Idx;
14303 auto CreateSubVectors = [&](Value *Vec,
14304 SmallVectorImpl<int> &CommonMask) {
14305 for (auto [E, Idx] : SubVectors) {
14306 Value *V = E->VectorizedValue;
14307 if (V->getType()->isIntOrIntVectorTy())
14308 V = castToScalarTyElem(V, any_of(E->Scalars, [&](Value *V) {
14309 if (isa<PoisonValue>(V))
14310 return false;
14311 return !isKnownNonNegative(
14312 V, SimplifyQuery(*R.DL));
14313 }));
14314 unsigned InsertionIndex = Idx * ScalarTyNumElements;
14315 const unsigned SubVecVF =
14316 cast<FixedVectorType>(V->getType())->getNumElements();
14317 if (InsertionIndex % SubVecVF == 0) {
14318 Vec = Builder.CreateInsertVector(Vec->getType(), Vec, V,
14319 Builder.getInt64(InsertionIndex));
14320 } else {
14321 // Create a shuffle; insertvector requires that the index is a multiple of
14322 // the subvector's length.
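        // E.g., inserting a 2-element subvector at lane 3 of an 8-lane
        // vector cannot use llvm.vector.insert (3 is not a multiple of 2),
        // so a shufflevector that takes lanes 3-4 from the subvector is
        // emitted instead.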
14323 const unsigned VecVF =
14324 cast<FixedVectorType>(Vec->getType())->getNumElements();
14325 SmallVector<int> Mask(VecVF, PoisonMaskElem);
14326 std::iota(Mask.begin(), Mask.end(), 0);
14327 for (unsigned I : seq<unsigned>(
14328 InsertionIndex, (Idx + SubVecVF) * ScalarTyNumElements))
14329 Mask[I] = I - Idx + VecVF;
14330 Vec = createShuffle(Vec, V, Mask);
14332 if (!CommonMask.empty()) {
14333 std::iota(
14334 std::next(CommonMask.begin(), InsertionIndex),
14335 std::next(CommonMask.begin(),
14336 (Idx + E->getVectorFactor()) * ScalarTyNumElements),
14337 InsertionIndex);
14340 return Vec;
14342 if (SubVectorsMask.empty()) {
14343 Vec = CreateSubVectors(Vec, CommonMask);
14344 } else {
14345 SmallVector<int> SVMask(CommonMask.size(), PoisonMaskElem);
14346 copy(SubVectorsMask, SVMask.begin());
14347 for (auto [I1, I2] : zip(SVMask, CommonMask)) {
14348 if (I2 != PoisonMaskElem) {
14349 assert(I1 == PoisonMaskElem && "Expected unused subvectors mask");
14350 I1 = I2 + CommonMask.size();
14353 Value *InsertVec =
14354 CreateSubVectors(PoisonValue::get(Vec->getType()), CommonMask);
14355 Vec = createShuffle(InsertVec, Vec, SVMask);
14356 for (unsigned I : seq<unsigned>(CommonMask.size())) {
14357 if (SVMask[I] != PoisonMaskElem)
14358 CommonMask[I] = I;
14361 InVectors.front() = Vec;
14364 if (!ExtMask.empty()) {
14365 if (CommonMask.empty()) {
14366 CommonMask.assign(ExtMask.begin(), ExtMask.end());
14367 } else {
14368 SmallVector<int> NewMask(ExtMask.size(), PoisonMaskElem);
14369 for (int I = 0, Sz = ExtMask.size(); I < Sz; ++I) {
14370 if (ExtMask[I] == PoisonMaskElem)
14371 continue;
14372 NewMask[I] = CommonMask[ExtMask[I]];
14374 CommonMask.swap(NewMask);
14377 if (CommonMask.empty()) {
14378 assert(InVectors.size() == 1 && "Expected only one vector with no mask");
14379 return InVectors.front();
14381 if (InVectors.size() == 2)
14382 return createShuffle(InVectors.front(), InVectors.back(), CommonMask);
14383 return createShuffle(InVectors.front(), nullptr, CommonMask);
14386 ~ShuffleInstructionBuilder() {
14387 assert((IsFinalized || CommonMask.empty()) &&
14388 "Shuffle construction must be finalized.");
14392 BoUpSLP::TreeEntry *BoUpSLP::getMatchedVectorizedOperand(const TreeEntry *E,
14393 unsigned NodeIdx) {
14394 ArrayRef<Value *> VL = E->getOperand(NodeIdx);
14395 InstructionsState S = getSameOpcode(VL, *TLI);
14396 // Special processing for a GEP bundle, which may include non-GEP values.
14397 if (!S.getOpcode() && VL.front()->getType()->isPointerTy()) {
14398 const auto *It = find_if(VL, IsaPred<GetElementPtrInst>);
14399 if (It != VL.end())
14400 S = getSameOpcode(*It, *TLI);
14402 if (!S.getOpcode())
14403 return nullptr;
14404 auto CheckSameVE = [&](const TreeEntry *VE) {
14405 return VE->isSame(VL) &&
14406 (any_of(VE->UserTreeIndices,
14407 [E, NodeIdx](const EdgeInfo &EI) {
14408 return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
14409 }) ||
14410 any_of(VectorizableTree,
14411 [E, NodeIdx, VE](const std::unique_ptr<TreeEntry> &TE) {
14412 return TE->isOperandGatherNode(
14413 {const_cast<TreeEntry *>(E), NodeIdx}) &&
14414 VE->isSame(TE->Scalars);
14415 }));
14417 TreeEntry *VE = getTreeEntry(S.getMainOp());
14418 if (VE && CheckSameVE(VE))
14419 return VE;
14420 auto It = MultiNodeScalars.find(S.getMainOp());
14421 if (It != MultiNodeScalars.end()) {
14422 auto *I = find_if(It->getSecond(), [&](const TreeEntry *TE) {
14423 return TE != VE && CheckSameVE(TE);
14425 if (I != It->getSecond().end())
14426 return *I;
14428 return nullptr;
14431 Value *BoUpSLP::vectorizeOperand(TreeEntry *E, unsigned NodeIdx,
14432 bool PostponedPHIs) {
14433 ValueList &VL = E->getOperand(NodeIdx);
14434 const unsigned VF = VL.size();
14435 if (TreeEntry *VE = getMatchedVectorizedOperand(E, NodeIdx)) {
14436 auto FinalShuffle = [&](Value *V, ArrayRef<int> Mask) {
14437 // V may be affected by MinBWs.
14438 // We want ShuffleInstructionBuilder to correctly support REVEC. The key
14439 // factor is the number of elements, not their type.
14440 Type *ScalarTy = cast<VectorType>(V->getType())->getElementType();
14441 unsigned NumElements = getNumElements(VL.front()->getType());
14442 ShuffleInstructionBuilder ShuffleBuilder(
14443 NumElements != 1 ? FixedVectorType::get(ScalarTy, NumElements)
14444 : ScalarTy,
14445 Builder, *this);
14446 ShuffleBuilder.add(V, Mask);
14447 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
14448 E->CombinedEntriesWithIndices.size());
14449 transform(E->CombinedEntriesWithIndices, SubVectors.begin(),
14450 [&](const auto &P) {
14451 return std::make_pair(VectorizableTree[P.first].get(),
14452 P.second);
14454 assert((E->CombinedEntriesWithIndices.empty() ||
14455 E->ReorderIndices.empty()) &&
14456 "Expected either combined subnodes or reordering");
14457 return ShuffleBuilder.finalize({}, SubVectors, {});
14459 Value *V = vectorizeTree(VE, PostponedPHIs);
14460 if (VF * getNumElements(VL[0]->getType()) !=
14461 cast<FixedVectorType>(V->getType())->getNumElements()) {
14462 if (!VE->ReuseShuffleIndices.empty()) {
14463 // Reshuffle to get only unique values.
14464 // If some of the scalars are duplicated in the vectorization
14465 // tree entry, we do not vectorize them but instead generate a
14466 // mask for the reuses. But if there are several users of the
14467 // same entry, they may have different vectorization factors.
14468 // This is especially important for PHI nodes. In this case, we
14469 // need to adapt the resulting instruction for the user
14470 // vectorization factor and have to reshuffle it again to take
14471 // only unique elements of the vector. Without this code the
14472 // function would incorrectly return a reduced vector instruction with
14473 // the same elements, not with the unique ones.
14475 // block:
14476 // %phi = phi <2 x > { .., %entry} {%shuffle, %block}
14477 // %2 = shuffle <2 x > %phi, poison, <4 x > <1, 1, 0, 0>
14478 // ... (use %2)
14479 // %shuffle = shuffle <2 x> %2, poison, <2 x> {2, 0}
14480 // br %block
14481 SmallVector<int> Mask(VF, PoisonMaskElem);
14482 for (auto [I, V] : enumerate(VL)) {
14483 if (isa<PoisonValue>(V))
14484 continue;
14485 Mask[I] = VE->findLaneForValue(V);
14487 V = FinalShuffle(V, Mask);
14488 } else {
14489 assert(VF < cast<FixedVectorType>(V->getType())->getNumElements() &&
14490 "Expected vectorization factor less "
14491 "than original vector size.");
14492 SmallVector<int> UniformMask(VF, 0);
14493 std::iota(UniformMask.begin(), UniformMask.end(), 0);
14494 V = FinalShuffle(V, UniformMask);
14497 // Need to update the operand gather node if the operand is actually not a
14498 // vectorized node but a buildvector/gather node which matches one of
14499 // the vectorized nodes.
14500 if (find_if(VE->UserTreeIndices, [&](const EdgeInfo &EI) {
14501 return EI.UserTE == E && EI.EdgeIdx == NodeIdx;
14502 }) == VE->UserTreeIndices.end()) {
14503 auto *It =
14504 find_if(VectorizableTree, [&](const std::unique_ptr<TreeEntry> &TE) {
14505 return TE->isGather() && TE->UserTreeIndices.front().UserTE == E &&
14506 TE->UserTreeIndices.front().EdgeIdx == NodeIdx;
14508 assert(It != VectorizableTree.end() && "Expected gather node operand.");
14509 (*It)->VectorizedValue = V;
14511 return V;
14514 // Find the corresponding gather entry and vectorize it.
14515 // This allows us to be more accurate with tree/graph transformations and
14516 // checks the correctness of the transformations in many cases.
14517 auto *I = find_if(VectorizableTree,
14518 [E, NodeIdx](const std::unique_ptr<TreeEntry> &TE) {
14519 return TE->isOperandGatherNode({E, NodeIdx});
14521 assert(I != VectorizableTree.end() && "Gather node is not in the graph.");
14522 assert(I->get()->UserTreeIndices.size() == 1 &&
14523 "Expected only single user for the gather node.");
14524 assert(I->get()->isSame(VL) && "Expected same list of scalars.");
14525 return vectorizeTree(I->get(), PostponedPHIs);
14528 template <typename BVTy, typename ResTy, typename... Args>
14529 ResTy BoUpSLP::processBuildVector(const TreeEntry *E, Type *ScalarTy,
14530 Args &...Params) {
14531 assert(E->isGather() && "Expected gather node.");
14532 unsigned VF = E->getVectorFactor();
14534 bool NeedFreeze = false;
14535 SmallVector<int> ReuseShuffleIndices(E->ReuseShuffleIndices.begin(),
14536 E->ReuseShuffleIndices.end());
14537 SmallVector<Value *> GatheredScalars(E->Scalars.begin(), E->Scalars.end());
14538 // Clear values, to be replaced by insertvector instructions.
14539 for (auto [EIdx, Idx] : E->CombinedEntriesWithIndices)
14540 for_each(MutableArrayRef(GatheredScalars)
14541 .slice(Idx, VectorizableTree[EIdx]->getVectorFactor()),
14542 [&](Value *&V) { V = PoisonValue::get(V->getType()); });
14543 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
14544 E->CombinedEntriesWithIndices.size());
14545 transform(E->CombinedEntriesWithIndices, SubVectors.begin(),
14546 [&](const auto &P) {
14547 return std::make_pair(VectorizableTree[P.first].get(), P.second);
14549 // Build a mask out of the reorder indices and reorder scalars per this
14550 // mask.
14551 SmallVector<int> ReorderMask(E->ReorderIndices.begin(),
14552 E->ReorderIndices.end());
14553 if (!ReorderMask.empty())
14554 reorderScalars(GatheredScalars, ReorderMask);
14555 SmallVector<int> SubVectorsMask;
14556 inversePermutation(E->ReorderIndices, SubVectorsMask);
14557 // Transform non-clustered elements in the mask to poison (-1).
14558 // "Clustered" operations will be reordered using this mask later.
14559 if (!SubVectors.empty() && !SubVectorsMask.empty()) {
14560 for (unsigned I : seq<unsigned>(GatheredScalars.size()))
14561 if (E->Scalars[I] == GatheredScalars[ReorderMask[I]])
14562 SubVectorsMask[ReorderMask[I]] = PoisonMaskElem;
14563 } else {
14564 SubVectorsMask.clear();
14566 SmallVector<Value *> StoredGS(GatheredScalars);
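// FindReusedSplat: for a splat that still contains non-poison undefs, try to
// prove the broadcast value is safe to reuse (either the input vector is
// known non-poisonous or the sibling operand of the same user node already
// contains it) and, if so, rewrite the mask slice to an identity or a
// single-lane broadcast of the existing input vector instead of emitting a
// separate gather.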
14567 auto FindReusedSplat = [&](MutableArrayRef<int> Mask, unsigned InputVF,
14568 unsigned I, unsigned SliceSize,
14569 bool IsNotPoisonous) {
14570 if (!isSplat(E->Scalars) || none_of(E->Scalars, [](Value *V) {
14571 return isa<UndefValue>(V) && !isa<PoisonValue>(V);
14573 return false;
14574 TreeEntry *UserTE = E->UserTreeIndices.back().UserTE;
14575 unsigned EdgeIdx = E->UserTreeIndices.back().EdgeIdx;
14576 if (UserTE->getNumOperands() != 2)
14577 return false;
14578 if (!IsNotPoisonous) {
14579 auto *It =
14580 find_if(VectorizableTree, [=](const std::unique_ptr<TreeEntry> &TE) {
14581 return find_if(TE->UserTreeIndices, [=](const EdgeInfo &EI) {
14582 return EI.UserTE == UserTE && EI.EdgeIdx != EdgeIdx;
14583 }) != TE->UserTreeIndices.end();
14585 if (It == VectorizableTree.end())
14586 return false;
14587 SmallVector<Value *> GS((*It)->Scalars.begin(), (*It)->Scalars.end());
14588 if (!(*It)->ReorderIndices.empty()) {
14589 inversePermutation((*It)->ReorderIndices, ReorderMask);
14590 reorderScalars(GS, ReorderMask);
14592 if (!all_of(zip(GatheredScalars, GS), [&](const auto &P) {
14593 Value *V0 = std::get<0>(P);
14594 Value *V1 = std::get<1>(P);
14595 return !isa<UndefValue>(V0) || isa<PoisonValue>(V0) ||
14596 (isa<UndefValue>(V0) && !isa<PoisonValue>(V0) &&
14597 is_contained(E->Scalars, V1));
14599 return false;
14601 int Idx;
14602 if ((Mask.size() < InputVF &&
14603 ShuffleVectorInst::isExtractSubvectorMask(Mask, InputVF, Idx) &&
14604 Idx == 0) ||
14605 (Mask.size() == InputVF &&
14606 ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))) {
14607 std::iota(
14608 std::next(Mask.begin(), I * SliceSize),
14609 std::next(Mask.begin(),
14610 I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
14612 } else {
14613 unsigned IVal =
14614 *find_if_not(Mask, [](int Idx) { return Idx == PoisonMaskElem; });
14615 std::fill(
14616 std::next(Mask.begin(), I * SliceSize),
14617 std::next(Mask.begin(),
14618 I * SliceSize + getNumElems(Mask.size(), SliceSize, I)),
14619 IVal);
14621 return true;
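// Emission proper: first try to express the gather as shuffles of existing
// vectors (extractelement sources and already vectorized tree entries), then
// pack the remaining scalars into constant/non-constant build vectors and
// finalize, adding a freeze if potentially poisonous undefs were replaced.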
14623 BVTy ShuffleBuilder(ScalarTy, Params...);
14624 ResTy Res = ResTy();
14625 SmallVector<int> Mask;
14626 SmallVector<int> ExtractMask(GatheredScalars.size(), PoisonMaskElem);
14627 SmallVector<std::optional<TTI::ShuffleKind>> ExtractShuffles;
14628 Value *ExtractVecBase = nullptr;
14629 bool UseVecBaseAsInput = false;
14630 SmallVector<std::optional<TargetTransformInfo::ShuffleKind>> GatherShuffles;
14631 SmallVector<SmallVector<const TreeEntry *>> Entries;
14632 Type *OrigScalarTy = GatheredScalars.front()->getType();
14633 auto *VecTy = getWidenedType(ScalarTy, GatheredScalars.size());
14634 unsigned NumParts = TTI->getNumberOfParts(VecTy);
14635 if (NumParts == 0 || NumParts >= GatheredScalars.size() ||
14636 VecTy->getNumElements() % NumParts != 0 ||
14637 !hasFullVectorsOrPowerOf2(*TTI, VecTy->getElementType(),
14638 VecTy->getNumElements() / NumParts))
14639 NumParts = 1;
14640 if (!all_of(GatheredScalars, IsaPred<UndefValue>)) {
14641 // Check for gathered extracts.
14642 bool Resized = false;
14643 ExtractShuffles =
14644 tryToGatherExtractElements(GatheredScalars, ExtractMask, NumParts);
14645 if (!ExtractShuffles.empty()) {
14646 SmallVector<const TreeEntry *> ExtractEntries;
14647 for (auto [Idx, I] : enumerate(ExtractMask)) {
14648 if (I == PoisonMaskElem)
14649 continue;
14650 if (const auto *TE = getTreeEntry(
14651 cast<ExtractElementInst>(StoredGS[Idx])->getVectorOperand()))
14652 ExtractEntries.push_back(TE);
14654 if (std::optional<ResTy> Delayed =
14655 ShuffleBuilder.needToDelay(E, ExtractEntries)) {
14656 // Delay emission of gathers which are not ready yet.
14657 PostponedGathers.insert(E);
14659 // Postpone gather emission; it will be emitted after the end of the
14660 // process to keep the correct order.
14660 return *Delayed;
14662 if (Value *VecBase = ShuffleBuilder.adjustExtracts(
14663 E, ExtractMask, ExtractShuffles, NumParts, UseVecBaseAsInput)) {
14664 ExtractVecBase = VecBase;
14665 if (auto *VecBaseTy = dyn_cast<FixedVectorType>(VecBase->getType()))
14666 if (VF == VecBaseTy->getNumElements() &&
14667 GatheredScalars.size() != VF) {
14668 Resized = true;
14669 GatheredScalars.append(VF - GatheredScalars.size(),
14670 PoisonValue::get(OrigScalarTy));
14674 // Gather extracts only after checking for fully matched gathers.
14675 if (!ExtractShuffles.empty() || E->getOpcode() != Instruction::Load ||
14676 ((E->getOpcode() == Instruction::Load ||
14677 any_of(E->Scalars, IsaPred<LoadInst>)) &&
14678 any_of(E->Scalars,
14679 [this](Value *V) {
14680 return isa<LoadInst>(V) && getTreeEntry(V);
14681 })) ||
14682 E->isAltShuffle() ||
14683 all_of(E->Scalars, [this](Value *V) { return getTreeEntry(V); }) ||
14684 isSplat(E->Scalars) ||
14685 (E->Scalars != GatheredScalars && GatheredScalars.size() <= 2)) {
14686 GatherShuffles =
14687 isGatherShuffledEntry(E, GatheredScalars, Mask, Entries, NumParts);
14689 if (!GatherShuffles.empty()) {
14690 if (std::optional<ResTy> Delayed =
14691 ShuffleBuilder.needToDelay(E, Entries)) {
14692 // Delay emission of gathers which are not ready yet.
14693 PostponedGathers.insert(E);
14694 // Postpone gather emission; it will be emitted after the end of the
14695 // process to keep the correct order.
14696 return *Delayed;
14698 if (GatherShuffles.size() == 1 &&
14699 *GatherShuffles.front() == TTI::SK_PermuteSingleSrc &&
14700 Entries.front().front()->isSame(E->Scalars)) {
14701 // Perfect match in the graph, will reuse the previously vectorized
14702 // node. Cost is 0.
14703 LLVM_DEBUG(dbgs() << "SLP: perfect diamond match for gather bundle "
14704 << shortBundleName(E->Scalars, E->Idx) << ".\n");
14705 // Restore the mask for previous partially matched values.
14706 Mask.resize(E->Scalars.size());
14707 const TreeEntry *FrontTE = Entries.front().front();
14708 if (FrontTE->ReorderIndices.empty() &&
14709 ((FrontTE->ReuseShuffleIndices.empty() &&
14710 E->Scalars.size() == FrontTE->Scalars.size()) ||
14711 (E->Scalars.size() == FrontTE->ReuseShuffleIndices.size()))) {
14712 std::iota(Mask.begin(), Mask.end(), 0);
14713 } else {
14714 for (auto [I, V] : enumerate(E->Scalars)) {
14715 if (isa<PoisonValue>(V)) {
14716 Mask[I] = PoisonMaskElem;
14717 continue;
14719 Mask[I] = FrontTE->findLaneForValue(V);
14722 ShuffleBuilder.add(*FrontTE, Mask);
14723 Res = ShuffleBuilder.finalize(E->getCommonMask(), SubVectors,
14724 SubVectorsMask);
14725 return Res;
14727 if (!Resized) {
14728 if (GatheredScalars.size() != VF &&
14729 any_of(Entries, [&](ArrayRef<const TreeEntry *> TEs) {
14730 return any_of(TEs, [&](const TreeEntry *TE) {
14731 return TE->getVectorFactor() == VF;
14734 GatheredScalars.append(VF - GatheredScalars.size(),
14735 PoisonValue::get(OrigScalarTy));
14737 // Remove shuffled elements from list of gathers.
14738 for (int I = 0, Sz = Mask.size(); I < Sz; ++I) {
14739 if (Mask[I] != PoisonMaskElem)
14740 GatheredScalars[I] = PoisonValue::get(OrigScalarTy);
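// TryPackScalars deduplicates the remaining scalars: repeated values are
// expressed through ReuseMask, splats become a broadcast of the first lane,
// and undefs are either mapped onto a known non-poisonous scalar or turned
// into poison (which requires a freeze of the final vector).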
14744 auto TryPackScalars = [&](SmallVectorImpl<Value *> &Scalars,
14745 SmallVectorImpl<int> &ReuseMask,
14746 bool IsRootPoison) {
14747 // For splats we can emit broadcasts instead of gathers, so try to find
14748 // such sequences.
14749 bool IsSplat = IsRootPoison && isSplat(Scalars) &&
14750 (Scalars.size() > 2 || Scalars.front() == Scalars.back());
14751 Scalars.append(VF - Scalars.size(), PoisonValue::get(OrigScalarTy));
14752 SmallVector<int> UndefPos;
14753 DenseMap<Value *, unsigned> UniquePositions;
14754 // Gather unique non-const values and all constant values.
14755 // For repeated values, just shuffle them.
14756 int NumNonConsts = 0;
14757 int SinglePos = 0;
14758 for (auto [I, V] : enumerate(Scalars)) {
14759 if (isa<UndefValue>(V)) {
14760 if (!isa<PoisonValue>(V)) {
14761 ReuseMask[I] = I;
14762 UndefPos.push_back(I);
14764 continue;
14766 if (isConstant(V)) {
14767 ReuseMask[I] = I;
14768 continue;
14770 ++NumNonConsts;
14771 SinglePos = I;
14772 Value *OrigV = V;
14773 Scalars[I] = PoisonValue::get(OrigScalarTy);
14774 if (IsSplat) {
14775 Scalars.front() = OrigV;
14776 ReuseMask[I] = 0;
14777 } else {
14778 const auto Res = UniquePositions.try_emplace(OrigV, I);
14779 Scalars[Res.first->second] = OrigV;
14780 ReuseMask[I] = Res.first->second;
14783 if (NumNonConsts == 1) {
14784 // Restore single insert element.
14785 if (IsSplat) {
14786 ReuseMask.assign(VF, PoisonMaskElem);
14787 std::swap(Scalars.front(), Scalars[SinglePos]);
14788 if (!UndefPos.empty() && UndefPos.front() == 0)
14789 Scalars.front() = UndefValue::get(OrigScalarTy);
14791 ReuseMask[SinglePos] = SinglePos;
14792 } else if (!UndefPos.empty() && IsSplat) {
14793 // For undef values, try to replace them with a simple broadcast.
14794 // We can do this if the broadcasted value is guaranteed to be
14795 // non-poisonous, or by freezing the incoming scalar value first.
14796 auto *It = find_if(Scalars, [this, E](Value *V) {
14797 return !isa<UndefValue>(V) &&
14798 (getTreeEntry(V) || isGuaranteedNotToBePoison(V) ||
14799 (E->UserTreeIndices.size() == 1 &&
14800 any_of(V->uses(), [E](const Use &U) {
14801 // Check if the value is already used in the same operation in
14802 // one of the nodes.
14803 return E->UserTreeIndices.front().EdgeIdx !=
14804 U.getOperandNo() &&
14805 is_contained(
14806 E->UserTreeIndices.front().UserTE->Scalars,
14807 U.getUser());
14808 })));
14810 if (It != Scalars.end()) {
14811 // Replace undefs by the non-poisoned scalars and emit broadcast.
14812 int Pos = std::distance(Scalars.begin(), It);
14813 for (int I : UndefPos) {
14814 // Set the undef position to the non-poisoned scalar.
14815 ReuseMask[I] = Pos;
14816 // Replace the undef with poison; in the mask it is already replaced
14817 // by the non-poisoned scalar.
14818 if (I != Pos)
14819 Scalars[I] = PoisonValue::get(OrigScalarTy);
14821 } else {
14822 // Replace undefs by the poisons, emit broadcast and then emit
14823 // freeze.
14824 for (int I : UndefPos) {
14825 ReuseMask[I] = PoisonMaskElem;
14826 if (isa<UndefValue>(Scalars[I]))
14827 Scalars[I] = PoisonValue::get(OrigScalarTy);
14829 NeedFreeze = true;
14833 if (!ExtractShuffles.empty() || !GatherShuffles.empty()) {
14834 bool IsNonPoisoned = true;
14835 bool IsUsedInExpr = true;
14836 Value *Vec1 = nullptr;
14837 if (!ExtractShuffles.empty()) {
14838 // A gather of extractelements can be represented as just a shuffle of
14839 // the one or two vectors the scalars are extracted from.
14840 // Find input vectors.
14841 Value *Vec2 = nullptr;
14842 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) {
14843 if (!Mask.empty() && Mask[I] != PoisonMaskElem)
14844 ExtractMask[I] = PoisonMaskElem;
14846 if (UseVecBaseAsInput) {
14847 Vec1 = ExtractVecBase;
14848 } else {
14849 for (unsigned I = 0, Sz = ExtractMask.size(); I < Sz; ++I) {
14850 if (ExtractMask[I] == PoisonMaskElem)
14851 continue;
14852 if (isa<UndefValue>(E->Scalars[I]))
14853 continue;
14854 auto *EI = cast<ExtractElementInst>(StoredGS[I]);
14855 Value *VecOp = EI->getVectorOperand();
14856 if (const auto *TE = getTreeEntry(VecOp))
14857 if (TE->VectorizedValue)
14858 VecOp = TE->VectorizedValue;
14859 if (!Vec1) {
14860 Vec1 = VecOp;
14861 } else if (Vec1 != VecOp) {
14862 assert((!Vec2 || Vec2 == VecOp) &&
14863 "Expected only 1 or 2 vectors shuffle.");
14864 Vec2 = VecOp;
14868 if (Vec2) {
14869 IsUsedInExpr = false;
14870 IsNonPoisoned &=
14871 isGuaranteedNotToBePoison(Vec1) && isGuaranteedNotToBePoison(Vec2);
14872 ShuffleBuilder.add(Vec1, Vec2, ExtractMask);
14873 } else if (Vec1) {
14874 bool IsNotPoisonedVec = isGuaranteedNotToBePoison(Vec1);
14875 IsUsedInExpr &= FindReusedSplat(
14876 ExtractMask,
14877 cast<FixedVectorType>(Vec1->getType())->getNumElements(), 0,
14878 ExtractMask.size(), IsNotPoisonedVec);
14879 ShuffleBuilder.add(Vec1, ExtractMask, /*ForExtracts=*/true);
14880 IsNonPoisoned &= IsNotPoisonedVec;
14881 } else {
14882 IsUsedInExpr = false;
14883 ShuffleBuilder.add(PoisonValue::get(VecTy), ExtractMask,
14884 /*ForExtracts=*/true);
14887 if (!GatherShuffles.empty()) {
14888 unsigned SliceSize = getPartNumElems(E->Scalars.size(), NumParts);
14889 SmallVector<int> VecMask(Mask.size(), PoisonMaskElem);
14890 for (const auto [I, TEs] : enumerate(Entries)) {
14891 if (TEs.empty()) {
14892 assert(!GatherShuffles[I] &&
14893 "No shuffles with empty entries list expected.");
14894 continue;
14896 assert((TEs.size() == 1 || TEs.size() == 2) &&
14897 "Expected shuffle of 1 or 2 entries.");
14898 unsigned Limit = getNumElems(Mask.size(), SliceSize, I);
14899 auto SubMask = ArrayRef(Mask).slice(I * SliceSize, Limit);
14900 VecMask.assign(VecMask.size(), PoisonMaskElem);
14901 copy(SubMask, std::next(VecMask.begin(), I * SliceSize));
14902 if (TEs.size() == 1) {
14903 bool IsNotPoisonedVec =
14904 TEs.front()->VectorizedValue
14905 ? isGuaranteedNotToBePoison(TEs.front()->VectorizedValue)
14906 : true;
14907 IsUsedInExpr &=
14908 FindReusedSplat(VecMask, TEs.front()->getVectorFactor(), I,
14909 SliceSize, IsNotPoisonedVec);
14910 ShuffleBuilder.add(*TEs.front(), VecMask);
14911 IsNonPoisoned &= IsNotPoisonedVec;
14912 } else {
14913 IsUsedInExpr = false;
14914 ShuffleBuilder.add(*TEs.front(), *TEs.back(), VecMask);
14915 if (TEs.front()->VectorizedValue && TEs.back()->VectorizedValue)
14916 IsNonPoisoned &=
14917 isGuaranteedNotToBePoison(TEs.front()->VectorizedValue) &&
14918 isGuaranteedNotToBePoison(TEs.back()->VectorizedValue);
14922 // Try to figure out the best way to combine the values: build a shuffle
14923 // and insert elements, or just build several shuffles.
14924 // Insert non-constant scalars.
14925 SmallVector<Value *> NonConstants(GatheredScalars);
14926 int EMSz = ExtractMask.size();
14927 int MSz = Mask.size();
14928 // Try to build a constant vector and shuffle with it only if we currently
14929 // have a single permutation and more than one scalar constant.
14930 bool IsSingleShuffle = ExtractShuffles.empty() || GatherShuffles.empty();
14931 bool IsIdentityShuffle =
14932 ((UseVecBaseAsInput ||
14933 all_of(ExtractShuffles,
14934 [](const std::optional<TTI::ShuffleKind> &SK) {
14935 return SK.value_or(TTI::SK_PermuteTwoSrc) ==
14936 TTI::SK_PermuteSingleSrc;
14937 })) &&
14938 none_of(ExtractMask, [&](int I) { return I >= EMSz; }) &&
14939 ShuffleVectorInst::isIdentityMask(ExtractMask, EMSz)) ||
14940 (!GatherShuffles.empty() &&
14941 all_of(GatherShuffles,
14942 [](const std::optional<TTI::ShuffleKind> &SK) {
14943 return SK.value_or(TTI::SK_PermuteTwoSrc) ==
14944 TTI::SK_PermuteSingleSrc;
14945 }) &&
14946 none_of(Mask, [&](int I) { return I >= MSz; }) &&
14947 ShuffleVectorInst::isIdentityMask(Mask, MSz));
14948 bool EnoughConstsForShuffle =
14949 IsSingleShuffle &&
14950 (none_of(GatheredScalars,
14951 [](Value *V) {
14952 return isa<UndefValue>(V) && !isa<PoisonValue>(V);
14953 }) ||
14954 any_of(GatheredScalars,
14955 [](Value *V) {
14956 return isa<Constant>(V) && !isa<UndefValue>(V);
14957 })) &&
14958 (!IsIdentityShuffle ||
14959 (GatheredScalars.size() == 2 &&
14960 any_of(GatheredScalars,
14961 [](Value *V) { return !isa<UndefValue>(V); })) ||
14962 count_if(GatheredScalars, [](Value *V) {
14963 return isa<Constant>(V) && !isa<PoisonValue>(V);
14964 }) > 1);
14965 // The NonConstants array contains just the non-constant values; GatheredScalars
14966 // contains only constants, used to build the final vector and then shuffle.
14967 for (int I = 0, Sz = GatheredScalars.size(); I < Sz; ++I) {
14968 if (EnoughConstsForShuffle && isa<Constant>(GatheredScalars[I]))
14969 NonConstants[I] = PoisonValue::get(OrigScalarTy);
14970 else
14971 GatheredScalars[I] = PoisonValue::get(OrigScalarTy);
14973 // Generate constants for final shuffle and build a mask for them.
14974 if (!all_of(GatheredScalars, IsaPred<PoisonValue>)) {
14975 SmallVector<int> BVMask(GatheredScalars.size(), PoisonMaskElem);
14976 TryPackScalars(GatheredScalars, BVMask, /*IsRootPoison=*/true);
14977 Value *BV = ShuffleBuilder.gather(GatheredScalars, BVMask.size());
14978 ShuffleBuilder.add(BV, BVMask);
14980 if (all_of(NonConstants, [=](Value *V) {
14981 return isa<PoisonValue>(V) ||
14982 (IsSingleShuffle && ((IsIdentityShuffle &&
14983 IsNonPoisoned) || IsUsedInExpr) && isa<UndefValue>(V));
14985 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
14986 SubVectorsMask);
14987 else
14988 Res = ShuffleBuilder.finalize(
14989 E->ReuseShuffleIndices, SubVectors, SubVectorsMask, E->Scalars.size(),
14990 [&](Value *&Vec, SmallVectorImpl<int> &Mask) {
14991 TryPackScalars(NonConstants, Mask, /*IsRootPoison=*/false);
14992 Vec = ShuffleBuilder.gather(NonConstants, Mask.size(), Vec);
14994 } else if (!allConstant(GatheredScalars)) {
14995 // Gather unique scalars and all constants.
14996 SmallVector<int> ReuseMask(GatheredScalars.size(), PoisonMaskElem);
14997 TryPackScalars(GatheredScalars, ReuseMask, /*IsRootPoison=*/true);
14998 Value *BV = ShuffleBuilder.gather(GatheredScalars, ReuseMask.size());
14999 ShuffleBuilder.add(BV, ReuseMask);
15000 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
15001 SubVectorsMask);
15002 } else {
15003 // Gather all constants.
15004 SmallVector<int> Mask(GatheredScalars.size(), PoisonMaskElem);
15005 for (auto [I, V] : enumerate(GatheredScalars)) {
15006 if (!isa<PoisonValue>(V))
15007 Mask[I] = I;
15009 Value *BV = ShuffleBuilder.gather(GatheredScalars);
15010 ShuffleBuilder.add(BV, Mask);
15011 Res = ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors,
15012 SubVectorsMask);
15015 if (NeedFreeze)
15016 Res = ShuffleBuilder.createFreeze(Res);
15017 return Res;
15020 Value *BoUpSLP::createBuildVector(const TreeEntry *E, Type *ScalarTy,
15021 bool PostponedPHIs) {
15022 for (auto [EIdx, _] : E->CombinedEntriesWithIndices)
15023 (void)vectorizeTree(VectorizableTree[EIdx].get(), PostponedPHIs);
15024 return processBuildVector<ShuffleInstructionBuilder, Value *>(E, ScalarTy,
15025 Builder, *this);
15028 /// \returns \p Inst after propagating metadata from \p VL, considering only
15029 /// the instructions in \p VL.
15030 static Instruction *propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
15031 SmallVector<Value *> Insts;
15032 for (Value *V : VL)
15033 if (isa<Instruction>(V))
15034 Insts.push_back(V);
15035 return llvm::propagateMetadata(Inst, Insts);
15038 Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
15039 IRBuilderBase::InsertPointGuard Guard(Builder);
15041 if (E->VectorizedValue &&
15042 (E->State != TreeEntry::Vectorize || E->getOpcode() != Instruction::PHI ||
15043 E->isAltShuffle())) {
15044 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
15045 return E->VectorizedValue;
15048 Value *V = E->Scalars.front();
15049 Type *ScalarTy = V->getType();
15050 if (!isa<CmpInst>(V))
15051 ScalarTy = getValueType(V);
15052 auto It = MinBWs.find(E);
15053 if (It != MinBWs.end()) {
15054 auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy);
15055 ScalarTy = IntegerType::get(F->getContext(), It->second.first);
15056 if (VecTy)
15057 ScalarTy = getWidenedType(ScalarTy, VecTy->getNumElements());
15059 auto *VecTy = getWidenedType(ScalarTy, E->Scalars.size());
15060 if (E->isGather()) {
15061 // Set insert point for non-reduction initial nodes.
15062 if (E->getMainOp() && E->Idx == 0 && !UserIgnoreList)
15063 setInsertPointAfterBundle(E);
15064 Value *Vec = createBuildVector(E, ScalarTy, PostponedPHIs);
15065 E->VectorizedValue = Vec;
15066 return Vec;
15069 bool IsReverseOrder = isReverseOrder(E->ReorderIndices);
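// FinalShuffle applies the shuffles recorded on the tree entry (reordering,
// reuse indices and combined subvectors) to the freshly created vector.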
15070 auto FinalShuffle = [&](Value *V, const TreeEntry *E) {
15071 ShuffleInstructionBuilder ShuffleBuilder(ScalarTy, Builder, *this);
15072 if (E->getOpcode() == Instruction::Store &&
15073 E->State == TreeEntry::Vectorize) {
15074 ArrayRef<int> Mask =
15075 ArrayRef(reinterpret_cast<const int *>(E->ReorderIndices.begin()),
15076 E->ReorderIndices.size());
15077 ShuffleBuilder.add(V, Mask);
15078 } else if (E->State == TreeEntry::StridedVectorize && IsReverseOrder) {
15079 ShuffleBuilder.addOrdered(V, {});
15080 } else {
15081 ShuffleBuilder.addOrdered(V, E->ReorderIndices);
15083 SmallVector<std::pair<const TreeEntry *, unsigned>> SubVectors(
15084 E->CombinedEntriesWithIndices.size());
15085 transform(
15086 E->CombinedEntriesWithIndices, SubVectors.begin(), [&](const auto &P) {
15087 return std::make_pair(VectorizableTree[P.first].get(), P.second);
15089 assert(
15090 (E->CombinedEntriesWithIndices.empty() || E->ReorderIndices.empty()) &&
15091 "Expected either combined subnodes or reordering");
15092 return ShuffleBuilder.finalize(E->ReuseShuffleIndices, SubVectors, {});
15095 assert(!E->isGather() && "Unhandled state");
15096 unsigned ShuffleOrOp =
15097 E->isAltShuffle() ? (unsigned)Instruction::ShuffleVector : E->getOpcode();
15098 Instruction *VL0 = E->getMainOp();
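// GetOperandSignedness tells whether casts of operand Idx to the minimized
// bit width must be signed, based on MinBWs or on the sign of the operand's
// scalars.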
15099 auto GetOperandSignedness = [&](unsigned Idx) {
15100 const TreeEntry *OpE = getOperandEntry(E, Idx);
15101 bool IsSigned = false;
15102 auto It = MinBWs.find(OpE);
15103 if (It != MinBWs.end())
15104 IsSigned = It->second.second;
15105 else
15106 IsSigned = any_of(OpE->Scalars, [&](Value *R) {
15107 if (isa<PoisonValue>(R))
15108 return false;
15109 return !isKnownNonNegative(R, SimplifyQuery(*DL));
15111 return IsSigned;
15113 switch (ShuffleOrOp) {
15114 case Instruction::PHI: {
15115 assert((E->ReorderIndices.empty() || !E->ReuseShuffleIndices.empty() ||
15116 E != VectorizableTree.front().get() ||
15117 !E->UserTreeIndices.empty()) &&
15118 "PHI reordering is free.");
15119 if (PostponedPHIs && E->VectorizedValue)
15120 return E->VectorizedValue;
15121 auto *PH = cast<PHINode>(VL0);
15122 Builder.SetInsertPoint(PH->getParent(),
15123 PH->getParent()->getFirstNonPHIIt());
15124 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
15125 if (PostponedPHIs || !E->VectorizedValue) {
15126 PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
15127 E->PHI = NewPhi;
15128 Value *V = NewPhi;
15130 // Adjust the insertion point once all PHIs have been generated.
15131 Builder.SetInsertPoint(PH->getParent(),
15132 PH->getParent()->getFirstInsertionPt());
15133 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
15135 V = FinalShuffle(V, E);
15137 E->VectorizedValue = V;
15138 if (PostponedPHIs)
15139 return V;
15141 PHINode *NewPhi = cast<PHINode>(E->PHI);
15142 // If the phi node is fully emitted, exit.
15143 if (NewPhi->getNumIncomingValues() != 0)
15144 return NewPhi;
15146 // PHINodes may have multiple entries from the same block. We want to
15147 // visit every block once.
15148 SmallPtrSet<BasicBlock *, 4> VisitedBBs;
15150 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) {
15151 ValueList Operands;
15152 BasicBlock *IBB = PH->getIncomingBlock(I);
15154 // Stop emission if all incoming values are generated.
15155 if (NewPhi->getNumIncomingValues() == PH->getNumIncomingValues()) {
15156 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15157 return NewPhi;
15160 if (!VisitedBBs.insert(IBB).second) {
15161 NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
15162 continue;
15165 Builder.SetInsertPoint(IBB->getTerminator());
15166 Builder.SetCurrentDebugLocation(PH->getDebugLoc());
15167 Value *Vec = vectorizeOperand(E, I, /*PostponedPHIs=*/true);
15168 if (VecTy != Vec->getType()) {
15169 assert((It != MinBWs.end() || getOperandEntry(E, I)->isGather() ||
15170 MinBWs.contains(getOperandEntry(E, I))) &&
15171 "Expected item in MinBWs.");
15172 Vec = Builder.CreateIntCast(Vec, VecTy, GetOperandSignedness(I));
15174 NewPhi->addIncoming(Vec, IBB);
15177 assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
15178 "Invalid number of incoming values");
15179 assert(E->VectorizedValue && "Expected vectorized value.");
15180 return E->VectorizedValue;
15183 case Instruction::ExtractElement: {
15184 Value *V = E->getSingleOperand(0);
15185 if (const TreeEntry *TE = getTreeEntry(V))
15186 V = TE->VectorizedValue;
15187 setInsertPointAfterBundle(E);
15188 V = FinalShuffle(V, E);
15189 E->VectorizedValue = V;
15190 return V;
15192 case Instruction::ExtractValue: {
15193 auto *LI = cast<LoadInst>(E->getSingleOperand(0));
15194 Builder.SetInsertPoint(LI);
15195 Value *Ptr = LI->getPointerOperand();
15196 LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
15197 Value *NewV = ::propagateMetadata(V, E->Scalars);
15198 NewV = FinalShuffle(NewV, E);
15199 E->VectorizedValue = NewV;
15200 return NewV;
15202 case Instruction::InsertElement: {
15203 assert(E->ReuseShuffleIndices.empty() && "All inserts should be unique");
15204 Builder.SetInsertPoint(cast<Instruction>(E->Scalars.back()));
15205 Value *V = vectorizeOperand(E, 1, PostponedPHIs);
15206 ArrayRef<Value *> Op = E->getOperand(1);
15207 Type *ScalarTy = Op.front()->getType();
15208 if (cast<VectorType>(V->getType())->getElementType() != ScalarTy) {
15209 assert(ScalarTy->isIntegerTy() && "Expected item in MinBWs.");
15210 std::pair<unsigned, bool> Res = MinBWs.lookup(getOperandEntry(E, 1));
15211 assert(Res.first > 0 && "Expected item in MinBWs.");
15212 V = Builder.CreateIntCast(
15214 getWidenedType(
15215 ScalarTy,
15216 cast<FixedVectorType>(V->getType())->getNumElements()),
15217 Res.second);
15220 // Create InsertVector shuffle if necessary
15221 auto *FirstInsert = cast<Instruction>(*find_if(E->Scalars, [E](Value *V) {
15222 return !is_contained(E->Scalars, cast<Instruction>(V)->getOperand(0));
15223 }));
15224 const unsigned NumElts =
15225 cast<FixedVectorType>(FirstInsert->getType())->getNumElements();
15226 const unsigned NumScalars = E->Scalars.size();
15228 unsigned Offset = *getElementIndex(VL0);
15229 assert(Offset < NumElts && "Failed to find vector index offset");
15231 // Create shuffle to resize vector
15232 SmallVector<int> Mask;
15233 if (!E->ReorderIndices.empty()) {
15234 inversePermutation(E->ReorderIndices, Mask);
15235 Mask.append(NumElts - NumScalars, PoisonMaskElem);
15236 } else {
15237 Mask.assign(NumElts, PoisonMaskElem);
15238 std::iota(Mask.begin(), std::next(Mask.begin(), NumScalars), 0);
15240 // Create InsertVector shuffle if necessary
15241 bool IsIdentity = true;
15242 SmallVector<int> PrevMask(NumElts, PoisonMaskElem);
15243 Mask.swap(PrevMask);
15244 for (unsigned I = 0; I < NumScalars; ++I) {
15245 Value *Scalar = E->Scalars[PrevMask[I]];
15246 unsigned InsertIdx = *getElementIndex(Scalar);
15247 IsIdentity &= InsertIdx - Offset == I;
15248 Mask[InsertIdx - Offset] = I;
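// If the inserts do not form an identity starting at Offset, or not all
// destination lanes are covered, an explicit shufflevector is emitted below
// to place the vectorized scalars into the destination vector.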
15250 if (!IsIdentity || NumElts != NumScalars) {
15251 Value *V2 = nullptr;
15252 bool IsVNonPoisonous = isGuaranteedNotToBePoison(V) && !isConstant(V);
15253 SmallVector<int> InsertMask(Mask);
15254 if (NumElts != NumScalars && Offset == 0) {
15255 // Follow all insert element instructions from the current buildvector
15256 // sequence.
15257 InsertElementInst *Ins = cast<InsertElementInst>(VL0);
15258 do {
15259 std::optional<unsigned> InsertIdx = getElementIndex(Ins);
15260 if (!InsertIdx)
15261 break;
15262 if (InsertMask[*InsertIdx] == PoisonMaskElem)
15263 InsertMask[*InsertIdx] = *InsertIdx;
15264 if (!Ins->hasOneUse())
15265 break;
15266 Ins = dyn_cast_or_null<InsertElementInst>(
15267 Ins->getUniqueUndroppableUser());
15268 } while (Ins);
15269 SmallBitVector UseMask =
15270 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask);
15271 SmallBitVector IsFirstPoison =
15272 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15273 SmallBitVector IsFirstUndef =
15274 isUndefVector(FirstInsert->getOperand(0), UseMask);
15275 if (!IsFirstPoison.all()) {
15276 unsigned Idx = 0;
15277 for (unsigned I = 0; I < NumElts; I++) {
15278 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I) &&
15279 IsFirstUndef.test(I)) {
15280 if (IsVNonPoisonous) {
15281 InsertMask[I] = I < NumScalars ? I : 0;
15282 continue;
15284 if (!V2)
15285 V2 = UndefValue::get(V->getType());
15286 if (Idx >= NumScalars)
15287 Idx = NumScalars - 1;
15288 InsertMask[I] = NumScalars + Idx;
15289 ++Idx;
15290 } else if (InsertMask[I] != PoisonMaskElem &&
15291 Mask[I] == PoisonMaskElem) {
15292 InsertMask[I] = PoisonMaskElem;
15295 } else {
15296 InsertMask = Mask;
15299 if (!V2)
15300 V2 = PoisonValue::get(V->getType());
15301 V = Builder.CreateShuffleVector(V, V2, InsertMask);
15302 if (auto *I = dyn_cast<Instruction>(V)) {
15303 GatherShuffleExtractSeq.insert(I);
15304 CSEBlocks.insert(I->getParent());
15308 SmallVector<int> InsertMask(NumElts, PoisonMaskElem);
15309 for (unsigned I = 0; I < NumElts; I++) {
15310 if (Mask[I] != PoisonMaskElem)
15311 InsertMask[Offset + I] = I;
15313 SmallBitVector UseMask =
15314 buildUseMask(NumElts, InsertMask, UseMask::UndefsAsMask);
15315 SmallBitVector IsFirstUndef =
15316 isUndefVector(FirstInsert->getOperand(0), UseMask);
15317 if ((!IsIdentity || Offset != 0 || !IsFirstUndef.all()) &&
15318 NumElts != NumScalars) {
15319 if (IsFirstUndef.all()) {
15320 if (!ShuffleVectorInst::isIdentityMask(InsertMask, NumElts)) {
15321 SmallBitVector IsFirstPoison =
15322 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15323 if (!IsFirstPoison.all()) {
15324 for (unsigned I = 0; I < NumElts; I++) {
15325 if (InsertMask[I] == PoisonMaskElem && !IsFirstPoison.test(I))
15326 InsertMask[I] = I + NumElts;
15329 V = Builder.CreateShuffleVector(
15331 IsFirstPoison.all() ? PoisonValue::get(V->getType())
15332 : FirstInsert->getOperand(0),
15333 InsertMask, cast<Instruction>(E->Scalars.back())->getName());
15334 if (auto *I = dyn_cast<Instruction>(V)) {
15335 GatherShuffleExtractSeq.insert(I);
15336 CSEBlocks.insert(I->getParent());
15339 } else {
15340 SmallBitVector IsFirstPoison =
15341 isUndefVector<true>(FirstInsert->getOperand(0), UseMask);
15342 for (unsigned I = 0; I < NumElts; I++) {
15343 if (InsertMask[I] == PoisonMaskElem)
15344 InsertMask[I] = IsFirstPoison.test(I) ? PoisonMaskElem : I;
15345 else
15346 InsertMask[I] += NumElts;
15348 V = Builder.CreateShuffleVector(
15349 FirstInsert->getOperand(0), V, InsertMask,
15350 cast<Instruction>(E->Scalars.back())->getName());
15351 if (auto *I = dyn_cast<Instruction>(V)) {
15352 GatherShuffleExtractSeq.insert(I);
15353 CSEBlocks.insert(I->getParent());
15358 ++NumVectorInstructions;
15359 E->VectorizedValue = V;
15360 return V;
15362 case Instruction::ZExt:
15363 case Instruction::SExt:
15364 case Instruction::FPToUI:
15365 case Instruction::FPToSI:
15366 case Instruction::FPExt:
15367 case Instruction::PtrToInt:
15368 case Instruction::IntToPtr:
15369 case Instruction::SIToFP:
15370 case Instruction::UIToFP:
15371 case Instruction::Trunc:
15372 case Instruction::FPTrunc:
15373 case Instruction::BitCast: {
15374 setInsertPointAfterBundle(E);
15376 Value *InVec = vectorizeOperand(E, 0, PostponedPHIs);
15377 if (E->VectorizedValue) {
15378 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15379 return E->VectorizedValue;
15382 auto *CI = cast<CastInst>(VL0);
15383 Instruction::CastOps VecOpcode = CI->getOpcode();
15384 Type *SrcScalarTy = cast<VectorType>(InVec->getType())->getElementType();
15385 auto SrcIt = MinBWs.find(getOperandEntry(E, 0));
15386 if (!ScalarTy->isFPOrFPVectorTy() && !SrcScalarTy->isFPOrFPVectorTy() &&
15387 (SrcIt != MinBWs.end() || It != MinBWs.end() ||
15388 SrcScalarTy != CI->getOperand(0)->getType()->getScalarType())) {
15389 // Check if the values are candidates to demote.
15390 unsigned SrcBWSz = DL->getTypeSizeInBits(SrcScalarTy);
15391 if (SrcIt != MinBWs.end())
15392 SrcBWSz = SrcIt->second.first;
15393 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy->getScalarType());
15394 if (BWSz == SrcBWSz) {
15395 VecOpcode = Instruction::BitCast;
15396 } else if (BWSz < SrcBWSz) {
15397 VecOpcode = Instruction::Trunc;
15398 } else if (It != MinBWs.end()) {
15399 assert(BWSz > SrcBWSz && "Invalid cast!");
15400 VecOpcode = It->second.second ? Instruction::SExt : Instruction::ZExt;
15401 } else if (SrcIt != MinBWs.end()) {
15402 assert(BWSz > SrcBWSz && "Invalid cast!");
15403 VecOpcode =
15404 SrcIt->second.second ? Instruction::SExt : Instruction::ZExt;
15406 } else if (VecOpcode == Instruction::SIToFP && SrcIt != MinBWs.end() &&
15407 !SrcIt->second.second) {
15408 VecOpcode = Instruction::UIToFP;
15410 Value *V = (VecOpcode != ShuffleOrOp && VecOpcode == Instruction::BitCast)
15411 ? InVec
15412 : Builder.CreateCast(VecOpcode, InVec, VecTy);
15413 V = FinalShuffle(V, E);
15415 E->VectorizedValue = V;
15416 ++NumVectorInstructions;
15417 return V;
15419 case Instruction::FCmp:
15420 case Instruction::ICmp: {
15421 setInsertPointAfterBundle(E);
15423 Value *L = vectorizeOperand(E, 0, PostponedPHIs);
15424 if (E->VectorizedValue) {
15425 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15426 return E->VectorizedValue;
15428 Value *R = vectorizeOperand(E, 1, PostponedPHIs);
15429 if (E->VectorizedValue) {
15430 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15431 return E->VectorizedValue;
15433 if (L->getType() != R->getType()) {
15434 assert((getOperandEntry(E, 0)->isGather() ||
15435 getOperandEntry(E, 1)->isGather() ||
15436 MinBWs.contains(getOperandEntry(E, 0)) ||
15437 MinBWs.contains(getOperandEntry(E, 1))) &&
15438 "Expected item in MinBWs.");
15439 if (cast<VectorType>(L->getType())
15440 ->getElementType()
15441 ->getIntegerBitWidth() < cast<VectorType>(R->getType())
15442 ->getElementType()
15443 ->getIntegerBitWidth()) {
15444 Type *CastTy = R->getType();
15445 L = Builder.CreateIntCast(L, CastTy, GetOperandSignedness(0));
15446 } else {
15447 Type *CastTy = L->getType();
15448 R = Builder.CreateIntCast(R, CastTy, GetOperandSignedness(1));
15452 CmpInst::Predicate P0 = cast<CmpInst>(VL0)->getPredicate();
15453 Value *V = Builder.CreateCmp(P0, L, R);
15454 propagateIRFlags(V, E->Scalars, VL0);
15455 // Do not cast for cmps.
15456 VecTy = cast<FixedVectorType>(V->getType());
15457 V = FinalShuffle(V, E);
15459 E->VectorizedValue = V;
15460 ++NumVectorInstructions;
15461 return V;
15463 case Instruction::Select: {
15464 setInsertPointAfterBundle(E);
15466 Value *Cond = vectorizeOperand(E, 0, PostponedPHIs);
15467 if (E->VectorizedValue) {
15468 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15469 return E->VectorizedValue;
15471 Value *True = vectorizeOperand(E, 1, PostponedPHIs);
15472 if (E->VectorizedValue) {
15473 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15474 return E->VectorizedValue;
15476 Value *False = vectorizeOperand(E, 2, PostponedPHIs);
15477 if (E->VectorizedValue) {
15478 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15479 return E->VectorizedValue;
15481 if (True->getType() != VecTy || False->getType() != VecTy) {
15482 assert((It != MinBWs.end() || getOperandEntry(E, 1)->isGather() ||
15483 getOperandEntry(E, 2)->isGather() ||
15484 MinBWs.contains(getOperandEntry(E, 1)) ||
15485 MinBWs.contains(getOperandEntry(E, 2))) &&
15486 "Expected item in MinBWs.");
15487 if (True->getType() != VecTy)
15488 True = Builder.CreateIntCast(True, VecTy, GetOperandSignedness(1));
15489 if (False->getType() != VecTy)
15490 False = Builder.CreateIntCast(False, VecTy, GetOperandSignedness(2));
15493 unsigned CondNumElements = getNumElements(Cond->getType());
15494 unsigned TrueNumElements = getNumElements(True->getType());
15495 assert(TrueNumElements >= CondNumElements &&
15496 TrueNumElements % CondNumElements == 0 &&
15497 "Cannot vectorize Instruction::Select");
15498 assert(TrueNumElements == getNumElements(False->getType()) &&
15499 "Cannot vectorize Instruction::Select");
15500 if (CondNumElements != TrueNumElements) {
15501 // When the return type is i1 but the source is a fixed vector type, we
15502 // need to duplicate the condition value.
15503 Cond = Builder.CreateShuffleVector(
15504 Cond, createReplicatedMask(TrueNumElements / CondNumElements,
15505 CondNumElements));
15507 assert(getNumElements(Cond->getType()) == TrueNumElements &&
15508 "Cannot vectorize Instruction::Select");
15509 Value *V = Builder.CreateSelect(Cond, True, False);
15510 V = FinalShuffle(V, E);
15512 E->VectorizedValue = V;
15513 ++NumVectorInstructions;
15514 return V;
15516 case Instruction::FNeg: {
15517 setInsertPointAfterBundle(E);
15519 Value *Op = vectorizeOperand(E, 0, PostponedPHIs);
15521 if (E->VectorizedValue) {
15522 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15523 return E->VectorizedValue;
15526 Value *V = Builder.CreateUnOp(
15527 static_cast<Instruction::UnaryOps>(E->getOpcode()), Op);
15528 propagateIRFlags(V, E->Scalars, VL0);
15529 if (auto *I = dyn_cast<Instruction>(V))
15530 V = ::propagateMetadata(I, E->Scalars);
15532 V = FinalShuffle(V, E);
15534 E->VectorizedValue = V;
15535 ++NumVectorInstructions;
15537 return V;
15539 case Instruction::Freeze: {
15540 setInsertPointAfterBundle(E);
15542 Value *Op = vectorizeOperand(E, 0, PostponedPHIs);
15544 if (E->VectorizedValue) {
15545 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15546 return E->VectorizedValue;
15549 if (Op->getType() != VecTy) {
15550 assert((It != MinBWs.end() || getOperandEntry(E, 0)->isGather() ||
15551 MinBWs.contains(getOperandEntry(E, 0))) &&
15552 "Expected item in MinBWs.");
15553 Op = Builder.CreateIntCast(Op, VecTy, GetOperandSignedness(0));
15555 Value *V = Builder.CreateFreeze(Op);
15556 V = FinalShuffle(V, E);
15558 E->VectorizedValue = V;
15559 ++NumVectorInstructions;
15561 return V;
15563 case Instruction::Add:
15564 case Instruction::FAdd:
15565 case Instruction::Sub:
15566 case Instruction::FSub:
15567 case Instruction::Mul:
15568 case Instruction::FMul:
15569 case Instruction::UDiv:
15570 case Instruction::SDiv:
15571 case Instruction::FDiv:
15572 case Instruction::URem:
15573 case Instruction::SRem:
15574 case Instruction::FRem:
15575 case Instruction::Shl:
15576 case Instruction::LShr:
15577 case Instruction::AShr:
15578 case Instruction::And:
15579 case Instruction::Or:
15580 case Instruction::Xor: {
15581 setInsertPointAfterBundle(E);
15583 Value *LHS = vectorizeOperand(E, 0, PostponedPHIs);
15584 if (E->VectorizedValue) {
15585 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15586 return E->VectorizedValue;
15588 Value *RHS = vectorizeOperand(E, 1, PostponedPHIs);
15589 if (E->VectorizedValue) {
15590 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15591 return E->VectorizedValue;
15593 if (ShuffleOrOp == Instruction::And && It != MinBWs.end()) {
15594 for (unsigned I : seq<unsigned>(0, E->getNumOperands())) {
15595 ArrayRef<Value *> Ops = E->getOperand(I);
15596 if (all_of(Ops, [&](Value *Op) {
15597 auto *CI = dyn_cast<ConstantInt>(Op);
15598 return CI && CI->getValue().countr_one() >= It->second.first;
15599 })) {
15600 V = FinalShuffle(I == 0 ? RHS : LHS, E);
15601 E->VectorizedValue = V;
15602 ++NumVectorInstructions;
15603 return V;
15607 if (LHS->getType() != VecTy || RHS->getType() != VecTy) {
15608 assert((It != MinBWs.end() || getOperandEntry(E, 0)->isGather() ||
15609 getOperandEntry(E, 1)->isGather() ||
15610 MinBWs.contains(getOperandEntry(E, 0)) ||
15611 MinBWs.contains(getOperandEntry(E, 1))) &&
15612 "Expected item in MinBWs.");
15613 if (LHS->getType() != VecTy)
15614 LHS = Builder.CreateIntCast(LHS, VecTy, GetOperandSignedness(0));
15615 if (RHS->getType() != VecTy)
15616 RHS = Builder.CreateIntCast(RHS, VecTy, GetOperandSignedness(1));
15619 Value *V = Builder.CreateBinOp(
15620 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS,
15621 RHS);
15622 propagateIRFlags(V, E->Scalars, VL0, It == MinBWs.end());
15623 if (auto *I = dyn_cast<Instruction>(V)) {
15624 V = ::propagateMetadata(I, E->Scalars);
15625 // Drop nuw flags for abs(sub(commutative), true).
15626 if (!MinBWs.contains(E) && ShuffleOrOp == Instruction::Sub &&
15627 any_of(E->Scalars, [](Value *V) {
15628 return isa<PoisonValue>(V) || isCommutative(cast<Instruction>(V));
15630 I->setHasNoUnsignedWrap(/*b=*/false);
15633 V = FinalShuffle(V, E);
15635 E->VectorizedValue = V;
15636 ++NumVectorInstructions;
15638 return V;
15640 case Instruction::Load: {
15641 // Loads are inserted at the head of the tree because we don't want to
15642 // sink them all the way down past store instructions.
15643 setInsertPointAfterBundle(E);
15645 LoadInst *LI = cast<LoadInst>(VL0);
15646 Instruction *NewLI;
15647 Value *PO = LI->getPointerOperand();
15648 if (E->State == TreeEntry::Vectorize) {
15649 NewLI = Builder.CreateAlignedLoad(VecTy, PO, LI->getAlign());
15650 } else if (E->State == TreeEntry::StridedVectorize) {
15651 Value *Ptr0 = cast<LoadInst>(E->Scalars.front())->getPointerOperand();
15652 Value *PtrN = cast<LoadInst>(E->Scalars.back())->getPointerOperand();
15653 PO = IsReverseOrder ? PtrN : Ptr0;
15654 std::optional<int> Diff = getPointersDiff(
15655 VL0->getType(), Ptr0, VL0->getType(), PtrN, *DL, *SE);
15656 Type *StrideTy = DL->getIndexType(PO->getType());
15657 Value *StrideVal;
15658 if (Diff) {
15659 int Stride = *Diff / (static_cast<int>(E->Scalars.size()) - 1);
15660 StrideVal =
15661 ConstantInt::get(StrideTy, (IsReverseOrder ? -1 : 1) * Stride *
15662 DL->getTypeAllocSize(ScalarTy));
15663 } else {
15664 SmallVector<Value *> PointerOps(E->Scalars.size(), nullptr);
15665 transform(E->Scalars, PointerOps.begin(), [](Value *V) {
15666 return cast<LoadInst>(V)->getPointerOperand();
15668 OrdersType Order;
15669 std::optional<Value *> Stride =
15670 calculateRtStride(PointerOps, ScalarTy, *DL, *SE, Order,
15671 &*Builder.GetInsertPoint());
15672 Value *NewStride =
15673 Builder.CreateIntCast(*Stride, StrideTy, /*isSigned=*/true);
15674 StrideVal = Builder.CreateMul(
15675 NewStride,
15676 ConstantInt::get(
15677 StrideTy,
15678 (IsReverseOrder ? -1 : 1) *
15679 static_cast<int>(DL->getTypeAllocSize(ScalarTy))));
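// Emit the strided load as llvm.experimental.vp.strided.load with the
// computed stride; the common alignment of the scalar loads is attached to
// the pointer argument.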
15681 Align CommonAlignment = computeCommonAlignment<LoadInst>(E->Scalars);
15682 auto *Inst = Builder.CreateIntrinsic(
15683 Intrinsic::experimental_vp_strided_load,
15684 {VecTy, PO->getType(), StrideTy},
15685 {PO, StrideVal, Builder.getAllOnesMask(VecTy->getElementCount()),
15686 Builder.getInt32(E->Scalars.size())});
15687 Inst->addParamAttr(
15688 /*ArgNo=*/0,
15689 Attribute::getWithAlignment(Inst->getContext(), CommonAlignment));
15690 NewLI = Inst;
15691 } else {
15692 assert(E->State == TreeEntry::ScatterVectorize && "Unhandled state");
15693 Value *VecPtr = vectorizeOperand(E, 0, PostponedPHIs);
15694 if (E->VectorizedValue) {
15695 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15696 return E->VectorizedValue;
15698 if (isa<FixedVectorType>(ScalarTy)) {
15699 assert(SLPReVec && "FixedVectorType is not expected.");
15700 // CreateMaskedGather expects VecTy and VecPtr to have the same size. We
15701 // need to expand VecPtr if ScalarTy is a vector type.
15702 unsigned ScalarTyNumElements =
15703 cast<FixedVectorType>(ScalarTy)->getNumElements();
15704 unsigned VecTyNumElements =
15705 cast<FixedVectorType>(VecTy)->getNumElements();
15706 assert(VecTyNumElements % ScalarTyNumElements == 0 &&
15707 "Cannot expand getelementptr.");
15708 unsigned VF = VecTyNumElements / ScalarTyNumElements;
15709 SmallVector<Constant *> Indices(VecTyNumElements);
15710 transform(seq(VecTyNumElements), Indices.begin(), [=](unsigned I) {
15711 return Builder.getInt64(I % ScalarTyNumElements);
15713 VecPtr = Builder.CreateGEP(
15714 VecTy->getElementType(),
15715 Builder.CreateShuffleVector(
15716 VecPtr, createReplicatedMask(ScalarTyNumElements, VF)),
15717 ConstantVector::get(Indices));
15719 // Use the minimum alignment of the gathered loads.
15720 Align CommonAlignment = computeCommonAlignment<LoadInst>(E->Scalars);
15721 NewLI = Builder.CreateMaskedGather(VecTy, VecPtr, CommonAlignment);
15723 Value *V = ::propagateMetadata(NewLI, E->Scalars);
15725 V = FinalShuffle(V, E);
15726 E->VectorizedValue = V;
15727 ++NumVectorInstructions;
15728 return V;
15730 case Instruction::Store: {
15731 auto *SI = cast<StoreInst>(VL0);
15733 setInsertPointAfterBundle(E);
15735 Value *VecValue = vectorizeOperand(E, 0, PostponedPHIs);
15736 if (VecValue->getType() != VecTy)
15737 VecValue =
15738 Builder.CreateIntCast(VecValue, VecTy, GetOperandSignedness(0));
15739 VecValue = FinalShuffle(VecValue, E);
15741 Value *Ptr = SI->getPointerOperand();
15742 Instruction *ST;
15743 if (E->State == TreeEntry::Vectorize) {
15744 ST = Builder.CreateAlignedStore(VecValue, Ptr, SI->getAlign());
15745 } else {
15746 assert(E->State == TreeEntry::StridedVectorize &&
15747 "Expected either strided or consecutive stores.");
15748 if (!E->ReorderIndices.empty()) {
15749 SI = cast<StoreInst>(E->Scalars[E->ReorderIndices.front()]);
15750 Ptr = SI->getPointerOperand();
15752 Align CommonAlignment = computeCommonAlignment<StoreInst>(E->Scalars);
15753 Type *StrideTy = DL->getIndexType(SI->getPointerOperandType());
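// Strided stores are emitted as llvm.experimental.vp.strided.store with a
// constant stride of minus the element size, i.e. lanes are written at
// decreasing addresses; the common alignment is attached to the pointer
// argument.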
15754 auto *Inst = Builder.CreateIntrinsic(
15755 Intrinsic::experimental_vp_strided_store,
15756 {VecTy, Ptr->getType(), StrideTy},
15757 {VecValue, Ptr,
15758 ConstantInt::get(
15759 StrideTy, -static_cast<int>(DL->getTypeAllocSize(ScalarTy))),
15760 Builder.getAllOnesMask(VecTy->getElementCount()),
15761 Builder.getInt32(E->Scalars.size())});
15762 Inst->addParamAttr(
15763 /*ArgNo=*/1,
15764 Attribute::getWithAlignment(Inst->getContext(), CommonAlignment));
15765 ST = Inst;
15768 Value *V = ::propagateMetadata(ST, E->Scalars);
15770 E->VectorizedValue = V;
15771 ++NumVectorInstructions;
15772 return V;
15774 case Instruction::GetElementPtr: {
15775 auto *GEP0 = cast<GetElementPtrInst>(VL0);
15776 setInsertPointAfterBundle(E);
15778 Value *Op0 = vectorizeOperand(E, 0, PostponedPHIs);
15779 if (E->VectorizedValue) {
15780 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15781 return E->VectorizedValue;
15784 SmallVector<Value *> OpVecs;
15785 for (int J = 1, N = GEP0->getNumOperands(); J < N; ++J) {
15786 Value *OpVec = vectorizeOperand(E, J, PostponedPHIs);
15787 if (E->VectorizedValue) {
15788 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15789 return E->VectorizedValue;
15791 OpVecs.push_back(OpVec);
15794 Value *V = Builder.CreateGEP(GEP0->getSourceElementType(), Op0, OpVecs);
15795 if (Instruction *I = dyn_cast<GetElementPtrInst>(V)) {
15796 SmallVector<Value *> GEPs;
15797 for (Value *V : E->Scalars) {
15798 if (isa<GetElementPtrInst>(V))
15799 GEPs.push_back(V);
15801 V = ::propagateMetadata(I, GEPs);
15804 V = FinalShuffle(V, E);
15806 E->VectorizedValue = V;
15807 ++NumVectorInstructions;
15809 return V;
15811 case Instruction::Call: {
15812 CallInst *CI = cast<CallInst>(VL0);
15813 setInsertPointAfterBundle(E);
15815 Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
15817 SmallVector<Type *> ArgTys =
15818 buildIntrinsicArgTypes(CI, ID, VecTy->getNumElements(),
15819 It != MinBWs.end() ? It->second.first : 0);
15820 auto VecCallCosts = getVectorCallCosts(CI, VecTy, TTI, TLI, ArgTys);
15821 bool UseIntrinsic = ID != Intrinsic::not_intrinsic &&
15822 VecCallCosts.first <= VecCallCosts.second;
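// Use the intrinsic form only when it exists and its estimated cost compares
// favorably with the corresponding vector library call.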
15824 Value *ScalarArg = nullptr;
15825 SmallVector<Value *> OpVecs;
15826 SmallVector<Type *, 2> TysForDecl;
15827 // Add return type if intrinsic is overloaded on it.
15828 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, -1, TTI))
15829 TysForDecl.push_back(VecTy);
15830 auto *CEI = cast<CallInst>(VL0);
15831 for (unsigned I : seq<unsigned>(0, CI->arg_size())) {
15832 ValueList OpVL;
15833 // Some intrinsics have scalar arguments. Such arguments should not be
15834 // vectorized.
15835 if (UseIntrinsic && isVectorIntrinsicWithScalarOpAtArg(ID, I)) {
15836 ScalarArg = CEI->getArgOperand(I);
15837 // If we decided to reduce the bitwidth of the abs intrinsic, its second
15838 // argument must be set to false (do not return poison if the value is the signed min).
15839 if (ID == Intrinsic::abs && It != MinBWs.end() &&
15840 It->second.first < DL->getTypeSizeInBits(CEI->getType()))
15841 ScalarArg = Builder.getFalse();
15842 OpVecs.push_back(ScalarArg);
15843 if (isVectorIntrinsicWithOverloadTypeAtArg(ID, I, TTI))
15844 TysForDecl.push_back(ScalarArg->getType());
15845 continue;
15848 Value *OpVec = vectorizeOperand(E, I, PostponedPHIs);
15849 if (E->VectorizedValue) {
15850 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15851 return E->VectorizedValue;
15853 ScalarArg = CEI->getArgOperand(I);
15854 if (cast<VectorType>(OpVec->getType())->getElementType() !=
15855 ScalarArg->getType()->getScalarType() &&
15856 It == MinBWs.end()) {
15857 auto *CastTy =
15858 getWidenedType(ScalarArg->getType(), VecTy->getNumElements());
15859 OpVec = Builder.CreateIntCast(OpVec, CastTy, GetOperandSignedness(I));
15860 } else if (It != MinBWs.end()) {
15861 OpVec = Builder.CreateIntCast(OpVec, VecTy, GetOperandSignedness(I));
15863 LLVM_DEBUG(dbgs() << "SLP: OpVec[" << I << "]: " << *OpVec << "\n");
15864 OpVecs.push_back(OpVec);
15865 if (UseIntrinsic && isVectorIntrinsicWithOverloadTypeAtArg(ID, I, TTI))
15866 TysForDecl.push_back(OpVec->getType());
15869 Function *CF;
15870 if (!UseIntrinsic) {
15871 VFShape Shape =
15872 VFShape::get(CI->getFunctionType(),
15873 ElementCount::getFixed(
15874 static_cast<unsigned>(VecTy->getNumElements())),
15875 false /*HasGlobalPred*/);
15876 CF = VFDatabase(*CI).getVectorizedFunction(Shape);
15877 } else {
15878 CF = Intrinsic::getOrInsertDeclaration(F->getParent(), ID, TysForDecl);
15881 SmallVector<OperandBundleDef, 1> OpBundles;
15882 CI->getOperandBundlesAsDefs(OpBundles);
15883 Value *V = Builder.CreateCall(CF, OpVecs, OpBundles);
15885 propagateIRFlags(V, E->Scalars, VL0);
15886 V = FinalShuffle(V, E);
15888 E->VectorizedValue = V;
15889 ++NumVectorInstructions;
15890 return V;
15892 case Instruction::ShuffleVector: {
15893 Value *V;
15894 if (SLPReVec && !E->isAltShuffle()) {
15895 setInsertPointAfterBundle(E);
15896 Value *Src = vectorizeOperand(E, 0, PostponedPHIs);
15897 if (E->VectorizedValue) {
15898 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15899 return E->VectorizedValue;
15901 SmallVector<int> ThisMask(calculateShufflevectorMask(E->Scalars));
15902 if (auto *SVSrc = dyn_cast<ShuffleVectorInst>(Src)) {
15903 assert(isa<PoisonValue>(SVSrc->getOperand(1)) &&
15904 "Not supported shufflevector usage.");
15905 SmallVector<int> NewMask(ThisMask.size());
15906 transform(ThisMask, NewMask.begin(), [&SVSrc](int Mask) {
15907 return SVSrc->getShuffleMask()[Mask];
15909 V = Builder.CreateShuffleVector(SVSrc->getOperand(0), NewMask);
15910 } else {
15911 V = Builder.CreateShuffleVector(Src, ThisMask);
15913 propagateIRFlags(V, E->Scalars, VL0);
15914 if (auto *I = dyn_cast<Instruction>(V))
15915 V = ::propagateMetadata(I, E->Scalars);
15916 V = FinalShuffle(V, E);
15917 } else {
15918 assert(E->isAltShuffle() &&
15919 ((Instruction::isBinaryOp(E->getOpcode()) &&
15920 Instruction::isBinaryOp(E->getAltOpcode())) ||
15921 (Instruction::isCast(E->getOpcode()) &&
15922 Instruction::isCast(E->getAltOpcode())) ||
15923 (isa<CmpInst>(VL0) && isa<CmpInst>(E->getAltOp()))) &&
15924 "Invalid Shuffle Vector Operand");
15926 Value *LHS = nullptr, *RHS = nullptr;
15927 if (Instruction::isBinaryOp(E->getOpcode()) || isa<CmpInst>(VL0)) {
15928 setInsertPointAfterBundle(E);
15929 LHS = vectorizeOperand(E, 0, PostponedPHIs);
15930 if (E->VectorizedValue) {
15931 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15932 return E->VectorizedValue;
15934 RHS = vectorizeOperand(E, 1, PostponedPHIs);
15935 } else {
15936 setInsertPointAfterBundle(E);
15937 LHS = vectorizeOperand(E, 0, PostponedPHIs);
15939 if (E->VectorizedValue) {
15940 LLVM_DEBUG(dbgs() << "SLP: Diamond merged for " << *VL0 << ".\n");
15941 return E->VectorizedValue;
15943 if (LHS && RHS &&
15944 ((Instruction::isBinaryOp(E->getOpcode()) &&
15945 (LHS->getType() != VecTy || RHS->getType() != VecTy)) ||
15946 (isa<CmpInst>(VL0) && LHS->getType() != RHS->getType()))) {
15947 assert((It != MinBWs.end() ||
15948 getOperandEntry(E, 0)->State == TreeEntry::NeedToGather ||
15949 getOperandEntry(E, 1)->State == TreeEntry::NeedToGather ||
15950 MinBWs.contains(getOperandEntry(E, 0)) ||
15951 MinBWs.contains(getOperandEntry(E, 1))) &&
15952 "Expected item in MinBWs.");
15953 Type *CastTy = VecTy;
15954 if (isa<CmpInst>(VL0) && LHS->getType() != RHS->getType()) {
15955 if (cast<VectorType>(LHS->getType())
15956 ->getElementType()
15957 ->getIntegerBitWidth() < cast<VectorType>(RHS->getType())
15958 ->getElementType()
15959 ->getIntegerBitWidth())
15960 CastTy = RHS->getType();
15961 else
15962 CastTy = LHS->getType();
15964 if (LHS->getType() != CastTy)
15965 LHS = Builder.CreateIntCast(LHS, CastTy, GetOperandSignedness(0));
15966 if (RHS->getType() != CastTy)
15967 RHS = Builder.CreateIntCast(RHS, CastTy, GetOperandSignedness(1));
15970 Value *V0, *V1;
15971 if (Instruction::isBinaryOp(E->getOpcode())) {
15972 V0 = Builder.CreateBinOp(
15973 static_cast<Instruction::BinaryOps>(E->getOpcode()), LHS, RHS);
15974 V1 = Builder.CreateBinOp(
15975 static_cast<Instruction::BinaryOps>(E->getAltOpcode()), LHS, RHS);
15976 } else if (auto *CI0 = dyn_cast<CmpInst>(VL0)) {
15977 V0 = Builder.CreateCmp(CI0->getPredicate(), LHS, RHS);
15978 auto *AltCI = cast<CmpInst>(E->getAltOp());
15979 CmpInst::Predicate AltPred = AltCI->getPredicate();
15980 V1 = Builder.CreateCmp(AltPred, LHS, RHS);
15981 } else {
15982 if (LHS->getType()->isIntOrIntVectorTy() && ScalarTy->isIntegerTy()) {
15983 unsigned SrcBWSz = DL->getTypeSizeInBits(
15984 cast<VectorType>(LHS->getType())->getElementType());
15985 unsigned BWSz = DL->getTypeSizeInBits(ScalarTy);
15986 if (BWSz <= SrcBWSz) {
15987 if (BWSz < SrcBWSz)
15988 LHS = Builder.CreateIntCast(LHS, VecTy, It->second.first);
15989 assert(LHS->getType() == VecTy &&
15990 "Expected same type as operand.");
15991 if (auto *I = dyn_cast<Instruction>(LHS))
15992 LHS = ::propagateMetadata(I, E->Scalars);
15993 LHS = FinalShuffle(LHS, E);
15994 E->VectorizedValue = LHS;
15995 ++NumVectorInstructions;
15996 return LHS;
15999 V0 = Builder.CreateCast(
16000 static_cast<Instruction::CastOps>(E->getOpcode()), LHS, VecTy);
16001 V1 = Builder.CreateCast(
16002 static_cast<Instruction::CastOps>(E->getAltOpcode()), LHS, VecTy);
16004 // Add V0 and V1 to later analysis to try to find and remove matching
16005 // instruction, if any.
16006 for (Value *V : {V0, V1}) {
16007 if (auto *I = dyn_cast<Instruction>(V)) {
16008 GatherShuffleExtractSeq.insert(I);
16009 CSEBlocks.insert(I->getParent());
16013 // Create shuffle to take alternate operations from the vector.
16014 // Also, gather up main and alt scalar ops to propagate IR flags to
16015 // each vector operation.
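// For example, for the scalars {add, sub, add, sub} with main opcode 'add'
// and a vector factor of 4, the mask is <0, 5, 2, 7>: lanes whose scalar
// matches the main opcode are taken from V0, the remaining lanes from V1
// (offset by the vector factor).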
16016 ValueList OpScalars, AltScalars;
16017 SmallVector<int> Mask;
16018 E->buildAltOpShuffleMask(
16019 [E, this](Instruction *I) {
16020 assert(E->isOpcodeOrAlt(I) && "Unexpected main/alternate opcode");
16021 return isAlternateInstruction(I, E->getMainOp(), E->getAltOp(),
16022 *TLI);
16024 Mask, &OpScalars, &AltScalars);
16026 propagateIRFlags(V0, OpScalars, E->getMainOp(), It == MinBWs.end());
16027 propagateIRFlags(V1, AltScalars, E->getAltOp(), It == MinBWs.end());
16028 auto DropNuwFlag = [&](Value *Vec, unsigned Opcode) {
16029 // Drop nuw flags for abs(sub(commutative), true).
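// (If the operands of such a sub may have been swapped during vectorization,
// the no-unsigned-wrap guarantee of the scalar sub no longer holds for the
// vector op, so the flag has to be dropped.)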
16030 if (auto *I = dyn_cast<Instruction>(Vec);
16031 I && Opcode == Instruction::Sub && !MinBWs.contains(E) &&
16032 any_of(E->Scalars, [](Value *V) {
16033 if (isa<PoisonValue>(V))
16034 return false;
16035 auto *IV = cast<Instruction>(V);
16036 return IV->getOpcode() == Instruction::Sub && isCommutative(IV);
16038 I->setHasNoUnsignedWrap(/*b=*/false);
16040 DropNuwFlag(V0, E->getOpcode());
16041 DropNuwFlag(V1, E->getAltOpcode());
16043 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
16044 assert(SLPReVec && "FixedVectorType is not expected.");
16045 transformScalarShuffleIndiciesToVector(VecTy->getNumElements(), Mask);
16047 V = Builder.CreateShuffleVector(V0, V1, Mask);
16048 if (auto *I = dyn_cast<Instruction>(V)) {
16049 V = ::propagateMetadata(I, E->Scalars);
16050 GatherShuffleExtractSeq.insert(I);
16051 CSEBlocks.insert(I->getParent());
16055 E->VectorizedValue = V;
16056 ++NumVectorInstructions;
16058 return V;
16060 default:
16061 llvm_unreachable("unknown inst");
16063 return nullptr;
16066 Value *BoUpSLP::vectorizeTree() {
16067 ExtraValueToDebugLocsMap ExternallyUsedValues;
16068 return vectorizeTree(ExternallyUsedValues);
16071 Value *
16072 BoUpSLP::vectorizeTree(const ExtraValueToDebugLocsMap &ExternallyUsedValues,
16073 Instruction *ReductionRoot) {
16074 // All blocks must be scheduled before any instructions are inserted.
16075 for (auto &BSIter : BlocksSchedules) {
16076 scheduleBlock(BSIter.second.get());
16078 // Clean the Entry-to-LastInstruction table; it can be affected by scheduling,
16079 // so it needs to be rebuilt.
16080 EntryToLastInstruction.clear();
16082 if (ReductionRoot)
16083 Builder.SetInsertPoint(ReductionRoot->getParent(),
16084 ReductionRoot->getIterator());
16085 else
16086 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
16088 // Emit gathered loads first to generate better code for the users of those
16089 // gathered loads.
16090 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree) {
16091 if (GatheredLoadsEntriesFirst.has_value() &&
16092 TE->Idx >= *GatheredLoadsEntriesFirst &&
16093 (!TE->isGather() || !TE->UserTreeIndices.empty())) {
16094 assert((!TE->UserTreeIndices.empty() ||
16095 (TE->getOpcode() == Instruction::Load && !TE->isGather())) &&
16096 "Expected gathered load node.");
16097 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
16100 // Postpone emission of PHI operands to avoid cyclic dependency issues.
16101 (void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
16102 for (const std::unique_ptr<TreeEntry> &TE : VectorizableTree)
16103 if (TE->State == TreeEntry::Vectorize &&
16104 TE->getOpcode() == Instruction::PHI && !TE->isAltShuffle() &&
16105 TE->VectorizedValue)
16106 (void)vectorizeTree(TE.get(), /*PostponedPHIs=*/false);
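// The two-pass emission above breaks cycles: PHIs may transitively depend on
// values produced by nodes emitted after them (e.g. loop-carried values), so
// their incoming operands are filled in only after the rest of the tree has
// been emitted.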
16107 // Run through the list of postponed gathers and emit them, replacing the temp
16108 // emitted allocas with actual vector instructions.
16109 ArrayRef<const TreeEntry *> PostponedNodes = PostponedGathers.getArrayRef();
16110 DenseMap<Value *, SmallVector<TreeEntry *>> PostponedValues;
16111 for (const TreeEntry *E : PostponedNodes) {
16112 auto *TE = const_cast<TreeEntry *>(E);
16113 if (auto *VecTE = getTreeEntry(TE->Scalars.front()))
16114 if (VecTE->isSame(TE->UserTreeIndices.front().UserTE->getOperand(
16115 TE->UserTreeIndices.front().EdgeIdx)) &&
16116 VecTE->isSame(TE->Scalars))
16117 // Found a gather node which is exactly the same as one of the
16118 // vectorized nodes. It may happen after reordering.
16119 continue;
16120 auto *PrevVec = cast<Instruction>(TE->VectorizedValue);
16121 TE->VectorizedValue = nullptr;
16122 auto *UserI =
16123 cast<Instruction>(TE->UserTreeIndices.front().UserTE->VectorizedValue);
16124 // If the user is a PHI node, its vector code has to be inserted right before
16125 // the block terminator. Since the node was delayed, there were some unresolved
16126 // dependencies at the moment when the stub instruction was emitted. In case
16127 // any of these dependencies turns out to be an operand of another PHI coming
16128 // from this same block, the position of the stub instruction becomes invalid,
16129 // because the source vector that is supposed to feed this gather node was
16130 // inserted at the end of the block [after the stub instruction]. So we need
16131 // to adjust the insertion point again to the end of the block.
16132 if (isa<PHINode>(UserI)) {
16133 // Insert before all users.
16134 Instruction *InsertPt = PrevVec->getParent()->getTerminator();
16135 for (User *U : PrevVec->users()) {
16136 if (U == UserI)
16137 continue;
16138 auto *UI = dyn_cast<Instruction>(U);
16139 if (!UI || isa<PHINode>(UI) || UI->getParent() != InsertPt->getParent())
16140 continue;
16141 if (UI->comesBefore(InsertPt))
16142 InsertPt = UI;
16144 Builder.SetInsertPoint(InsertPt);
16145 } else {
16146 Builder.SetInsertPoint(PrevVec);
16148 Builder.SetCurrentDebugLocation(UserI->getDebugLoc());
16149 Value *Vec = vectorizeTree(TE, /*PostponedPHIs=*/false);
16150 if (auto *VecI = dyn_cast<Instruction>(Vec);
16151 VecI && VecI->getParent() == Builder.GetInsertBlock() &&
16152 Builder.GetInsertPoint()->comesBefore(VecI))
16153 VecI->moveBeforePreserving(*Builder.GetInsertBlock(),
16154 Builder.GetInsertPoint());
16155 if (Vec->getType() != PrevVec->getType()) {
16156 assert(Vec->getType()->isIntOrIntVectorTy() &&
16157 PrevVec->getType()->isIntOrIntVectorTy() &&
16158 "Expected integer vector types only.");
16159 std::optional<bool> IsSigned;
16160 for (Value *V : TE->Scalars) {
16161 if (const TreeEntry *BaseTE = getTreeEntry(V)) {
16162 auto It = MinBWs.find(BaseTE);
16163 if (It != MinBWs.end()) {
16164 IsSigned = IsSigned.value_or(false) || It->second.second;
16165 if (*IsSigned)
16166 break;
16168 for (const TreeEntry *MNTE : MultiNodeScalars.lookup(V)) {
16169 auto It = MinBWs.find(MNTE);
16170 if (It != MinBWs.end()) {
16171 IsSigned = IsSigned.value_or(false) || It->second.second;
16172 if (*IsSigned)
16173 break;
16176 if (IsSigned.value_or(false))
16177 break;
16178 // Scan through gather nodes.
16179 for (const TreeEntry *BVE : ValueToGatherNodes.lookup(V)) {
16180 auto It = MinBWs.find(BVE);
16181 if (It != MinBWs.end()) {
16182 IsSigned = IsSigned.value_or(false) || It->second.second;
16183 if (*IsSigned)
16184 break;
16187 if (IsSigned.value_or(false))
16188 break;
16189 if (auto *EE = dyn_cast<ExtractElementInst>(V)) {
16190 IsSigned =
16191 IsSigned.value_or(false) ||
16192 !isKnownNonNegative(EE->getVectorOperand(), SimplifyQuery(*DL));
16193 continue;
16195 if (IsSigned.value_or(false))
16196 break;
16199 if (IsSigned.value_or(false)) {
16200 // Final attempt - check user node.
16201 auto It = MinBWs.find(TE->UserTreeIndices.front().UserTE);
16202 if (It != MinBWs.end())
16203 IsSigned = It->second.second;
16205 assert(IsSigned &&
16206 "Expected user node or perfect diamond match in MinBWs.");
16207 Vec = Builder.CreateIntCast(Vec, PrevVec->getType(), *IsSigned);
16209 PrevVec->replaceAllUsesWith(Vec);
16210 PostponedValues.try_emplace(Vec).first->second.push_back(TE);
16211 // Replace the stub vector node, if it was used before for one of the
16212 // buildvector nodes already.
16213 auto It = PostponedValues.find(PrevVec);
16214 if (It != PostponedValues.end()) {
16215 for (TreeEntry *VTE : It->getSecond())
16216 VTE->VectorizedValue = Vec;
16218 eraseInstruction(PrevVec);
16221 LLVM_DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size()
16222 << " values .\n");
16224 SmallVector<ShuffledInsertData<Value *>> ShuffledInserts;
16225 // Maps vector instruction to original insertelement instruction
16226 DenseMap<Value *, InsertElementInst *> VectorToInsertElement;
16227 // Maps extract Scalar to the corresponding extractelement instruction in the
16228 // basic block. Only one extractelement per block should be emitted.
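// The per-block pair is (extract instruction, value extended to the original
// scalar type if a cast was required, otherwise the extract itself).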
16229 DenseMap<Value *, DenseMap<BasicBlock *, std::pair<Value *, Value *>>>
16230 ScalarToEEs;
16231 SmallDenseSet<Value *, 4> UsedInserts;
16232 DenseMap<std::pair<Value *, Type *>, Value *> VectorCasts;
16233 SmallDenseSet<Value *, 4> ScalarsWithNullptrUser;
16234 SmallDenseSet<ExtractElementInst *, 4> IgnoredExtracts;
16235 // Extract all of the elements with the external uses.
16236 for (const auto &ExternalUse : ExternalUses) {
16237 Value *Scalar = ExternalUse.Scalar;
16238 llvm::User *User = ExternalUse.User;
16240 // Skip users that we have already RAUWed. This happens when one instruction
16241 // has multiple uses of the same value.
16242 if (User && !is_contained(Scalar->users(), User))
16243 continue;
16244 TreeEntry *E = getTreeEntry(Scalar);
16245 assert(E && "Invalid scalar");
16246 assert(!E->isGather() && "Extracting from a gather list");
16247 // Non-instruction pointers are not deleted, just skip them.
16248 if (E->getOpcode() == Instruction::GetElementPtr &&
16249 !isa<GetElementPtrInst>(Scalar))
16250 continue;
16252 Value *Vec = E->VectorizedValue;
16253 assert(Vec && "Can't find vectorizable value");
16255 Value *Lane = Builder.getInt32(ExternalUse.Lane);
16256 auto ExtractAndExtendIfNeeded = [&](Value *Vec) {
16257 if (Scalar->getType() != Vec->getType()) {
16258 Value *Ex = nullptr;
16259 Value *ExV = nullptr;
16260 auto *Inst = dyn_cast<Instruction>(Scalar);
16261 bool ReplaceInst = Inst && ExternalUsesAsOriginalScalar.contains(Inst);
16262 auto It = ScalarToEEs.find(Scalar);
16263 if (It != ScalarToEEs.end()) {
16264 // No need to emit many extracts, just move the only one in the
16265 // current block.
16266 auto EEIt = It->second.find(ReplaceInst ? Inst->getParent()
16267 : Builder.GetInsertBlock());
16268 if (EEIt != It->second.end()) {
16269 Value *PrevV = EEIt->second.first;
16270 if (auto *I = dyn_cast<Instruction>(PrevV);
16271 I && !ReplaceInst &&
16272 Builder.GetInsertPoint() != Builder.GetInsertBlock()->end() &&
16273 Builder.GetInsertPoint()->comesBefore(I)) {
16274 I->moveBefore(*Builder.GetInsertPoint()->getParent(),
16275 Builder.GetInsertPoint());
16276 if (auto *CI = dyn_cast<Instruction>(EEIt->second.second))
16277 CI->moveAfter(I);
16279 Ex = PrevV;
16280 ExV = EEIt->second.second ? EEIt->second.second : Ex;
16283 if (!Ex) {
16284 // "Reuse" the existing extract to improve final codegen.
16285 if (ReplaceInst) {
16286 // Leave the instruction as is if extracting is cheaper and all its
16287 // operands are scalar.
16288 if (auto *EE = dyn_cast<ExtractElementInst>(Inst)) {
16289 IgnoredExtracts.insert(EE);
16290 Ex = EE;
16291 } else {
16292 auto *CloneInst = Inst->clone();
16293 CloneInst->insertBefore(Inst);
16294 if (Inst->hasName())
16295 CloneInst->takeName(Inst);
16296 Ex = CloneInst;
16298 } else if (auto *ES = dyn_cast<ExtractElementInst>(Scalar);
16299 ES && isa<Instruction>(Vec)) {
16300 Value *V = ES->getVectorOperand();
16301 auto *IVec = cast<Instruction>(Vec);
16302 if (const TreeEntry *ETE = getTreeEntry(V))
16303 V = ETE->VectorizedValue;
16304 if (auto *IV = dyn_cast<Instruction>(V);
16305 !IV || IV == Vec || IV->getParent() != IVec->getParent() ||
16306 IV->comesBefore(IVec))
16307 Ex = Builder.CreateExtractElement(V, ES->getIndexOperand());
16308 else
16309 Ex = Builder.CreateExtractElement(Vec, Lane);
16310 } else if (auto *VecTy =
16311 dyn_cast<FixedVectorType>(Scalar->getType())) {
16312 assert(SLPReVec && "FixedVectorType is not expected.");
16313 unsigned VecTyNumElements = VecTy->getNumElements();
16314 // When REVEC is enabled, we need to extract a vector.
16315 // Note: The element size of Scalar may be different from the
16316 // element size of Vec.
16317 Ex = Builder.CreateExtractVector(
16318 FixedVectorType::get(Vec->getType()->getScalarType(),
16319 VecTyNumElements),
16320 Vec, Builder.getInt64(ExternalUse.Lane * VecTyNumElements));
16321 } else {
16322 Ex = Builder.CreateExtractElement(Vec, Lane);
16324 // If necessary, sign-extend or zero-extend the extracted value
16325 // to the larger type.
16326 ExV = Ex;
16327 if (Scalar->getType() != Ex->getType())
16328 ExV = Builder.CreateIntCast(
16329 Ex, Scalar->getType(),
16330 !isKnownNonNegative(Scalar, SimplifyQuery(*DL)));
16331 auto *I = dyn_cast<Instruction>(Ex);
16332 ScalarToEEs[Scalar].try_emplace(I ? I->getParent()
16333 : &F->getEntryBlock(),
16334 std::make_pair(Ex, ExV));
16336 // The then-branch of the previous if may produce constants, since
16337 // operand 0 might be a constant.
16338 if (auto *ExI = dyn_cast<Instruction>(Ex);
16339 ExI && !isa<PHINode>(ExI) && !mayHaveNonDefUseDependency(*ExI)) {
16340 GatherShuffleExtractSeq.insert(ExI);
16341 CSEBlocks.insert(ExI->getParent());
16343 return ExV;
16345 assert(isa<FixedVectorType>(Scalar->getType()) &&
16346 isa<InsertElementInst>(Scalar) &&
16347 "In-tree scalar of vector type is not insertelement?");
16348 auto *IE = cast<InsertElementInst>(Scalar);
16349 VectorToInsertElement.try_emplace(Vec, IE);
16350 return Vec;
16352 // If User == nullptr, the Scalar either remains as a scalar in the vectorized
16353 // instructions or is used as an extra arg. Generate an ExtractElement
16354 // instruction and update the record for this scalar in ExternallyUsedValues.
16355 if (!User) {
16356 if (!ScalarsWithNullptrUser.insert(Scalar).second)
16357 continue;
16358 assert((ExternallyUsedValues.count(Scalar) ||
16359 Scalar->hasNUsesOrMore(UsesLimit) ||
16360 ExternalUsesAsOriginalScalar.contains(Scalar) ||
16361 any_of(Scalar->users(),
16362 [&](llvm::User *U) {
16363 if (ExternalUsesAsOriginalScalar.contains(U))
16364 return true;
16365 TreeEntry *UseEntry = getTreeEntry(U);
16366 return UseEntry &&
16367 (UseEntry->State == TreeEntry::Vectorize ||
16368 UseEntry->State ==
16369 TreeEntry::StridedVectorize) &&
16370 (E->State == TreeEntry::Vectorize ||
16371 E->State == TreeEntry::StridedVectorize) &&
16372 doesInTreeUserNeedToExtract(
16373 Scalar, getRootEntryInstruction(*UseEntry),
16374 TLI);
16375 })) &&
16376 "Scalar with nullptr User must be registered in "
16377 "ExternallyUsedValues map or remain as scalar in vectorized "
16378 "instructions");
16379 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
16380 if (auto *PHI = dyn_cast<PHINode>(VecI)) {
16381 if (PHI->getParent()->isLandingPad())
16382 Builder.SetInsertPoint(
16383 PHI->getParent(),
16384 std::next(
16385 PHI->getParent()->getLandingPadInst()->getIterator()));
16386 else
16387 Builder.SetInsertPoint(PHI->getParent(),
16388 PHI->getParent()->getFirstNonPHIIt());
16389 } else {
16390 Builder.SetInsertPoint(VecI->getParent(),
16391 std::next(VecI->getIterator()));
16393 } else {
16394 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
16396 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16397 // Required to update internally referenced instructions.
16398 if (Scalar != NewInst) {
16399 assert((!isa<ExtractElementInst>(Scalar) ||
16400 !IgnoredExtracts.contains(cast<ExtractElementInst>(Scalar))) &&
16401 "Extractelements should not be replaced.");
16402 Scalar->replaceAllUsesWith(NewInst);
16404 continue;
16407 if (auto *VU = dyn_cast<InsertElementInst>(User);
16408 VU && VU->getOperand(1) == Scalar) {
16409 // Skip if the scalar is another vector op or Vec is not an instruction.
16410 if (!Scalar->getType()->isVectorTy() && isa<Instruction>(Vec)) {
16411 if (auto *FTy = dyn_cast<FixedVectorType>(User->getType())) {
16412 if (!UsedInserts.insert(VU).second)
16413 continue;
16414 // Need to use original vector, if the root is truncated.
16415 auto BWIt = MinBWs.find(E);
16416 if (BWIt != MinBWs.end() && Vec->getType() != VU->getType()) {
16417 auto *ScalarTy = FTy->getElementType();
16418 auto Key = std::make_pair(Vec, ScalarTy);
16419 auto VecIt = VectorCasts.find(Key);
16420 if (VecIt == VectorCasts.end()) {
16421 IRBuilderBase::InsertPointGuard Guard(Builder);
16422 if (auto *IVec = dyn_cast<PHINode>(Vec)) {
16423 if (IVec->getParent()->isLandingPad())
16424 Builder.SetInsertPoint(IVec->getParent(),
16425 std::next(IVec->getParent()
16426 ->getLandingPadInst()
16427 ->getIterator()));
16428 else
16429 Builder.SetInsertPoint(
16430 IVec->getParent()->getFirstNonPHIOrDbgOrLifetime());
16431 } else if (auto *IVec = dyn_cast<Instruction>(Vec)) {
16432 Builder.SetInsertPoint(IVec->getNextNonDebugInstruction());
16434 Vec = Builder.CreateIntCast(
16435 Vec,
16436 getWidenedType(
16437 ScalarTy,
16438 cast<FixedVectorType>(Vec->getType())->getNumElements()),
16439 BWIt->second.second);
16440 VectorCasts.try_emplace(Key, Vec);
16441 } else {
16442 Vec = VecIt->second;
16446 std::optional<unsigned> InsertIdx = getElementIndex(VU);
16447 if (InsertIdx) {
16448 auto *It = find_if(
16449 ShuffledInserts, [VU](const ShuffledInsertData<Value *> &Data) {
16450 // Checks if 2 insertelements are from the same buildvector.
16451 InsertElementInst *VecInsert = Data.InsertElements.front();
16452 return areTwoInsertFromSameBuildVector(
16453 VU, VecInsert,
16454 [](InsertElementInst *II) { return II->getOperand(0); });
16456 unsigned Idx = *InsertIdx;
16457 if (It == ShuffledInserts.end()) {
16458 (void)ShuffledInserts.emplace_back();
16459 It = std::next(ShuffledInserts.begin(),
16460 ShuffledInserts.size() - 1);
16462 SmallVectorImpl<int> &Mask = It->ValueMasks[Vec];
16463 if (Mask.empty())
16464 Mask.assign(FTy->getNumElements(), PoisonMaskElem);
16465 Mask[Idx] = ExternalUse.Lane;
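// Mask[Idx] now records which lane of Vec feeds element Idx of the
// reconstructed buildvector.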
16466 It->InsertElements.push_back(cast<InsertElementInst>(User));
16467 continue;
16473 // Generate extracts for out-of-tree users.
16474 // Find the insertion point for the extractelement lane.
16475 if (auto *VecI = dyn_cast<Instruction>(Vec)) {
16476 if (PHINode *PH = dyn_cast<PHINode>(User)) {
16477 for (unsigned I : seq<unsigned>(0, PH->getNumIncomingValues())) {
16478 if (PH->getIncomingValue(I) == Scalar) {
16479 Instruction *IncomingTerminator =
16480 PH->getIncomingBlock(I)->getTerminator();
16481 if (isa<CatchSwitchInst>(IncomingTerminator)) {
16482 Builder.SetInsertPoint(VecI->getParent(),
16483 std::next(VecI->getIterator()));
16484 } else {
16485 Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator());
16487 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16488 PH->setOperand(I, NewInst);
16491 } else {
16492 Builder.SetInsertPoint(cast<Instruction>(User));
16493 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16494 User->replaceUsesOfWith(Scalar, NewInst);
16496 } else {
16497 Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
16498 Value *NewInst = ExtractAndExtendIfNeeded(Vec);
16499 User->replaceUsesOfWith(Scalar, NewInst);
16502 LLVM_DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
16505 auto CreateShuffle = [&](Value *V1, Value *V2, ArrayRef<int> Mask) {
16506 SmallVector<int> CombinedMask1(Mask.size(), PoisonMaskElem);
16507 SmallVector<int> CombinedMask2(Mask.size(), PoisonMaskElem);
16508 int VF = cast<FixedVectorType>(V1->getType())->getNumElements();
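// Indices below VF select from V1, indices at or above VF select from V2
// (shifted down by VF), mirroring the semantics of a two-source
// shufflevector.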
16509 for (int I = 0, E = Mask.size(); I < E; ++I) {
16510 if (Mask[I] < VF)
16511 CombinedMask1[I] = Mask[I];
16512 else
16513 CombinedMask2[I] = Mask[I] - VF;
16515 ShuffleInstructionBuilder ShuffleBuilder(
16516 cast<VectorType>(V1->getType())->getElementType(), Builder, *this);
16517 ShuffleBuilder.add(V1, CombinedMask1);
16518 if (V2)
16519 ShuffleBuilder.add(V2, CombinedMask2);
16520 return ShuffleBuilder.finalize({}, {}, {});
16523 auto &&ResizeToVF = [&CreateShuffle](Value *Vec, ArrayRef<int> Mask,
16524 bool ForSingleMask) {
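// Returns the vector (possibly reshuffled to match the mask size) and a flag
// that is true when the mask has already been applied by this helper.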
16525 unsigned VF = Mask.size();
16526 unsigned VecVF = cast<FixedVectorType>(Vec->getType())->getNumElements();
16527 if (VF != VecVF) {
16528 if (any_of(Mask, [VF](int Idx) { return Idx >= static_cast<int>(VF); })) {
16529 Vec = CreateShuffle(Vec, nullptr, Mask);
16530 return std::make_pair(Vec, true);
16532 if (!ForSingleMask) {
16533 SmallVector<int> ResizeMask(VF, PoisonMaskElem);
16534 for (unsigned I = 0; I < VF; ++I) {
16535 if (Mask[I] != PoisonMaskElem)
16536 ResizeMask[Mask[I]] = Mask[I];
16538 Vec = CreateShuffle(Vec, nullptr, ResizeMask);
16542 return std::make_pair(Vec, false);
16544 // Perform shuffling of the vectorize tree entries for better handling of
16545 // external extracts.
16546 for (int I = 0, E = ShuffledInserts.size(); I < E; ++I) {
16547 // Find the first and the last instruction in the list of insertelements.
16548 sort(ShuffledInserts[I].InsertElements, isFirstInsertElement);
16549 InsertElementInst *FirstInsert = ShuffledInserts[I].InsertElements.front();
16550 InsertElementInst *LastInsert = ShuffledInserts[I].InsertElements.back();
16551 Builder.SetInsertPoint(LastInsert);
16552 auto Vector = ShuffledInserts[I].ValueMasks.takeVector();
16553 Value *NewInst = performExtractsShuffleAction<Value>(
16554 MutableArrayRef(Vector.data(), Vector.size()),
16555 FirstInsert->getOperand(0),
16556 [](Value *Vec) {
16557 return cast<VectorType>(Vec->getType())
16558 ->getElementCount()
16559 .getKnownMinValue();
16561 ResizeToVF,
16562 [FirstInsert, &CreateShuffle](ArrayRef<int> Mask,
16563 ArrayRef<Value *> Vals) {
16564 assert((Vals.size() == 1 || Vals.size() == 2) &&
16565 "Expected exactly 1 or 2 input values.");
16566 if (Vals.size() == 1) {
16567 // Do not create shuffle if the mask is a simple identity
16568 // non-resizing mask.
16569 if (Mask.size() != cast<FixedVectorType>(Vals.front()->getType())
16570 ->getNumElements() ||
16571 !ShuffleVectorInst::isIdentityMask(Mask, Mask.size()))
16572 return CreateShuffle(Vals.front(), nullptr, Mask);
16573 return Vals.front();
16575 return CreateShuffle(Vals.front() ? Vals.front()
16576 : FirstInsert->getOperand(0),
16577 Vals.back(), Mask);
16579 auto It = ShuffledInserts[I].InsertElements.rbegin();
16580 // Rebuild buildvector chain.
16581 InsertElementInst *II = nullptr;
16582 if (It != ShuffledInserts[I].InsertElements.rend())
16583 II = *It;
16584 SmallVector<Instruction *> Inserts;
16585 while (It != ShuffledInserts[I].InsertElements.rend()) {
16586 assert(II && "Must be an insertelement instruction.");
16587 if (*It == II)
16588 ++It;
16589 else
16590 Inserts.push_back(cast<Instruction>(II));
16591 II = dyn_cast<InsertElementInst>(II->getOperand(0));
16593 for (Instruction *II : reverse(Inserts)) {
16594 II->replaceUsesOfWith(II->getOperand(0), NewInst);
16595 if (auto *NewI = dyn_cast<Instruction>(NewInst))
16596 if (II->getParent() == NewI->getParent() && II->comesBefore(NewI))
16597 II->moveAfter(NewI);
16598 NewInst = II;
16600 LastInsert->replaceAllUsesWith(NewInst);
16601 for (InsertElementInst *IE : reverse(ShuffledInserts[I].InsertElements)) {
16602 IE->replaceUsesOfWith(IE->getOperand(0),
16603 PoisonValue::get(IE->getOperand(0)->getType()));
16604 IE->replaceUsesOfWith(IE->getOperand(1),
16605 PoisonValue::get(IE->getOperand(1)->getType()));
16606 eraseInstruction(IE);
16608 CSEBlocks.insert(LastInsert->getParent());
16611 SmallVector<Instruction *> RemovedInsts;
16612 // For each vectorized value:
16613 for (auto &TEPtr : VectorizableTree) {
16614 TreeEntry *Entry = TEPtr.get();
16616 // No need to handle users of gathered values.
16617 if (Entry->isGather())
16618 continue;
16620 assert(Entry->VectorizedValue && "Can't find vectorizable value");
16622 // For each lane:
16623 for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
16624 Value *Scalar = Entry->Scalars[Lane];
16626 if (Entry->getOpcode() == Instruction::GetElementPtr &&
16627 !isa<GetElementPtrInst>(Scalar))
16628 continue;
16629 if (auto *EE = dyn_cast<ExtractElementInst>(Scalar);
16630 EE && IgnoredExtracts.contains(EE))
16631 continue;
16632 if (isa<PoisonValue>(Scalar))
16633 continue;
16634 #ifndef NDEBUG
16635 Type *Ty = Scalar->getType();
16636 if (!Ty->isVoidTy()) {
16637 for (User *U : Scalar->users()) {
16638 LLVM_DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
16640 // It is legal to delete users in the ignorelist.
16641 assert((getTreeEntry(U) ||
16642 (UserIgnoreList && UserIgnoreList->contains(U)) ||
16643 (isa_and_nonnull<Instruction>(U) &&
16644 isDeleted(cast<Instruction>(U)))) &&
16645 "Deleting out-of-tree value");
16648 #endif
16649 LLVM_DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
16650 auto *I = cast<Instruction>(Scalar);
16651 RemovedInsts.push_back(I);
16655 // Merge the DIAssignIDs from the about-to-be-deleted instructions into the
16656 // new vector instruction.
16657 if (auto *V = dyn_cast<Instruction>(VectorizableTree[0]->VectorizedValue))
16658 V->mergeDIAssignID(RemovedInsts);
16660 // Clear up reduction references, if any.
16661 if (UserIgnoreList) {
16662 for (Instruction *I : RemovedInsts) {
16663 const TreeEntry *IE = getTreeEntry(I);
16664 if (IE->Idx != 0 &&
16665 !(VectorizableTree.front()->isGather() &&
16666 !IE->UserTreeIndices.empty() &&
16667 (ValueToGatherNodes.lookup(I).contains(
16668 VectorizableTree.front().get()) ||
16669 any_of(IE->UserTreeIndices,
16670 [&](const EdgeInfo &EI) {
16671 return EI.UserTE == VectorizableTree.front().get() &&
16672 EI.EdgeIdx == UINT_MAX;
16673 }))) &&
16674 !(GatheredLoadsEntriesFirst.has_value() &&
16675 IE->Idx >= *GatheredLoadsEntriesFirst &&
16676 VectorizableTree.front()->isGather() &&
16677 is_contained(VectorizableTree.front()->Scalars, I)))
16678 continue;
16679 SmallVector<SelectInst *> LogicalOpSelects;
16680 I->replaceUsesWithIf(PoisonValue::get(I->getType()), [&](Use &U) {
16681 // Do not replace condition of the logical op in form select <cond>.
16682 bool IsPoisoningLogicalOp = isa<SelectInst>(U.getUser()) &&
16683 (match(U.getUser(), m_LogicalAnd()) ||
16684 match(U.getUser(), m_LogicalOr())) &&
16685 U.getOperandNo() == 0;
16686 if (IsPoisoningLogicalOp) {
16687 LogicalOpSelects.push_back(cast<SelectInst>(U.getUser()));
16688 return false;
16690 return UserIgnoreList->contains(U.getUser());
16692 // Replace conditions of the poisoning logical ops with the non-poison
16693 // constant value.
16694 for (SelectInst *SI : LogicalOpSelects)
16695 SI->setCondition(Constant::getNullValue(SI->getCondition()->getType()));
16698 // Retain to-be-deleted instructions for some debug-info bookkeeping and alias
16699 // cache correctness.
16700 // NOTE: removeInstructionsAndOperands only marks the instructions for deletion
16701 // - instructions are not deleted until later.
16702 removeInstructionsAndOperands(ArrayRef(RemovedInsts));
16704 Builder.ClearInsertionPoint();
16705 InstrElementSize.clear();
16707 const TreeEntry &RootTE = *VectorizableTree.front();
16708 Value *Vec = RootTE.VectorizedValue;
16709 if (auto It = MinBWs.find(&RootTE); ReductionBitWidth != 0 &&
16710 It != MinBWs.end() &&
16711 ReductionBitWidth != It->second.first) {
16712 IRBuilder<>::InsertPointGuard Guard(Builder);
16713 Builder.SetInsertPoint(ReductionRoot->getParent(),
16714 ReductionRoot->getIterator());
16715 Vec = Builder.CreateIntCast(
16716 Vec,
16717 VectorType::get(Builder.getIntNTy(ReductionBitWidth),
16718 cast<VectorType>(Vec->getType())->getElementCount()),
16719 It->second.second);
16721 return Vec;
16724 void BoUpSLP::optimizeGatherSequence() {
16725 LLVM_DEBUG(dbgs() << "SLP: Optimizing " << GatherShuffleExtractSeq.size()
16726 << " gather sequences instructions.\n");
16727 // LICM InsertElementInst sequences.
16728 for (Instruction *I : GatherShuffleExtractSeq) {
16729 if (isDeleted(I))
16730 continue;
16732 // Check if this block is inside a loop.
16733 Loop *L = LI->getLoopFor(I->getParent());
16734 if (!L)
16735 continue;
16737 // Check if it has a preheader.
16738 BasicBlock *PreHeader = L->getLoopPreheader();
16739 if (!PreHeader)
16740 continue;
16742 // If the vector or the element that we insert into it is an
16743 // instruction defined in this basic block, then we can't hoist
16744 // this instruction.
16745 if (any_of(I->operands(), [L](Value *V) {
16746 auto *OpI = dyn_cast<Instruction>(V);
16747 return OpI && L->contains(OpI);
16749 continue;
16751 // We can hoist this instruction. Move it to the pre-header.
16752 I->moveBefore(PreHeader->getTerminator());
16753 CSEBlocks.insert(PreHeader);
16756 // Make a list of all reachable blocks in our CSE queue.
16757 SmallVector<const DomTreeNode *, 8> CSEWorkList;
16758 CSEWorkList.reserve(CSEBlocks.size());
16759 for (BasicBlock *BB : CSEBlocks)
16760 if (DomTreeNode *N = DT->getNode(BB)) {
16761 assert(DT->isReachableFromEntry(N));
16762 CSEWorkList.push_back(N);
16765 // Sort blocks by domination. This ensures we visit a block after all blocks
16766 // dominating it are visited.
16767 llvm::sort(CSEWorkList, [](const DomTreeNode *A, const DomTreeNode *B) {
16768 assert((A == B) == (A->getDFSNumIn() == B->getDFSNumIn()) &&
16769 "Different nodes should have different DFS numbers");
16770 return A->getDFSNumIn() < B->getDFSNumIn();
16773 // Less defined shuffles can be replaced by the more defined copies.
16774 // Between two shuffles, one is less defined if it has the same vector operands
16775 // and each of its mask indices is either the same as in the other one or undef.
16776 // E.g. shuffle %0, poison, <0, 0, 0, undef> is less defined than shuffle %0,
16777 // poison, <0, 0, 0, 0>.
16778 auto &&IsIdenticalOrLessDefined = [this](Instruction *I1, Instruction *I2,
16779 SmallVectorImpl<int> &NewMask) {
16780 if (I1->getType() != I2->getType())
16781 return false;
16782 auto *SI1 = dyn_cast<ShuffleVectorInst>(I1);
16783 auto *SI2 = dyn_cast<ShuffleVectorInst>(I2);
16784 if (!SI1 || !SI2)
16785 return I1->isIdenticalTo(I2);
16786 if (SI1->isIdenticalTo(SI2))
16787 return true;
16788 for (int I = 0, E = SI1->getNumOperands(); I < E; ++I)
16789 if (SI1->getOperand(I) != SI2->getOperand(I))
16790 return false;
16791 // Check if the second instruction is more defined than the first one.
16792 NewMask.assign(SI2->getShuffleMask().begin(), SI2->getShuffleMask().end());
16793 ArrayRef<int> SM1 = SI1->getShuffleMask();
16794 // Count trailing undefs in the mask to check the final number of used
16795 // registers.
16796 unsigned LastUndefsCnt = 0;
16797 for (int I = 0, E = NewMask.size(); I < E; ++I) {
16798 if (SM1[I] == PoisonMaskElem)
16799 ++LastUndefsCnt;
16800 else
16801 LastUndefsCnt = 0;
16802 if (NewMask[I] != PoisonMaskElem && SM1[I] != PoisonMaskElem &&
16803 NewMask[I] != SM1[I])
16804 return false;
16805 if (NewMask[I] == PoisonMaskElem)
16806 NewMask[I] = SM1[I];
16808 // Check if the last undefs actually change the final number of used vector
16809 // registers.
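// Allow the replacement only if dropping the trailing undefs would not let
// SI1 fit into fewer vector registers; otherwise replacing it with the fully
// defined SI2 could increase register usage.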
16810 return SM1.size() - LastUndefsCnt > 1 &&
16811 TTI->getNumberOfParts(SI1->getType()) ==
16812 TTI->getNumberOfParts(
16813 getWidenedType(SI1->getType()->getElementType(),
16814 SM1.size() - LastUndefsCnt));
16816 // Perform O(N^2) search over the gather/shuffle sequences and merge identical
16817 // instructions. TODO: We can further optimize this scan if we split the
16818 // instructions into different buckets based on the insert lane.
16819 SmallVector<Instruction *, 16> Visited;
16820 for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
16821 assert(*I &&
16822 (I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
16823 "Worklist not sorted properly!");
16824 BasicBlock *BB = (*I)->getBlock();
16825 // For all instructions in blocks containing gather sequences:
16826 for (Instruction &In : llvm::make_early_inc_range(*BB)) {
16827 if (isDeleted(&In))
16828 continue;
16829 if (!isa<InsertElementInst, ExtractElementInst, ShuffleVectorInst>(&In) &&
16830 !GatherShuffleExtractSeq.contains(&In))
16831 continue;
16833 // Check if we can replace this instruction with any of the
16834 // visited instructions.
16835 bool Replaced = false;
16836 for (Instruction *&V : Visited) {
16837 SmallVector<int> NewMask;
16838 if (IsIdenticalOrLessDefined(&In, V, NewMask) &&
16839 DT->dominates(V->getParent(), In.getParent())) {
16840 In.replaceAllUsesWith(V);
16841 eraseInstruction(&In);
16842 if (auto *SI = dyn_cast<ShuffleVectorInst>(V))
16843 if (!NewMask.empty())
16844 SI->setShuffleMask(NewMask);
16845 Replaced = true;
16846 break;
16848 if (isa<ShuffleVectorInst>(In) && isa<ShuffleVectorInst>(V) &&
16849 GatherShuffleExtractSeq.contains(V) &&
16850 IsIdenticalOrLessDefined(V, &In, NewMask) &&
16851 DT->dominates(In.getParent(), V->getParent())) {
16852 In.moveAfter(V);
16853 V->replaceAllUsesWith(&In);
16854 eraseInstruction(V);
16855 if (auto *SI = dyn_cast<ShuffleVectorInst>(&In))
16856 if (!NewMask.empty())
16857 SI->setShuffleMask(NewMask);
16858 V = &In;
16859 Replaced = true;
16860 break;
16863 if (!Replaced) {
16864 assert(!is_contained(Visited, &In));
16865 Visited.push_back(&In);
16869 CSEBlocks.clear();
16870 GatherShuffleExtractSeq.clear();
16873 BoUpSLP::ScheduleData *
16874 BoUpSLP::BlockScheduling::buildBundle(ArrayRef<Value *> VL) {
16875 ScheduleData *Bundle = nullptr;
16876 ScheduleData *PrevInBundle = nullptr;
16877 for (Value *V : VL) {
16878 if (doesNotNeedToBeScheduled(V))
16879 continue;
16880 ScheduleData *BundleMember = getScheduleData(V);
16881 assert(BundleMember &&
16882 "no ScheduleData for bundle member "
16883 "(maybe not in same basic block)");
16884 assert(BundleMember->isSchedulingEntity() &&
16885 "bundle member already part of other bundle");
16886 if (PrevInBundle) {
16887 PrevInBundle->NextInBundle = BundleMember;
16888 } else {
16889 Bundle = BundleMember;
16892 // Group the instructions to a bundle.
16893 BundleMember->FirstInBundle = Bundle;
16894 PrevInBundle = BundleMember;
16896 assert(Bundle && "Failed to find schedule bundle");
16897 return Bundle;
16900 // Groups the instructions into a bundle (which is then a single scheduling
16901 // entity) and schedules instructions until the bundle gets ready.
16902 std::optional<BoUpSLP::ScheduleData *>
16903 BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP,
16904 const InstructionsState &S) {
16905 // No need to schedule PHIs, insertelement, extractelement and extractvalue
16906 // instructions.
16907 if (isa<PHINode>(S.getMainOp()) ||
16908 isVectorLikeInstWithConstOps(S.getMainOp()) || doesNotNeedToSchedule(VL))
16909 return nullptr;
16911 // Initialize the instruction bundle.
16912 Instruction *OldScheduleEnd = ScheduleEnd;
16913 LLVM_DEBUG(dbgs() << "SLP: bundle: " << *S.getMainOp() << "\n");
16915 auto TryScheduleBundleImpl = [this, OldScheduleEnd, SLP](bool ReSchedule,
16916 ScheduleData *Bundle) {
16917 // If the scheduling region got new instructions at the lower end (or it is
16918 // a new region for the first bundle), it is necessary to recalculate all
16919 // dependencies.
16920 // It is seldom that this needs to be done a second time after adding the
16921 // initial bundle to the region.
16922 if (ScheduleEnd != OldScheduleEnd) {
16923 for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode())
16924 if (ScheduleData *SD = getScheduleData(I))
16925 SD->clearDependencies();
16926 ReSchedule = true;
16928 if (Bundle) {
16929 LLVM_DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle
16930 << " in block " << BB->getName() << "\n");
16931 calculateDependencies(Bundle, /*InsertInReadyList=*/true, SLP);
16934 if (ReSchedule) {
16935 resetSchedule();
16936 initialFillReadyList(ReadyInsts);
16939 // Now try to schedule the new bundle or (if no bundle) just calculate
16940 // dependencies. As soon as the bundle is "ready" it means that there are no
16941 // cyclic dependencies and we can schedule it. Note that's important that we
16942 // don't "schedule" the bundle yet (see cancelScheduling).
16943 while (((!Bundle && ReSchedule) || (Bundle && !Bundle->isReady())) &&
16944 !ReadyInsts.empty()) {
16945 ScheduleData *Picked = ReadyInsts.pop_back_val();
16946 assert(Picked->isSchedulingEntity() && Picked->isReady() &&
16947 "must be ready to schedule");
16948 schedule(Picked, ReadyInsts);
16952 // Make sure that the scheduling region contains all
16953 // instructions of the bundle.
16954 for (Value *V : VL) {
16955 if (doesNotNeedToBeScheduled(V))
16956 continue;
16957 if (!extendSchedulingRegion(V, S)) {
16958 // The scheduling region got new instructions at the lower end (or it
16959 // is a new region for the first bundle), which makes it necessary to
16960 // recalculate all dependencies.
16961 // Otherwise the compiler may crash trying to incorrectly calculate
16962 // dependencies and emit instructions in the wrong order during the
16963 // actual scheduling.
16964 TryScheduleBundleImpl(/*ReSchedule=*/false, nullptr);
16965 return std::nullopt;
16969 bool ReSchedule = false;
16970 for (Value *V : VL) {
16971 if (doesNotNeedToBeScheduled(V))
16972 continue;
16973 ScheduleData *BundleMember = getScheduleData(V);
16974 assert(BundleMember &&
16975 "no ScheduleData for bundle member (maybe not in same basic block)");
16977 // Make sure we don't leave the pieces of the bundle in the ready list when
16978 // whole bundle might not be ready.
16979 ReadyInsts.remove(BundleMember);
16981 if (!BundleMember->IsScheduled)
16982 continue;
16983 // A bundle member was scheduled as a single instruction before and now
16984 // needs to be scheduled as part of the bundle. We just get rid of the
16985 // existing schedule.
16986 LLVM_DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
16987 << " was already scheduled\n");
16988 ReSchedule = true;
16991 auto *Bundle = buildBundle(VL);
16992 TryScheduleBundleImpl(ReSchedule, Bundle);
16993 if (!Bundle->isReady()) {
16994 cancelScheduling(VL, S.getMainOp());
16995 return std::nullopt;
16997 return Bundle;
17000 void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL,
17001 Value *OpValue) {
17002 if (isa<PHINode>(OpValue) || isVectorLikeInstWithConstOps(OpValue) ||
17003 doesNotNeedToSchedule(VL))
17004 return;
17006 if (doesNotNeedToBeScheduled(OpValue))
17007 OpValue = *find_if_not(VL, doesNotNeedToBeScheduled);
17008 ScheduleData *Bundle = getScheduleData(OpValue);
17009 LLVM_DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
17010 assert(!Bundle->IsScheduled &&
17011 "Can't cancel bundle which is already scheduled");
17012 assert(Bundle->isSchedulingEntity() &&
17013 (Bundle->isPartOfBundle() || needToScheduleSingleInstruction(VL)) &&
17014 "tried to unbundle something which is not a bundle");
17016 // Remove the bundle from the ready list.
17017 if (Bundle->isReady())
17018 ReadyInsts.remove(Bundle);
17020 // Un-bundle: make single instructions out of the bundle.
17021 ScheduleData *BundleMember = Bundle;
17022 while (BundleMember) {
17023 assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
17024 BundleMember->FirstInBundle = BundleMember;
17025 ScheduleData *Next = BundleMember->NextInBundle;
17026 BundleMember->NextInBundle = nullptr;
17027 BundleMember->TE = nullptr;
17028 if (BundleMember->unscheduledDepsInBundle() == 0) {
17029 ReadyInsts.insert(BundleMember);
17031 BundleMember = Next;
17035 BoUpSLP::ScheduleData *BoUpSLP::BlockScheduling::allocateScheduleDataChunks() {
17036 // Allocate a new ScheduleData for the instruction.
17037 if (ChunkPos >= ChunkSize) {
17038 ScheduleDataChunks.push_back(std::make_unique<ScheduleData[]>(ChunkSize));
17039 ChunkPos = 0;
17041 return &(ScheduleDataChunks.back()[ChunkPos++]);
17044 bool BoUpSLP::BlockScheduling::extendSchedulingRegion(
17045 Value *V, const InstructionsState &S) {
17046 Instruction *I = dyn_cast<Instruction>(V);
17047 assert(I && "bundle member must be an instruction");
17048 assert(!isa<PHINode>(I) && !isVectorLikeInstWithConstOps(I) &&
17049 !doesNotNeedToBeScheduled(I) &&
17050 "phi nodes/insertelements/extractelements/extractvalues don't need to "
17051 "be scheduled");
17052 if (getScheduleData(I))
17053 return true;
17054 if (!ScheduleStart) {
17055 // It's the first instruction in the new region.
17056 initScheduleData(I, I->getNextNode(), nullptr, nullptr);
17057 ScheduleStart = I;
17058 ScheduleEnd = I->getNextNode();
17059 assert(ScheduleEnd && "tried to vectorize a terminator?");
17060 LLVM_DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
17061 return true;
17063 // Search up and down at the same time, because we don't know if the new
17064 // instruction is above or below the existing scheduling region.
17065 // Ignore debug info (and other "AssumeLike" intrinsics) so that it's not
17066 // counted against the budget. Otherwise debug info could affect codegen.
17067 BasicBlock::reverse_iterator UpIter =
17068 ++ScheduleStart->getIterator().getReverse();
17069 BasicBlock::reverse_iterator UpperEnd = BB->rend();
17070 BasicBlock::iterator DownIter = ScheduleEnd->getIterator();
17071 BasicBlock::iterator LowerEnd = BB->end();
17072 auto IsAssumeLikeIntr = [](const Instruction &I) {
17073 if (auto *II = dyn_cast<IntrinsicInst>(&I))
17074 return II->isAssumeLikeIntrinsic();
17075 return false;
17077 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr);
17078 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr);
17079 while (UpIter != UpperEnd && DownIter != LowerEnd && &*UpIter != I &&
17080 &*DownIter != I) {
17081 if (++ScheduleRegionSize > ScheduleRegionSizeLimit) {
17082 LLVM_DEBUG(dbgs() << "SLP: exceeded schedule region size limit\n");
17083 return false;
17086 ++UpIter;
17087 ++DownIter;
17089 UpIter = std::find_if_not(UpIter, UpperEnd, IsAssumeLikeIntr);
17090 DownIter = std::find_if_not(DownIter, LowerEnd, IsAssumeLikeIntr);
17092 if (DownIter == LowerEnd || (UpIter != UpperEnd && &*UpIter == I)) {
17093 assert(I->getParent() == ScheduleStart->getParent() &&
17094 "Instruction is in wrong basic block.");
17095 initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
17096 ScheduleStart = I;
17097 LLVM_DEBUG(dbgs() << "SLP: extend schedule region start to " << *I
17098 << "\n");
17099 return true;
17101 assert((UpIter == UpperEnd || (DownIter != LowerEnd && &*DownIter == I)) &&
17102 "Expected to reach top of the basic block or instruction down the "
17103 "lower end.");
17104 assert(I->getParent() == ScheduleEnd->getParent() &&
17105 "Instruction is in wrong basic block.");
17106 initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
17107 nullptr);
17108 ScheduleEnd = I->getNextNode();
17109 assert(ScheduleEnd && "tried to vectorize a terminator?");
17110 LLVM_DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
17111 return true;
17114 void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
17115 Instruction *ToI,
17116 ScheduleData *PrevLoadStore,
17117 ScheduleData *NextLoadStore) {
17118 ScheduleData *CurrentLoadStore = PrevLoadStore;
17119 for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
17120 // No need to allocate data for non-schedulable instructions.
17121 if (doesNotNeedToBeScheduled(I))
17122 continue;
17123 ScheduleData *SD = ScheduleDataMap.lookup(I);
17124 if (!SD) {
17125 SD = allocateScheduleDataChunks();
17126 ScheduleDataMap[I] = SD;
17128 assert(!isInSchedulingRegion(SD) &&
17129 "new ScheduleData already in scheduling region");
17130 SD->init(SchedulingRegionID, I);
17132 if (I->mayReadOrWriteMemory() &&
17133 (!isa<IntrinsicInst>(I) ||
17134 (cast<IntrinsicInst>(I)->getIntrinsicID() != Intrinsic::sideeffect &&
17135 cast<IntrinsicInst>(I)->getIntrinsicID() !=
17136 Intrinsic::pseudoprobe))) {
17137 // Update the linked list of memory accessing instructions.
17138 if (CurrentLoadStore) {
17139 CurrentLoadStore->NextLoadStore = SD;
17140 } else {
17141 FirstLoadStoreInRegion = SD;
17143 CurrentLoadStore = SD;
17146 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
17147 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
17148 RegionHasStackSave = true;
17150 if (NextLoadStore) {
17151 if (CurrentLoadStore)
17152 CurrentLoadStore->NextLoadStore = NextLoadStore;
17153 } else {
17154 LastLoadStoreInRegion = CurrentLoadStore;
17158 void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
17159 bool InsertInReadyList,
17160 BoUpSLP *SLP) {
17161 assert(SD->isSchedulingEntity());
17163 SmallVector<ScheduleData *, 10> WorkList;
17164 WorkList.push_back(SD);
17166 while (!WorkList.empty()) {
17167 ScheduleData *SD = WorkList.pop_back_val();
17168 for (ScheduleData *BundleMember = SD; BundleMember;
17169 BundleMember = BundleMember->NextInBundle) {
17170 assert(isInSchedulingRegion(BundleMember));
17171 if (BundleMember->hasValidDependencies())
17172 continue;
17174 LLVM_DEBUG(dbgs() << "SLP: update deps of " << *BundleMember
17175 << "\n");
17176 BundleMember->Dependencies = 0;
17177 BundleMember->resetUnscheduledDeps();
17179 // Handle def-use chain dependencies.
17180 for (User *U : BundleMember->Inst->users()) {
17181 if (ScheduleData *UseSD = getScheduleData(cast<Instruction>(U))) {
17182 BundleMember->Dependencies++;
17183 ScheduleData *DestBundle = UseSD->FirstInBundle;
17184 if (!DestBundle->IsScheduled)
17185 BundleMember->incrementUnscheduledDeps(1);
17186 if (!DestBundle->hasValidDependencies())
17187 WorkList.push_back(DestBundle);
17191 auto MakeControlDependent = [&](Instruction *I) {
17192 auto *DepDest = getScheduleData(I);
17193 assert(DepDest && "must be in schedule window");
17194 DepDest->ControlDependencies.push_back(BundleMember);
17195 BundleMember->Dependencies++;
17196 ScheduleData *DestBundle = DepDest->FirstInBundle;
17197 if (!DestBundle->IsScheduled)
17198 BundleMember->incrementUnscheduledDeps(1);
17199 if (!DestBundle->hasValidDependencies())
17200 WorkList.push_back(DestBundle);
17203 // Any instruction which isn't safe to speculate at the beginning of the
17204 // block is control dependent on any early exit or non-willreturn call
17205 // which precedes it.
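// E.g. a udiv whose divisor is not known to be non-zero must not be hoisted
// above a may-not-return call that could be guarding it.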
17206 if (!isGuaranteedToTransferExecutionToSuccessor(BundleMember->Inst)) {
17207 for (Instruction *I = BundleMember->Inst->getNextNode();
17208 I != ScheduleEnd; I = I->getNextNode()) {
17209 if (isSafeToSpeculativelyExecute(I, &*BB->begin(), SLP->AC))
17210 continue;
17212 // Add the dependency
17213 MakeControlDependent(I);
17215 if (!isGuaranteedToTransferExecutionToSuccessor(I))
17216 // Everything past here must be control dependent on I.
17217 break;
17221 if (RegionHasStackSave) {
17222 // If we have an inalloca alloca instruction, it needs to be scheduled
17223 // after any preceding stacksave. We also need to prevent any alloca
17224 // from reordering above a preceding stackrestore.
17225 if (match(BundleMember->Inst, m_Intrinsic<Intrinsic::stacksave>()) ||
17226 match(BundleMember->Inst, m_Intrinsic<Intrinsic::stackrestore>())) {
17227 for (Instruction *I = BundleMember->Inst->getNextNode();
17228 I != ScheduleEnd; I = I->getNextNode()) {
17229 if (match(I, m_Intrinsic<Intrinsic::stacksave>()) ||
17230 match(I, m_Intrinsic<Intrinsic::stackrestore>()))
17231 // Any allocas past here must be control dependent on I, and I
17232 // must be memory dependent on BundleMember->Inst.
17233 break;
17235 if (!isa<AllocaInst>(I))
17236 continue;
17238 // Add the dependency
17239 MakeControlDependent(I);
17243 // In addition to the cases handled just above, we need to prevent
17244 // allocas and loads/stores from moving below a stacksave or a
17245 // stackrestore. Avoiding moving allocas below stackrestore is currently
17246 // believed to be merely conservative. Moving loads/stores below a
17247 // stackrestore can lead to incorrect code.
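// (E.g. a store into a dynamically sized alloca moved past the stackrestore
// that releases that stack memory would write to already-freed stack.)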
17248 if (isa<AllocaInst>(BundleMember->Inst) ||
17249 BundleMember->Inst->mayReadOrWriteMemory()) {
17250 for (Instruction *I = BundleMember->Inst->getNextNode();
17251 I != ScheduleEnd; I = I->getNextNode()) {
17252 if (!match(I, m_Intrinsic<Intrinsic::stacksave>()) &&
17253 !match(I, m_Intrinsic<Intrinsic::stackrestore>()))
17254 continue;
17256 // Add the dependency
17257 MakeControlDependent(I);
17258 break;
17263 // Handle the memory dependencies (if any).
17264 ScheduleData *DepDest = BundleMember->NextLoadStore;
17265 if (!DepDest)
17266 continue;
17267 Instruction *SrcInst = BundleMember->Inst;
17268 assert(SrcInst->mayReadOrWriteMemory() &&
17269 "NextLoadStore list for non memory effecting bundle?");
17270 MemoryLocation SrcLoc = getLocation(SrcInst);
17271 bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
17272 unsigned NumAliased = 0;
17273 unsigned DistToSrc = 1;
17275 for (; DepDest; DepDest = DepDest->NextLoadStore) {
17276 assert(isInSchedulingRegion(DepDest));
17278 // We have two limits to reduce the complexity:
17279 // 1) AliasedCheckLimit: It's a small limit to reduce calls to
17280 // SLP->isAliased (which is the expensive part in this loop).
17281 // 2) MaxMemDepDistance: It's for very large blocks and it aborts
17282 // the whole loop (even if the loop is fast, it's quadratic).
17283 // It's important for the loop break condition (see below) to
17284 // check this limit even between two read-only instructions.
17285 if (DistToSrc >= MaxMemDepDistance ||
17286 ((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
17287 (NumAliased >= AliasedCheckLimit ||
17288 SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
17290 // We increment the counter only if the locations are aliased
17291 // (instead of counting all alias checks). This gives a better
17292 // balance between reduced runtime and accurate dependencies.
17293 NumAliased++;
17295 DepDest->MemoryDependencies.push_back(BundleMember);
17296 BundleMember->Dependencies++;
17297 ScheduleData *DestBundle = DepDest->FirstInBundle;
17298 if (!DestBundle->IsScheduled) {
17299 BundleMember->incrementUnscheduledDeps(1);
17301 if (!DestBundle->hasValidDependencies()) {
17302 WorkList.push_back(DestBundle);
17306 // Example, explaining the loop break condition: Let's assume our
17307 // starting instruction is i0 and MaxMemDepDistance = 3.
17309 // +--------v--v--v
17310 // i0,i1,i2,i3,i4,i5,i6,i7,i8
17311 // +--------^--^--^
17313 // MaxMemDepDistance let us stop alias-checking at i3 and we add
17314 // dependencies from i0 to i3,i4,.. (even if they are not aliased).
17315 // Previously we already added dependencies from i3 to i6,i7,i8
17316 // (because of MaxMemDepDistance). As we added a dependency from
17317 // i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
17318 // and we can abort this loop at i6.
17319 if (DistToSrc >= 2 * MaxMemDepDistance)
17320 break;
17321 DistToSrc++;
17324 if (InsertInReadyList && SD->isReady()) {
17325 ReadyInsts.insert(SD);
17326 LLVM_DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst
17327 << "\n");
17332 void BoUpSLP::BlockScheduling::resetSchedule() {
17333 assert(ScheduleStart &&
17334 "tried to reset schedule on block which has not been scheduled");
17335 for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
17336 if (ScheduleData *SD = getScheduleData(I)) {
17337 assert(isInSchedulingRegion(SD) &&
17338 "ScheduleData not in scheduling region");
17339 SD->IsScheduled = false;
17340 SD->resetUnscheduledDeps();
17343 ReadyInsts.clear();
17346 void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
17347 if (!BS->ScheduleStart)
17348 return;
17350 LLVM_DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
17352 // A key point - if we got here, pre-scheduling was able to find a valid
17353 // scheduling of the sub-graph of the scheduling window which consists
17354 // of all vector bundles and their transitive users. As such, we do not
17355 // need to reschedule anything *outside of* that subgraph.
17357 BS->resetSchedule();
17359 // For the real scheduling we use a more sophisticated ready-list: it is
17360 // sorted by the original instruction location. This lets the final schedule
17361 // be as close as possible to the original instruction order.
17362 // WARNING: If changing this order causes a correctness issue, that means
17363 // there is some missing dependence edge in the schedule data graph.
17364 struct ScheduleDataCompare {
17365 bool operator()(ScheduleData *SD1, ScheduleData *SD2) const {
17366 return SD2->SchedulingPriority < SD1->SchedulingPriority;
17369 std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
17371 // Ensure that all dependency data is updated (for nodes in the sub-graph)
17372 // and fill the ready-list with initial instructions.
17373 int Idx = 0;
17374 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
17375 I = I->getNextNode()) {
17376 if (ScheduleData *SD = BS->getScheduleData(I)) {
17377 TreeEntry *SDTE = getTreeEntry(SD->Inst);
17378 (void)SDTE;
17379 assert((isVectorLikeInstWithConstOps(SD->Inst) ||
17380 SD->isPartOfBundle() ==
17381 (SDTE && !doesNotNeedToSchedule(SDTE->Scalars))) &&
17382 "scheduler and vectorizer bundle mismatch");
17383 SD->FirstInBundle->SchedulingPriority = Idx++;
17385 if (SD->isSchedulingEntity() && SD->isPartOfBundle())
17386 BS->calculateDependencies(SD, false, this);
17389 BS->initialFillReadyList(ReadyInsts);
17391 Instruction *LastScheduledInst = BS->ScheduleEnd;
17393 // Do the "real" scheduling.
17394 while (!ReadyInsts.empty()) {
17395 ScheduleData *Picked = *ReadyInsts.begin();
17396 ReadyInsts.erase(ReadyInsts.begin());
17398 // Move the scheduled instruction(s) to their dedicated places, if not
17399 // there yet.
17400 for (ScheduleData *BundleMember = Picked; BundleMember;
17401 BundleMember = BundleMember->NextInBundle) {
17402 Instruction *PickedInst = BundleMember->Inst;
17403 if (PickedInst->getNextNonDebugInstruction() != LastScheduledInst)
17404 PickedInst->moveAfter(LastScheduledInst->getPrevNode());
17405 LastScheduledInst = PickedInst;
17408 BS->schedule(Picked, ReadyInsts);
17411 // Check that we didn't break any of our invariants.
17412 #ifdef EXPENSIVE_CHECKS
17413 BS->verify();
17414 #endif
17416 #if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
17417 // Check that all schedulable entities got scheduled
17418 for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd; I = I->getNextNode()) {
17419 ScheduleData *SD = BS->getScheduleData(I);
17420 if (SD && SD->isSchedulingEntity() && SD->hasValidDependencies())
17421 assert(SD->IsScheduled && "must be scheduled at this point");
17423 #endif
17425 // Avoid duplicate scheduling of the block.
17426 BS->ScheduleStart = nullptr;
17429 unsigned BoUpSLP::getVectorElementSize(Value *V) {
17430 // If V is a store, just return the width of the stored value (or value
17431 // truncated just before storing) without traversing the expression tree.
17432 // This is the common case.
17433 if (auto *Store = dyn_cast<StoreInst>(V))
17434 return DL->getTypeSizeInBits(Store->getValueOperand()->getType());
17436 if (auto *IEI = dyn_cast<InsertElementInst>(V))
17437 return getVectorElementSize(IEI->getOperand(1));
17439 auto E = InstrElementSize.find(V);
17440 if (E != InstrElementSize.end())
17441 return E->second;
17443 // If V is not a store, we can traverse the expression tree to find loads
17444 // that feed it. The type of the loaded value may indicate a more suitable
17445 // width than V's type. We want to base the vector element size on the width
17446 // of memory operations where possible.
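// E.g. for an i64 add whose operands are zero-extended i32 loads, the element
// size is 32 bits: the loads, not the i64 arithmetic, determine the profitable
// width.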
17447 SmallVector<std::tuple<Instruction *, BasicBlock *, unsigned>> Worklist;
17448 SmallPtrSet<Instruction *, 16> Visited;
17449 if (auto *I = dyn_cast<Instruction>(V)) {
17450 Worklist.emplace_back(I, I->getParent(), 0);
17451 Visited.insert(I);
17454 // Traverse the expression tree in bottom-up order looking for loads. If we
17455 // encounter an instruction we don't yet handle, we give up.
17456 auto Width = 0u;
17457 Value *FirstNonBool = nullptr;
17458 while (!Worklist.empty()) {
17459 auto [I, Parent, Level] = Worklist.pop_back_val();
17461 // We should only be looking at scalar instructions here. If the current
17462 // instruction has a vector type, skip.
17463 auto *Ty = I->getType();
17464 if (isa<VectorType>(Ty))
17465 continue;
17466 if (Ty != Builder.getInt1Ty() && !FirstNonBool)
17467 FirstNonBool = I;
17468 if (Level > RecursionMaxDepth)
17469 continue;
17471 // If the current instruction is a load, update Width to reflect the
17472 // width of the loaded value.
17473 if (isa<LoadInst, ExtractElementInst, ExtractValueInst>(I))
17474 Width = std::max<unsigned>(Width, DL->getTypeSizeInBits(Ty));
17476 // Otherwise, we need to visit the operands of the instruction. We only
17477 // handle the interesting cases from buildTree here. If an operand is an
17478 // instruction we haven't yet visited and it is either in the same basic block
17479 // as the user or the user is a PHI node, we add it to the worklist.
17480 else if (isa<PHINode, CastInst, GetElementPtrInst, CmpInst, SelectInst,
17481 BinaryOperator, UnaryOperator>(I)) {
17482 for (Use &U : I->operands()) {
17483 if (auto *J = dyn_cast<Instruction>(U.get()))
17484 if (Visited.insert(J).second &&
17485 (isa<PHINode>(I) || J->getParent() == Parent)) {
17486 Worklist.emplace_back(J, J->getParent(), Level + 1);
17487 continue;
17489 if (!FirstNonBool && U.get()->getType() != Builder.getInt1Ty())
17490 FirstNonBool = U.get();
17492 } else {
17493 break;
17497 // If we didn't encounter a memory access in the expression tree, or if we
17498 // gave up for some reason, just return the width of V. Otherwise, return the
17499 // maximum width we found.
17500 if (!Width) {
17501 if (V->getType() == Builder.getInt1Ty() && FirstNonBool)
17502 V = FirstNonBool;
17503 Width = DL->getTypeSizeInBits(V->getType());
17506 for (Instruction *I : Visited)
17507 InstrElementSize[I] = Width;
17509 return Width;
17512 bool BoUpSLP::collectValuesToDemote(
17513 const TreeEntry &E, bool IsProfitableToDemoteRoot, unsigned &BitWidth,
17514 SmallVectorImpl<unsigned> &ToDemote, DenseSet<const TreeEntry *> &Visited,
17515 unsigned &MaxDepthLevel, bool &IsProfitableToDemote,
17516 bool IsTruncRoot) const {
17517 // We can always demote constants.
17518 if (all_of(E.Scalars, IsaPred<Constant>))
17519 return true;
17521 unsigned OrigBitWidth =
17522 DL->getTypeSizeInBits(E.Scalars.front()->getType()->getScalarType());
17523 if (OrigBitWidth == BitWidth) {
17524 MaxDepthLevel = 1;
17525 return true;
17528 // If the value is not a vectorized instruction in the expression, is not used
17529 // by an insertelement instruction and is not used in multiple vector nodes, it
17530 // cannot be demoted.
17531 bool IsSignedNode = any_of(E.Scalars, [&](Value *R) {
17532 if (isa<PoisonValue>(R))
17533 return false;
17534 return !isKnownNonNegative(R, SimplifyQuery(*DL));
17536 auto IsPotentiallyTruncated = [&](Value *V, unsigned &BitWidth) -> bool {
17537 if (isa<PoisonValue>(V))
17538 return true;
17539 if (MultiNodeScalars.contains(V))
17540 return false;
17541 // For shuffles of sext/zext with many uses we need to check the extra bit
17542 // for unsigned values, otherwise the reused scalars may be cast
17543 // incorrectly.
17544 bool IsSignedVal = !isKnownNonNegative(V, SimplifyQuery(*DL));
17545 if ((!IsSignedNode || IsSignedVal) && OrigBitWidth > BitWidth) {
17546 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17547 if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
17548 return true;
17550 unsigned NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
17551 unsigned BitWidth1 = OrigBitWidth - NumSignBits;
17552 if (IsSignedNode)
17553 ++BitWidth1;
17554 if (auto *I = dyn_cast<Instruction>(V)) {
17555 APInt Mask = DB->getDemandedBits(I);
17556 unsigned BitWidth2 =
17557 std::max<unsigned>(1, Mask.getBitWidth() - Mask.countl_zero());
17558 while (!IsSignedNode && BitWidth2 < OrigBitWidth) {
17559 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth2 - 1);
17560 if (MaskedValueIsZero(V, Mask, SimplifyQuery(*DL)))
17561 break;
17562 BitWidth2 *= 2;
17564 BitWidth1 = std::min(BitWidth1, BitWidth2);
17566 BitWidth = std::max(BitWidth, BitWidth1);
17567 return BitWidth > 0 && OrigBitWidth >= (BitWidth * 2);
17569 using namespace std::placeholders;
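// FinalAnalysis is the last-chance check: it requires every scalar in the
// entry to still be potentially truncated to BitWidth (provided demotion is
// still considered profitable), and for gather nodes it additionally records
// the entry for demotion when the extractelement bases or the resulting
// number of vector parts allow it.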
17570 auto FinalAnalysis = [&]() {
17571 if (!IsProfitableToDemote)
17572 return false;
17573 bool Res = all_of(
17574 E.Scalars, std::bind(IsPotentiallyTruncated, _1, std::ref(BitWidth)));
17575 // Demote gathers.
17576 if (Res && E.isGather()) {
17577 // Check possible extractelement instructions bases and final vector
17578 // length.
17579 SmallPtrSet<Value *, 4> UniqueBases;
17580 for (Value *V : E.Scalars) {
17581 auto *EE = dyn_cast<ExtractElementInst>(V);
17582 if (!EE)
17583 continue;
17584 UniqueBases.insert(EE->getVectorOperand());
17586 const unsigned VF = E.Scalars.size();
17587 Type *OrigScalarTy = E.Scalars.front()->getType();
17588 if (UniqueBases.size() <= 2 ||
17589 TTI->getNumberOfParts(getWidenedType(OrigScalarTy, VF)) ==
17590 TTI->getNumberOfParts(getWidenedType(
17591 IntegerType::get(OrigScalarTy->getContext(), BitWidth), VF)))
17592 ToDemote.push_back(E.Idx);
17594 return Res;
17596 if (E.isGather() || !Visited.insert(&E).second ||
17597 any_of(E.Scalars, [&](Value *V) {
17598 return all_of(V->users(), [&](User *U) {
17599 return isa<InsertElementInst>(U) && !getTreeEntry(U);
17602 return FinalAnalysis();
17604 if (any_of(E.Scalars, [&](Value *V) {
17605 return !all_of(V->users(), [=](User *U) {
17606 return getTreeEntry(U) ||
17607 (E.Idx == 0 && UserIgnoreList &&
17608 UserIgnoreList->contains(U)) ||
17609 (!isa<CmpInst>(U) && U->getType()->isSized() &&
17610 !U->getType()->isScalableTy() &&
17611 DL->getTypeSizeInBits(U->getType()) <= BitWidth);
17612 }) && !IsPotentiallyTruncated(V, BitWidth);
17614 return false;
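// ProcessOperands recursively collects demotable values from the given
// operand entries, propagating the maximum recursion depth reached; it sets
// NeedToExit when recursion into an operand fails but FinalAnalysis still
// succeeds.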
17616 auto ProcessOperands = [&](ArrayRef<const TreeEntry *> Operands,
17617 bool &NeedToExit) {
17618 NeedToExit = false;
17619 unsigned InitLevel = MaxDepthLevel;
17620 for (const TreeEntry *Op : Operands) {
17621 unsigned Level = InitLevel;
17622 if (!collectValuesToDemote(*Op, IsProfitableToDemoteRoot, BitWidth,
17623 ToDemote, Visited, Level, IsProfitableToDemote,
17624 IsTruncRoot)) {
17625 if (!IsProfitableToDemote)
17626 return false;
17627 NeedToExit = true;
17628 if (!FinalAnalysis())
17629 return false;
17630 continue;
17632 MaxDepthLevel = std::max(MaxDepthLevel, Level);
17634 return true;
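// AttemptCheckBitwidth probes candidate bit widths, doubling BitWidth until
// the checker succeeds; if no width below OrigBitWidth works, it falls back
// to the smallest width for which FinalAnalysis holds (or restores
// OrigBitWidth and fails).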
17636 auto AttemptCheckBitwidth =
17637 [&](function_ref<bool(unsigned, unsigned)> Checker, bool &NeedToExit) {
17638 // Try all bitwidth < OrigBitWidth.
17639 NeedToExit = false;
17640 unsigned BestFailBitwidth = 0;
17641 for (; BitWidth < OrigBitWidth; BitWidth *= 2) {
17642 if (Checker(BitWidth, OrigBitWidth))
17643 return true;
17644 if (BestFailBitwidth == 0 && FinalAnalysis())
17645 BestFailBitwidth = BitWidth;
17647 if (BitWidth >= OrigBitWidth) {
17648 if (BestFailBitwidth == 0) {
17649 BitWidth = OrigBitWidth;
17650 return false;
17652 MaxDepthLevel = 1;
17653 BitWidth = BestFailBitwidth;
17654 NeedToExit = true;
17655 return true;
17657 return false;
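// TryProcessInstruction drives the per-opcode handling in the switch below:
// with no operands it only marks the scalars as potentially truncated;
// otherwise it runs the optional bit width checker and then recurses into the
// operand entries, recording the entry in ToDemote on success.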
17659 auto TryProcessInstruction =
17660 [&](unsigned &BitWidth, ArrayRef<const TreeEntry *> Operands = {},
17661 function_ref<bool(unsigned, unsigned)> Checker = {}) {
17662 if (Operands.empty()) {
17663 if (!IsTruncRoot)
17664 MaxDepthLevel = 1;
17665 (void)for_each(E.Scalars, std::bind(IsPotentiallyTruncated, _1,
17666 std::ref(BitWidth)));
17667 } else {
17668 // If the entry has several vectorized uses, check that we can truncate
17669 // it; otherwise exit.
17670 if (E.UserTreeIndices.size() > 1 &&
17671 !all_of(E.Scalars, std::bind(IsPotentiallyTruncated, _1,
17672 std::ref(BitWidth))))
17673 return false;
17674 bool NeedToExit = false;
17675 if (Checker && !AttemptCheckBitwidth(Checker, NeedToExit))
17676 return false;
17677 if (NeedToExit)
17678 return true;
17679 if (!ProcessOperands(Operands, NeedToExit))
17680 return false;
17681 if (NeedToExit)
17682 return true;
17685 ++MaxDepthLevel;
17686 // Record the entry that we can demote.
17687 ToDemote.push_back(E.Idx);
17688 return IsProfitableToDemote;
17690 switch (E.getOpcode()) {
17692 // We can always demote truncations and extensions. Since truncations can
17693 // seed additional demotion, we save the truncated value.
17694 case Instruction::Trunc:
17695 if (IsProfitableToDemoteRoot)
17696 IsProfitableToDemote = true;
17697 return TryProcessInstruction(BitWidth);
17698 case Instruction::ZExt:
17699 case Instruction::SExt:
17700 IsProfitableToDemote = true;
17701 return TryProcessInstruction(BitWidth);
17703 // We can demote certain binary operations if we can demote both of their
17704 // operands.
17705 case Instruction::Add:
17706 case Instruction::Sub:
17707 case Instruction::Mul:
17708 case Instruction::And:
17709 case Instruction::Or:
17710 case Instruction::Xor: {
17711 return TryProcessInstruction(
17712 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)});
17714 case Instruction::Freeze:
17715 return TryProcessInstruction(BitWidth, getOperandEntry(&E, 0));
17716 case Instruction::Shl: {
17717 // If we are truncating the result of this SHL, and if it's a shift of an
17718 // in-range amount, we can always perform the SHL in a smaller type.
17719 auto ShlChecker = [&](unsigned BitWidth, unsigned) {
17720 return all_of(E.Scalars, [&](Value *V) {
17721 if (isa<PoisonValue>(V))
17722 return true;
17723 auto *I = cast<Instruction>(V);
17724 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17725 return AmtKnownBits.getMaxValue().ult(BitWidth);
17728 return TryProcessInstruction(
17729 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)}, ShlChecker);
17731 case Instruction::LShr: {
17732 // If this is a truncate of a logical shr, we can truncate it to a smaller
17733 // lshr iff we know that the bits we would otherwise be shifting in are
17734 // already zeros.
17735 auto LShrChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17736 return all_of(E.Scalars, [&](Value *V) {
17737 if (isa<PoisonValue>(V))
17738 return true;
17739 auto *I = cast<Instruction>(V);
17740 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17741 APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17742 return AmtKnownBits.getMaxValue().ult(BitWidth) &&
17743 MaskedValueIsZero(I->getOperand(0), ShiftedBits,
17744 SimplifyQuery(*DL));
17747 return TryProcessInstruction(
17748 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)},
17749 LShrChecker);
17751 case Instruction::AShr: {
17752 // If this is a truncate of an arithmetic shr, we can truncate it to a
17753 // smaller ashr iff we know that all the bits from the sign bit of the
17754 // original type and the sign bit of the truncate type are similar.
17755 auto AShrChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17756 return all_of(E.Scalars, [&](Value *V) {
17757 if (isa<PoisonValue>(V))
17758 return true;
17759 auto *I = cast<Instruction>(V);
17760 KnownBits AmtKnownBits = computeKnownBits(I->getOperand(1), *DL);
17761 unsigned ShiftedBits = OrigBitWidth - BitWidth;
17762 return AmtKnownBits.getMaxValue().ult(BitWidth) &&
17763 ShiftedBits < ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
17764 nullptr, DT);
17767 return TryProcessInstruction(
17768 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)},
17769 AShrChecker);
17771 case Instruction::UDiv:
17772 case Instruction::URem: {
17773 // UDiv and URem can be truncated if all the truncated bits are zero.
17774 auto Checker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17775 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17776 return all_of(E.Scalars, [&](Value *V) {
17777 auto *I = cast<Instruction>(V);
17778 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17779 return MaskedValueIsZero(I->getOperand(0), Mask, SimplifyQuery(*DL)) &&
17780 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL));
17783 return TryProcessInstruction(
17784 BitWidth, {getOperandEntry(&E, 0), getOperandEntry(&E, 1)}, Checker);
17787 // We can demote selects if we can demote their true and false values.
17788 case Instruction::Select: {
17789 return TryProcessInstruction(
17790 BitWidth, {getOperandEntry(&E, 1), getOperandEntry(&E, 2)});
17793 // We can demote phis if we can demote all their incoming operands. Note that
17794 // we don't need to worry about cycles since we ensure single use above.
17795 case Instruction::PHI: {
17796 const unsigned NumOps = E.getNumOperands();
17797 SmallVector<const TreeEntry *> Ops(NumOps);
17798 transform(seq<unsigned>(0, NumOps), Ops.begin(),
17799 std::bind(&BoUpSLP::getOperandEntry, this, &E, _1));
17801 return TryProcessInstruction(BitWidth, Ops);
17804 case Instruction::Call: {
17805 auto *IC = dyn_cast<IntrinsicInst>(E.getMainOp());
17806 if (!IC)
17807 break;
17808 Intrinsic::ID ID = getVectorIntrinsicIDForCall(IC, TLI);
17809 if (ID != Intrinsic::abs && ID != Intrinsic::smin &&
17810 ID != Intrinsic::smax && ID != Intrinsic::umin && ID != Intrinsic::umax)
17811 break;
17812 SmallVector<const TreeEntry *, 2> Operands(1, getOperandEntry(&E, 0));
17813 function_ref<bool(unsigned, unsigned)> CallChecker;
17814 auto CompChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17815 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17816 return all_of(E.Scalars, [&](Value *V) {
17817 auto *I = cast<Instruction>(V);
17818 if (ID == Intrinsic::umin || ID == Intrinsic::umax) {
17819 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
17820 return MaskedValueIsZero(I->getOperand(0), Mask,
17821 SimplifyQuery(*DL)) &&
17822 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL));
17824 assert((ID == Intrinsic::smin || ID == Intrinsic::smax) &&
17825 "Expected min/max intrinsics only.");
17826 unsigned SignBits = OrigBitWidth - BitWidth;
17827 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
17828 unsigned Op0SignBits = ComputeNumSignBits(I->getOperand(0), *DL, 0, AC,
17829 nullptr, DT);
17830 unsigned Op1SignBits = ComputeNumSignBits(I->getOperand(1), *DL, 0, AC,
17831 nullptr, DT);
17832 return SignBits <= Op0SignBits &&
17833 ((SignBits != Op0SignBits &&
17834 !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
17835 MaskedValueIsZero(I->getOperand(0), Mask,
17836 SimplifyQuery(*DL))) &&
17837 SignBits <= Op1SignBits &&
17838 ((SignBits != Op1SignBits &&
17839 !isKnownNonNegative(I->getOperand(1), SimplifyQuery(*DL))) ||
17840 MaskedValueIsZero(I->getOperand(1), Mask, SimplifyQuery(*DL)));
17843 auto AbsChecker = [&](unsigned BitWidth, unsigned OrigBitWidth) {
17844 assert(BitWidth <= OrigBitWidth && "Unexpected bitwidths!");
17845 return all_of(E.Scalars, [&](Value *V) {
17846 auto *I = cast<Instruction>(V);
17847 unsigned SignBits = OrigBitWidth - BitWidth;
17848 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth - 1);
17849 unsigned Op0SignBits =
17850 ComputeNumSignBits(I->getOperand(0), *DL, 0, AC, nullptr, DT);
17851 return SignBits <= Op0SignBits &&
17852 ((SignBits != Op0SignBits &&
17853 !isKnownNonNegative(I->getOperand(0), SimplifyQuery(*DL))) ||
17854 MaskedValueIsZero(I->getOperand(0), Mask, SimplifyQuery(*DL)));
17857 if (ID != Intrinsic::abs) {
17858 Operands.push_back(getOperandEntry(&E, 1));
17859 CallChecker = CompChecker;
17860 } else {
17861 CallChecker = AbsChecker;
17863 InstructionCost BestCost =
17864 std::numeric_limits<InstructionCost::CostType>::max();
17865 unsigned BestBitWidth = BitWidth;
17866 unsigned VF = E.Scalars.size();
17867 // Choose the best bitwidth based on cost estimations.
17868 auto Checker = [&](unsigned BitWidth, unsigned) {
17869 unsigned MinBW = PowerOf2Ceil(BitWidth);
17870 SmallVector<Type *> ArgTys = buildIntrinsicArgTypes(IC, ID, VF, MinBW);
17871 auto VecCallCosts = getVectorCallCosts(
17872 IC, getWidenedType(IntegerType::get(IC->getContext(), MinBW), VF),
17873 TTI, TLI, ArgTys);
17874 InstructionCost Cost = std::min(VecCallCosts.first, VecCallCosts.second);
17875 if (Cost < BestCost) {
17876 BestCost = Cost;
17877 BestBitWidth = BitWidth;
17879 return false;
17881 [[maybe_unused]] bool NeedToExit;
17882 (void)AttemptCheckBitwidth(Checker, NeedToExit);
17883 BitWidth = BestBitWidth;
17884 return TryProcessInstruction(BitWidth, Operands, CallChecker);
17887 // Otherwise, conservatively give up.
17888 default:
17889 break;
17891 MaxDepthLevel = 1;
17892 return FinalAnalysis();
17895 static RecurKind getRdxKind(Value *V);
17897 void BoUpSLP::computeMinimumValueSizes() {
17898 // We only attempt to truncate integer expressions.
17899 bool IsStoreOrInsertElt =
17900 VectorizableTree.front()->getOpcode() == Instruction::Store ||
17901 VectorizableTree.front()->getOpcode() == Instruction::InsertElement;
17902 if ((IsStoreOrInsertElt || UserIgnoreList) &&
17903 ExtraBitWidthNodes.size() <= 1 &&
17904 (!CastMaxMinBWSizes || CastMaxMinBWSizes->second == 0 ||
17905 CastMaxMinBWSizes->first / CastMaxMinBWSizes->second <= 2))
17906 return;
17908 unsigned NodeIdx = 0;
17909 if (IsStoreOrInsertElt && !VectorizableTree.front()->isGather())
17910 NodeIdx = 1;
17912 // Ensure the roots of the vectorizable tree don't form a cycle.
17913 if (VectorizableTree[NodeIdx]->isGather() ||
17914 (NodeIdx == 0 && !VectorizableTree[NodeIdx]->UserTreeIndices.empty()) ||
17915 (NodeIdx != 0 && any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
17916 [NodeIdx](const EdgeInfo &EI) {
17917 return EI.UserTE->Idx > NodeIdx;
17918 })))
17919 return;
17921 // If the first value node for a store/insertelement is a sext/zext/trunc,
17922 // skip it and resize to the final type.
17923 bool IsTruncRoot = false;
17924 bool IsProfitableToDemoteRoot = !IsStoreOrInsertElt;
17925 SmallVector<unsigned> RootDemotes;
17926 if (NodeIdx != 0 &&
17927 VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
17928 VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
17929 assert(IsStoreOrInsertElt && "Expected store/insertelement seeded graph.");
17930 IsTruncRoot = true;
17931 RootDemotes.push_back(NodeIdx);
17932 IsProfitableToDemoteRoot = true;
17933 ++NodeIdx;
17936 // The reduction has already been analyzed and is not profitable - exit.
17937 if (AnalyzedMinBWVals.contains(VectorizableTree[NodeIdx]->Scalars.front()))
17938 return;
17940 SmallVector<unsigned> ToDemote;
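// ComputeMaxBitWidth returns the maximal bit width needed to keep the given
// tree entry's values exact (rounded up to a power of two), or 0 if demotion
// is not possible or not profitable; entries that can be demoted are appended
// to ToDemote.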
17941 auto ComputeMaxBitWidth = [&](const TreeEntry &E, bool IsTopRoot,
17942 bool IsProfitableToDemoteRoot, unsigned Opcode,
17943 unsigned Limit, bool IsTruncRoot,
17944 bool IsSignedCmp) -> unsigned {
17945 ToDemote.clear();
17946 // If the root is a trunc and the next node is a gather/buildvector, keep the
17947 // trunc in scalars, which is free in most cases.
17948 if (E.isGather() && IsTruncRoot && E.UserTreeIndices.size() == 1 &&
17949 E.Idx > (IsStoreOrInsertElt ? 2u : 1u) &&
17950 all_of(E.Scalars, [&](Value *V) {
17951 return V->hasOneUse() || isa<Constant>(V) ||
17952 (!V->hasNUsesOrMore(UsesLimit) &&
17953 none_of(V->users(), [&](User *U) {
17954 const TreeEntry *TE = getTreeEntry(U);
17955 const TreeEntry *UserTE = E.UserTreeIndices.back().UserTE;
17956 if (TE == UserTE || !TE)
17957 return false;
17958 if (!isa<CastInst, BinaryOperator, FreezeInst, PHINode,
17959 SelectInst>(U) ||
17960 !isa<CastInst, BinaryOperator, FreezeInst, PHINode,
17961 SelectInst>(UserTE->getMainOp()))
17962 return true;
17963 unsigned UserTESz = DL->getTypeSizeInBits(
17964 UserTE->Scalars.front()->getType());
17965 auto It = MinBWs.find(TE);
17966 if (It != MinBWs.end() && It->second.first > UserTESz)
17967 return true;
17968 return DL->getTypeSizeInBits(U->getType()) > UserTESz;
17969 }));
17970 })) {
17971 ToDemote.push_back(E.Idx);
17972 const TreeEntry *UserTE = E.UserTreeIndices.back().UserTE;
17973 auto It = MinBWs.find(UserTE);
17974 if (It != MinBWs.end())
17975 return It->second.first;
17976 unsigned MaxBitWidth =
17977 DL->getTypeSizeInBits(UserTE->Scalars.front()->getType());
17978 MaxBitWidth = bit_ceil(MaxBitWidth);
17979 if (MaxBitWidth < 8 && MaxBitWidth > 1)
17980 MaxBitWidth = 8;
17981 return MaxBitWidth;
17984 unsigned VF = E.getVectorFactor();
17985 Type *ScalarTy = E.Scalars.front()->getType();
17986 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
17987 auto *TreeRootIT = dyn_cast<IntegerType>(ScalarTy->getScalarType());
17988 if (!TreeRootIT || !Opcode)
17989 return 0u;
17991 if (any_of(E.Scalars,
17992 [&](Value *V) { return AnalyzedMinBWVals.contains(V); }))
17993 return 0u;
17995 unsigned NumParts = TTI->getNumberOfParts(
17996 getWidenedType(TreeRootIT, VF * ScalarTyNumElements));
17998 // The maximum bit width required to represent all the values that can be
17999 // demoted without loss of precision. It would be safe to truncate the roots
18000 // of the expression to this width.
18001 unsigned MaxBitWidth = 1u;
18003 // True if the roots can be zero-extended back to their original type,
18004 // rather than sign-extended. We know that if the leading bits are not
18005 // demanded, we can safely zero-extend. So we initialize IsKnownPositive to
18006 // True.
18007 // Determine if the sign bit of all the roots is known to be zero. If not,
18008 // IsKnownPositive is set to False.
18009 bool IsKnownPositive = !IsSignedCmp && all_of(E.Scalars, [&](Value *R) {
18010 if (isa<PoisonValue>(R))
18011 return true;
18012 KnownBits Known = computeKnownBits(R, *DL);
18013 return Known.isNonNegative();
18016 // We first check if all the bits of the roots are demanded. If they're not,
18017 // we can truncate the roots to this narrower type.
18018 for (Value *Root : E.Scalars) {
18019 if (isa<PoisonValue>(Root))
18020 continue;
18021 unsigned NumSignBits = ComputeNumSignBits(Root, *DL, 0, AC, nullptr, DT);
18022 TypeSize NumTypeBits =
18023 DL->getTypeSizeInBits(Root->getType()->getScalarType());
18024 unsigned BitWidth1 = NumTypeBits - NumSignBits;
18025 // If we can't prove that the sign bit is zero, we must add one to the
18026 // maximum bit width to account for the unknown sign bit. This preserves
18027 // the existing sign bit so we can safely sign-extend the root back to the
18028 // original type. Otherwise, if we know the sign bit is zero, we will
18029 // zero-extend the root instead.
18031 // FIXME: This is somewhat suboptimal, as there will be cases where adding
18032 // one to the maximum bit width will yield a larger-than-necessary
18033 // type. In general, we need to add an extra bit only if we can't
18034 // prove that the upper bit of the original type is equal to the
18035 // upper bit of the proposed smaller type. If these two bits are
18036 // the same (either zero or one) we know that sign-extending from
18037 // the smaller type will result in the same value. Here, since we
18038 // can't yet prove this, we are just making the proposed smaller
18039 // type larger to ensure correctness.
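// As a purely illustrative example (not taken from any test): an i32 root
// with 25 known sign bits gives BitWidth1 = 32 - 25 = 7, or 8 once the
// unknown sign bit is accounted for; if DemandedBits reports only the low 8
// bits as demanded, BitWidth2 = 8, so the root contributes
// min(BitWidth1, BitWidth2) = 8 to MaxBitWidth.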
18040 if (!IsKnownPositive)
18041 ++BitWidth1;
18043 APInt Mask = DB->getDemandedBits(cast<Instruction>(Root));
18044 unsigned BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
18045 MaxBitWidth =
18046 std::max<unsigned>(std::min(BitWidth1, BitWidth2), MaxBitWidth);
18049 if (MaxBitWidth < 8 && MaxBitWidth > 1)
18050 MaxBitWidth = 8;
18052 // If the original type is large but the reduced type does not improve
18053 // register usage - ignore it.
18054 if (NumParts > 1 &&
18055 NumParts ==
18056 TTI->getNumberOfParts(getWidenedType(
18057 IntegerType::get(F->getContext(), bit_ceil(MaxBitWidth)), VF)))
18058 return 0u;
18060 bool IsProfitableToDemote = Opcode == Instruction::Trunc ||
18061 Opcode == Instruction::SExt ||
18062 Opcode == Instruction::ZExt || NumParts > 1;
18063 // Conservatively determine if we can actually truncate the roots of the
18064 // expression. Collect the values that can be demoted in ToDemote and
18065 // additional roots that require investigating in Roots.
18066 DenseSet<const TreeEntry *> Visited;
18067 unsigned MaxDepthLevel = IsTruncRoot ? Limit : 1;
18068 bool NeedToDemote = IsProfitableToDemote;
18070 if (!collectValuesToDemote(E, IsProfitableToDemoteRoot, MaxBitWidth,
18071 ToDemote, Visited, MaxDepthLevel, NeedToDemote,
18072 IsTruncRoot) ||
18073 (MaxDepthLevel <= Limit &&
18074 !(((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
18075 (!IsTopRoot || !(IsStoreOrInsertElt || UserIgnoreList) ||
18076 DL->getTypeSizeInBits(TreeRootIT) /
18077 DL->getTypeSizeInBits(
18078 E.getMainOp()->getOperand(0)->getType()) >
18079 2)))))
18080 return 0u;
18081 // Round MaxBitWidth up to the next power-of-two.
18082 MaxBitWidth = bit_ceil(MaxBitWidth);
18084 return MaxBitWidth;
18087 // If we can truncate the root, we must collect additional values that might
18088 // be demoted as a result. That is, those seeded by truncations we will
18089 // modify.
18090 // Add reduction ops sizes, if any.
18091 if (UserIgnoreList &&
18092 isa<IntegerType>(VectorizableTree.front()->Scalars.front()->getType())) {
18093 // Convert vector_reduce_add(ZExt(<n x i1>)) to ZExtOrTrunc(ctpop(bitcast <n
18094 // x i1> to iN)).
18095 if (all_of(*UserIgnoreList,
18096 [](Value *V) {
18097 return isa<PoisonValue>(V) ||
18098 cast<Instruction>(V)->getOpcode() == Instruction::Add;
18099 }) &&
18100 VectorizableTree.front()->State == TreeEntry::Vectorize &&
18101 VectorizableTree.front()->getOpcode() == Instruction::ZExt &&
18102 cast<CastInst>(VectorizableTree.front()->getMainOp())->getSrcTy() ==
18103 Builder.getInt1Ty()) {
18104 ReductionBitWidth = 1;
18105 } else {
18106 for (Value *V : *UserIgnoreList) {
18107 if (isa<PoisonValue>(V))
18108 continue;
18109 unsigned NumSignBits = ComputeNumSignBits(V, *DL, 0, AC, nullptr, DT);
18110 TypeSize NumTypeBits = DL->getTypeSizeInBits(V->getType());
18111 unsigned BitWidth1 = NumTypeBits - NumSignBits;
18112 if (!isKnownNonNegative(V, SimplifyQuery(*DL)))
18113 ++BitWidth1;
18114 unsigned BitWidth2 = BitWidth1;
18115 if (!RecurrenceDescriptor::isIntMinMaxRecurrenceKind(::getRdxKind(V))) {
18116 APInt Mask = DB->getDemandedBits(cast<Instruction>(V));
18117 BitWidth2 = Mask.getBitWidth() - Mask.countl_zero();
18119 ReductionBitWidth =
18120 std::max(std::min(BitWidth1, BitWidth2), ReductionBitWidth);
18122 if (ReductionBitWidth < 8 && ReductionBitWidth > 1)
18123 ReductionBitWidth = 8;
18125 ReductionBitWidth = bit_ceil(ReductionBitWidth);
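// Walk the tree roots (including the extra bit width nodes collected during
// tree construction), compute the maximal demotable width for each and record
// the demotable entries in MinBWs.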
18128 bool IsTopRoot = NodeIdx == 0;
18129 while (NodeIdx < VectorizableTree.size() &&
18130 VectorizableTree[NodeIdx]->State == TreeEntry::Vectorize &&
18131 VectorizableTree[NodeIdx]->getOpcode() == Instruction::Trunc) {
18132 RootDemotes.push_back(NodeIdx);
18133 ++NodeIdx;
18134 IsTruncRoot = true;
18136 bool IsSignedCmp = false;
18137 while (NodeIdx < VectorizableTree.size()) {
18138 ArrayRef<Value *> TreeRoot = VectorizableTree[NodeIdx]->Scalars;
18139 unsigned Limit = 2;
18140 unsigned Opcode = VectorizableTree[NodeIdx]->getOpcode();
18141 if (IsTopRoot &&
18142 ReductionBitWidth ==
18143 DL->getTypeSizeInBits(
18144 VectorizableTree.front()->Scalars.front()->getType()))
18145 Limit = 3;
18146 unsigned MaxBitWidth = ComputeMaxBitWidth(
18147 *VectorizableTree[NodeIdx], IsTopRoot, IsProfitableToDemoteRoot, Opcode,
18148 Limit, IsTruncRoot, IsSignedCmp);
18149 if (ReductionBitWidth != 0 && (IsTopRoot || !RootDemotes.empty())) {
18150 if (MaxBitWidth != 0 && ReductionBitWidth < MaxBitWidth)
18151 ReductionBitWidth = bit_ceil(MaxBitWidth);
18152 else if (MaxBitWidth == 0)
18153 ReductionBitWidth = 0;
18156 for (unsigned Idx : RootDemotes) {
18157 if (all_of(VectorizableTree[Idx]->Scalars, [&](Value *V) {
18158 uint32_t OrigBitWidth =
18159 DL->getTypeSizeInBits(V->getType()->getScalarType());
18160 if (OrigBitWidth > MaxBitWidth) {
18161 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, MaxBitWidth);
18162 return MaskedValueIsZero(V, Mask, SimplifyQuery(*DL));
18164 return false;
18166 ToDemote.push_back(Idx);
18168 RootDemotes.clear();
18169 IsTopRoot = false;
18170 IsProfitableToDemoteRoot = true;
18172 if (ExtraBitWidthNodes.empty()) {
18173 NodeIdx = VectorizableTree.size();
18174 } else {
18175 unsigned NewIdx = 0;
18176 do {
18177 NewIdx = *ExtraBitWidthNodes.begin();
18178 ExtraBitWidthNodes.erase(ExtraBitWidthNodes.begin());
18179 } while (NewIdx <= NodeIdx && !ExtraBitWidthNodes.empty());
18180 NodeIdx = NewIdx;
18181 IsTruncRoot =
18182 NodeIdx < VectorizableTree.size() &&
18183 any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
18184 [](const EdgeInfo &EI) {
18185 return EI.EdgeIdx == 0 &&
18186 EI.UserTE->getOpcode() == Instruction::Trunc &&
18187 !EI.UserTE->isAltShuffle();
18189 IsSignedCmp =
18190 NodeIdx < VectorizableTree.size() &&
18191 any_of(VectorizableTree[NodeIdx]->UserTreeIndices,
18192 [&](const EdgeInfo &EI) {
18193 return EI.UserTE->getOpcode() == Instruction::ICmp &&
18194 any_of(EI.UserTE->Scalars, [&](Value *V) {
18195 auto *IC = dyn_cast<ICmpInst>(V);
18196 return IC &&
18197 (IC->isSigned() ||
18198 !isKnownNonNegative(IC->getOperand(0),
18199 SimplifyQuery(*DL)) ||
18200 !isKnownNonNegative(IC->getOperand(1),
18201 SimplifyQuery(*DL)));
18206 // If the maximum bit width we compute is less than the width of the roots'
18207 // type, we can proceed with the narrowing. Otherwise, do nothing.
18208 if (MaxBitWidth == 0 ||
18209 MaxBitWidth >=
18210 cast<IntegerType>(TreeRoot.front()->getType()->getScalarType())
18211 ->getBitWidth()) {
18212 if (UserIgnoreList)
18213 AnalyzedMinBWVals.insert(TreeRoot.begin(), TreeRoot.end());
18214 continue;
18217 // Finally, map the values we can demote to the maximum bit width we
18218 // computed.
18219 for (unsigned Idx : ToDemote) {
18220 TreeEntry *TE = VectorizableTree[Idx].get();
18221 if (MinBWs.contains(TE))
18222 continue;
18223 bool IsSigned = any_of(TE->Scalars, [&](Value *R) {
18224 if (isa<PoisonValue>(R))
18225 return false;
18226 return !isKnownNonNegative(R, SimplifyQuery(*DL));
18228 MinBWs.try_emplace(TE, MaxBitWidth, IsSigned);
18233 PreservedAnalyses SLPVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) {
18234 auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
18235 auto *TTI = &AM.getResult<TargetIRAnalysis>(F);
18236 auto *TLI = AM.getCachedResult<TargetLibraryAnalysis>(F);
18237 auto *AA = &AM.getResult<AAManager>(F);
18238 auto *LI = &AM.getResult<LoopAnalysis>(F);
18239 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
18240 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
18241 auto *DB = &AM.getResult<DemandedBitsAnalysis>(F);
18242 auto *ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
18244 bool Changed = runImpl(F, SE, TTI, TLI, AA, LI, DT, AC, DB, ORE);
18245 if (!Changed)
18246 return PreservedAnalyses::all();
18248 PreservedAnalyses PA;
18249 PA.preserveSet<CFGAnalyses>();
18250 return PA;
18253 bool SLPVectorizerPass::runImpl(Function &F, ScalarEvolution *SE_,
18254 TargetTransformInfo *TTI_,
18255 TargetLibraryInfo *TLI_, AAResults *AA_,
18256 LoopInfo *LI_, DominatorTree *DT_,
18257 AssumptionCache *AC_, DemandedBits *DB_,
18258 OptimizationRemarkEmitter *ORE_) {
18259 if (!RunSLPVectorization)
18260 return false;
18261 SE = SE_;
18262 TTI = TTI_;
18263 TLI = TLI_;
18264 AA = AA_;
18265 LI = LI_;
18266 DT = DT_;
18267 AC = AC_;
18268 DB = DB_;
18269 DL = &F.getDataLayout();
18271 Stores.clear();
18272 GEPs.clear();
18273 bool Changed = false;
18275 // If the target claims to have no vector registers don't attempt
18276 // vectorization.
18277 if (!TTI->getNumberOfRegisters(TTI->getRegisterClassForType(true))) {
18278 LLVM_DEBUG(
18279 dbgs() << "SLP: Didn't find any vector registers for target, abort.\n");
18280 return false;
18283 // Don't vectorize when the attribute NoImplicitFloat is used.
18284 if (F.hasFnAttribute(Attribute::NoImplicitFloat))
18285 return false;
18287 LLVM_DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
18289 // Use the bottom up slp vectorizer to construct chains that start with
18290 // store instructions.
18291 BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC, DB, DL, ORE_);
18293 // A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
18294 // delete instructions.
18296 // Update DFS numbers now so that we can use them for ordering.
18297 DT->updateDFSNumbers();
18299 // Scan the blocks in the function in post order.
18300 for (auto *BB : post_order(&F.getEntryBlock())) {
18301 if (BB->isEHPad() || isa_and_nonnull<UnreachableInst>(BB->getTerminator()))
18302 continue;
18304 // Start new block - clear the list of reduction roots.
18305 R.clearReductionData();
18306 collectSeedInstructions(BB);
18308 // Vectorize trees that end at stores.
18309 if (!Stores.empty()) {
18310 LLVM_DEBUG(dbgs() << "SLP: Found stores for " << Stores.size()
18311 << " underlying objects.\n");
18312 Changed |= vectorizeStoreChains(R);
18315 // Vectorize trees that end at reductions.
18316 Changed |= vectorizeChainsInBlock(BB, R);
18318 // Vectorize the index computations of getelementptr instructions. This
18319 // is primarily intended to catch gather-like idioms ending at
18320 // non-consecutive loads.
18321 if (!GEPs.empty()) {
18322 LLVM_DEBUG(dbgs() << "SLP: Found GEPs for " << GEPs.size()
18323 << " underlying objects.\n");
18324 Changed |= vectorizeGEPIndices(BB, R);
18328 if (Changed) {
18329 R.optimizeGatherSequence();
18330 LLVM_DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
18332 return Changed;
18335 std::optional<bool>
18336 SLPVectorizerPass::vectorizeStoreChain(ArrayRef<Value *> Chain, BoUpSLP &R,
18337 unsigned Idx, unsigned MinVF,
18338 unsigned &Size) {
18339 Size = 0;
18340 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << Chain.size()
18341 << "\n");
18342 const unsigned Sz = R.getVectorElementSize(Chain[0]);
18343 unsigned VF = Chain.size();
18345 if (!has_single_bit(Sz) ||
18346 !hasFullVectorsOrPowerOf2(
18347 *TTI, cast<StoreInst>(Chain.front())->getValueOperand()->getType(),
18348 VF) ||
18349 VF < 2 || VF < MinVF) {
18350 // Check if vectorizing with a non-power-of-2 VF should be considered. At
18351 // the moment, only consider cases where VF + 1 is a power-of-2, i.e. almost
18352 // all vector lanes are used.
18353 if (!VectorizeNonPowerOf2 || (VF < MinVF && VF + 1 != MinVF))
18354 return false;
18357 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << Idx
18358 << "\n");
18360 SetVector<Value *> ValOps;
18361 for (Value *V : Chain)
18362 ValOps.insert(cast<StoreInst>(V)->getValueOperand());
18363 // Exit if the operands do not share the same/alternate opcode or the number of unique values is not a power of 2.
18364 InstructionsState S = getSameOpcode(ValOps.getArrayRef(), *TLI);
18365 if (all_of(ValOps, IsaPred<Instruction>) && ValOps.size() > 1) {
18366 DenseSet<Value *> Stores(Chain.begin(), Chain.end());
18367 bool IsAllowedSize =
18368 hasFullVectorsOrPowerOf2(*TTI, ValOps.front()->getType(),
18369 ValOps.size()) ||
18370 (VectorizeNonPowerOf2 && has_single_bit(ValOps.size() + 1));
18371 if ((!IsAllowedSize && S.getOpcode() &&
18372 S.getOpcode() != Instruction::Load &&
18373 (!S.getMainOp()->isSafeToRemove() ||
18374 any_of(ValOps.getArrayRef(),
18375 [&](Value *V) {
18376 return !isa<ExtractElementInst>(V) &&
18377 (V->getNumUses() > Chain.size() ||
18378 any_of(V->users(), [&](User *U) {
18379 return !Stores.contains(U);
18380 }));
18381 }))) ||
18382 (ValOps.size() > Chain.size() / 2 && !S.getOpcode())) {
18383 Size = (!IsAllowedSize && S.getOpcode()) ? 1 : 2;
18384 return false;
18387 if (R.isLoadCombineCandidate(Chain))
18388 return true;
18389 R.buildTree(Chain);
18390 // Check if the tree is tiny and the store itself or its stored value cannot be vectorized.
18391 if (R.isTreeTinyAndNotFullyVectorizable()) {
18392 if (R.isGathered(Chain.front()) ||
18393 R.isNotScheduled(cast<StoreInst>(Chain.front())->getValueOperand()))
18394 return std::nullopt;
18395 Size = R.getCanonicalGraphSize();
18396 return false;
18398 R.reorderTopToBottom();
18399 R.reorderBottomToTop();
18400 R.transformNodes();
18401 R.buildExternalUses();
18403 R.computeMinimumValueSizes();
18405 Size = R.getCanonicalGraphSize();
18406 if (S.getOpcode() == Instruction::Load)
18407 Size = 2; // cut off masked gather small trees
18408 InstructionCost Cost = R.getTreeCost();
18410 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost << " for VF=" << VF << "\n");
18411 if (Cost < -SLPCostThreshold) {
18412 LLVM_DEBUG(dbgs() << "SLP: Decided to vectorize cost = " << Cost << "\n");
18414 using namespace ore;
18416 R.getORE()->emit(OptimizationRemark(SV_NAME, "StoresVectorized",
18417 cast<StoreInst>(Chain[0]))
18418 << "Stores SLP vectorized with cost " << NV("Cost", Cost)
18419 << " and with tree size "
18420 << NV("TreeSize", R.getTreeSize()));
18422 R.vectorizeTree();
18423 return true;
18426 return false;
18429 /// Checks if the quadratic mean deviation of the tree sizes is less than one ninth (~11%) of the mean size.
18430 static bool checkTreeSizes(ArrayRef<std::pair<unsigned, unsigned>> Sizes,
18431 bool First) {
18432 unsigned Num = 0;
18433 uint64_t Sum = std::accumulate(
18434 Sizes.begin(), Sizes.end(), static_cast<uint64_t>(0),
18435 [&](uint64_t V, const std::pair<unsigned, unsigned> &Val) {
18436 unsigned Size = First ? Val.first : Val.second;
18437 if (Size == 1)
18438 return V;
18439 ++Num;
18440 return V + Size;
18442 if (Num == 0)
18443 return true;
18444 uint64_t Mean = Sum / Num;
18445 if (Mean == 0)
18446 return true;
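// Dev below is the variance of the non-trivial sizes; the final check
// Dev * 81 / (Mean * Mean) == 0 (unsigned division) is equivalent to
// requiring the root-mean-square deviation to be less than Mean / 9.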
18447 uint64_t Dev = std::accumulate(
18448 Sizes.begin(), Sizes.end(), static_cast<uint64_t>(0),
18449 [&](uint64_t V, const std::pair<unsigned, unsigned> &Val) {
18450 unsigned P = First ? Val.first : Val.second;
18451 if (P == 1)
18452 return V;
18453 return V + (P - Mean) * (P - Mean);
18454 }) /
18455 Num;
18456 return Dev * 81 / (Mean * Mean) == 0;
18459 bool SLPVectorizerPass::vectorizeStores(
18460 ArrayRef<StoreInst *> Stores, BoUpSLP &R,
18461 DenseSet<std::tuple<Value *, Value *, Value *, Value *, unsigned>>
18462 &Visited) {
18463 // We may run into multiple chains that merge into a single chain. We mark the
18464 // stores that we vectorized so that we don't visit the same store twice.
18465 BoUpSLP::ValueSet VectorizedStores;
18466 bool Changed = false;
18468 struct StoreDistCompare {
18469 bool operator()(const std::pair<unsigned, int> &Op1,
18470 const std::pair<unsigned, int> &Op2) const {
18471 return Op1.second < Op2.second;
18474 // A set of pairs (index of the store in the Stores array, distance of the
18475 // store address relative to the base store address in units).
18476 using StoreIndexToDistSet =
18477 std::set<std::pair<unsigned, int>, StoreDistCompare>;
18478 auto TryToVectorize = [&](const StoreIndexToDistSet &Set) {
18479 int PrevDist = -1;
18480 BoUpSLP::ValueList Operands;
18481 // Collect the chain into a list.
18482 for (auto [Idx, Data] : enumerate(Set)) {
18483 if (Operands.empty() || Data.second - PrevDist == 1) {
18484 Operands.push_back(Stores[Data.first]);
18485 PrevDist = Data.second;
18486 if (Idx != Set.size() - 1)
18487 continue;
18489 auto E = make_scope_exit([&, &DataVar = Data]() {
18490 Operands.clear();
18491 Operands.push_back(Stores[DataVar.first]);
18492 PrevDist = DataVar.second;
18495 if (Operands.size() <= 1 ||
18496 !Visited
18497 .insert({Operands.front(),
18498 cast<StoreInst>(Operands.front())->getValueOperand(),
18499 Operands.back(),
18500 cast<StoreInst>(Operands.back())->getValueOperand(),
18501 Operands.size()})
18502 .second)
18503 continue;
18505 unsigned MaxVecRegSize = R.getMaxVecRegSize();
18506 unsigned EltSize = R.getVectorElementSize(Operands[0]);
18507 unsigned MaxElts = llvm::bit_floor(MaxVecRegSize / EltSize);
18509 unsigned MaxVF =
18510 std::min(R.getMaximumVF(EltSize, Instruction::Store), MaxElts);
18511 auto *Store = cast<StoreInst>(Operands[0]);
18512 Type *StoreTy = Store->getValueOperand()->getType();
18513 Type *ValueTy = StoreTy;
18514 if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
18515 ValueTy = Trunc->getSrcTy();
18516 unsigned MinVF = std::max<unsigned>(
18517 2, PowerOf2Ceil(TTI->getStoreMinimumVF(
18518 R.getMinVF(DL->getTypeStoreSizeInBits(StoreTy)), StoreTy,
18519 ValueTy)));
18521 if (MaxVF < MinVF) {
18522 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
18523 << ") < "
18524 << "MinVF (" << MinVF << ")\n");
18525 continue;
18528 unsigned NonPowerOf2VF = 0;
18529 if (VectorizeNonPowerOf2) {
18530 // First try vectorizing with a non-power-of-2 VF. At the moment, only
18531 // consider cases where VF + 1 is a power-of-2, i.e. almost all vector
18532 // lanes are used.
18533 unsigned CandVF = std::clamp<unsigned>(Operands.size(), MinVF, MaxVF);
18534 if (has_single_bit(CandVF + 1)) {
18535 NonPowerOf2VF = CandVF;
18536 assert(NonPowerOf2VF != MaxVF &&
18537 "Non-power-of-2 VF should not be equal to MaxVF");
18541 unsigned MaxRegVF = MaxVF;
18542 MaxVF = std::min<unsigned>(MaxVF, bit_floor(Operands.size()));
18543 if (MaxVF < MinVF) {
18544 LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
18545 << ") < "
18546 << "MinVF (" << MinVF << ")\n");
18547 continue;
18550 unsigned Sz = 1 + Log2_32(MaxVF) - Log2_32(MinVF);
18551 SmallVector<unsigned> CandidateVFs(Sz + (NonPowerOf2VF > 0 ? 1 : 0));
18552 unsigned Size = MinVF;
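// CandidateVFs is filled in decreasing order: the optional non-power-of-2 VF
// (if any) first, followed by power-of-2 VFs from MaxVF down to MinVF.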
18553 for_each(reverse(CandidateVFs), [&](unsigned &VF) {
18554 VF = Size > MaxVF ? NonPowerOf2VF : Size;
18555 Size *= 2;
18557 unsigned End = Operands.size();
18558 unsigned Repeat = 0;
18559 constexpr unsigned MaxAttempts = 4;
18560 OwningArrayRef<std::pair<unsigned, unsigned>> RangeSizes(Operands.size());
18561 for_each(RangeSizes, [](std::pair<unsigned, unsigned> &P) {
18562 P.first = P.second = 1;
18564 DenseMap<Value *, std::pair<unsigned, unsigned>> NonSchedulable;
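// RangeSizes starts at {1, 1} per store, is reset to {0, 0} once the store is
// vectorized, and otherwise records the best tree sizes seen so far.
// NonSchedulable remembers, per leading store, slice sizes whose scheduling
// failed so the same slice is not retried.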
18565 auto IsNotVectorized = [](bool First,
18566 const std::pair<unsigned, unsigned> &P) {
18567 return First ? P.first > 0 : P.second > 0;
18569 auto IsVectorized = [](bool First,
18570 const std::pair<unsigned, unsigned> &P) {
18571 return First ? P.first == 0 : P.second == 0;
18573 auto VFIsProfitable = [](bool First, unsigned Size,
18574 const std::pair<unsigned, unsigned> &P) {
18575 return First ? Size >= P.first : Size >= P.second;
18577 auto FirstSizeSame = [](unsigned Size,
18578 const std::pair<unsigned, unsigned> &P) {
18579 return Size == P.first;
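// Repeatedly sweep the candidate VFs over the not-yet-vectorized ranges of
// the chain, shrinking the search window as slices get vectorized; stop once
// everything is vectorized, the attempt limit is reached, or no profitable
// graph remains.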
18581 while (true) {
18582 ++Repeat;
18583 bool RepeatChanged = false;
18584 bool AnyProfitableGraph = false;
18585 for (unsigned Size : CandidateVFs) {
18586 AnyProfitableGraph = false;
18587 unsigned StartIdx = std::distance(
18588 RangeSizes.begin(),
18589 find_if(RangeSizes, std::bind(IsNotVectorized, Size >= MaxRegVF,
18590 std::placeholders::_1)));
18591 while (StartIdx < End) {
18592 unsigned EndIdx =
18593 std::distance(RangeSizes.begin(),
18594 find_if(RangeSizes.drop_front(StartIdx),
18595 std::bind(IsVectorized, Size >= MaxRegVF,
18596 std::placeholders::_1)));
18597 unsigned Sz = EndIdx >= End ? End : EndIdx;
18598 for (unsigned Cnt = StartIdx; Cnt + Size <= Sz;) {
18599 if (!checkTreeSizes(RangeSizes.slice(Cnt, Size),
18600 Size >= MaxRegVF)) {
18601 ++Cnt;
18602 continue;
18604 ArrayRef<Value *> Slice = ArrayRef(Operands).slice(Cnt, Size);
18605 assert(all_of(Slice,
18606 [&](Value *V) {
18607 return cast<StoreInst>(V)
18608 ->getValueOperand()
18609 ->getType() ==
18610 cast<StoreInst>(Slice.front())
18611 ->getValueOperand()
18612 ->getType();
18613 }) &&
18614 "Expected all operands of same type.");
18615 if (!NonSchedulable.empty()) {
18616 auto [NonSchedSizeMax, NonSchedSizeMin] =
18617 NonSchedulable.lookup(Slice.front());
18618 if (NonSchedSizeMax > 0 && NonSchedSizeMin <= Size) {
18619 Cnt += NonSchedSizeMax;
18620 continue;
18623 unsigned TreeSize;
18624 std::optional<bool> Res =
18625 vectorizeStoreChain(Slice, R, Cnt, MinVF, TreeSize);
18626 if (!Res) {
18627 NonSchedulable
18628 .try_emplace(Slice.front(), std::make_pair(Size, Size))
18629 .first->getSecond()
18630 .second = Size;
18631 } else if (*Res) {
18632 // Mark the vectorized stores so that we don't vectorize them
18633 // again.
18634 VectorizedStores.insert(Slice.begin(), Slice.end());
18635 // Record that we found a profitable graph and changed the IR in this
18636 // repeat iteration.
18637 AnyProfitableGraph = RepeatChanged = Changed = true;
18638 // If we vectorized initial block, no need to try to vectorize
18639 // it again.
18640 for_each(RangeSizes.slice(Cnt, Size),
18641 [](std::pair<unsigned, unsigned> &P) {
18642 P.first = P.second = 0;
18644 if (Cnt < StartIdx + MinVF) {
18645 for_each(RangeSizes.slice(StartIdx, Cnt - StartIdx),
18646 [](std::pair<unsigned, unsigned> &P) {
18647 P.first = P.second = 0;
18649 StartIdx = Cnt + Size;
18651 if (Cnt > Sz - Size - MinVF) {
18652 for_each(RangeSizes.slice(Cnt + Size, Sz - (Cnt + Size)),
18653 [](std::pair<unsigned, unsigned> &P) {
18654 P.first = P.second = 0;
18656 if (Sz == End)
18657 End = Cnt;
18658 Sz = Cnt;
18660 Cnt += Size;
18661 continue;
18663 if (Size > 2 && Res &&
18664 !all_of(RangeSizes.slice(Cnt, Size),
18665 std::bind(VFIsProfitable, Size >= MaxRegVF, TreeSize,
18666 std::placeholders::_1))) {
18667 Cnt += Size;
18668 continue;
18670 // For very big VFs, check that we are not rebuilding the same trees, just
18671 // with a larger number of elements.
18672 if (Size > MaxRegVF && TreeSize > 1 &&
18673 all_of(RangeSizes.slice(Cnt, Size),
18674 std::bind(FirstSizeSame, TreeSize,
18675 std::placeholders::_1))) {
18676 Cnt += Size;
18677 while (Cnt != Sz && RangeSizes[Cnt].first == TreeSize)
18678 ++Cnt;
18679 continue;
18681 if (TreeSize > 1)
18682 for_each(RangeSizes.slice(Cnt, Size),
18683 [&](std::pair<unsigned, unsigned> &P) {
18684 if (Size >= MaxRegVF)
18685 P.second = std::max(P.second, TreeSize);
18686 else
18687 P.first = std::max(P.first, TreeSize);
18689 ++Cnt;
18690 AnyProfitableGraph = true;
18692 if (StartIdx >= End)
18693 break;
18694 if (Sz - StartIdx < Size && Sz - StartIdx >= MinVF)
18695 AnyProfitableGraph = true;
18696 StartIdx = std::distance(
18697 RangeSizes.begin(),
18698 find_if(RangeSizes.drop_front(Sz),
18699 std::bind(IsNotVectorized, Size >= MaxRegVF,
18700 std::placeholders::_1)));
18702 if (!AnyProfitableGraph && Size >= MaxRegVF && has_single_bit(Size))
18703 break;
18705 // All values vectorized - exit.
18706 if (all_of(RangeSizes, [](const std::pair<unsigned, unsigned> &P) {
18707 return P.first == 0 && P.second == 0;
18709 break;
18710 // Check if we have tried all attempts or the remaining attempts are not needed at all.
18711 if (Repeat >= MaxAttempts ||
18712 (Repeat > 1 && (RepeatChanged || !AnyProfitableGraph)))
18713 break;
18714 constexpr unsigned StoresLimit = 64;
18715 const unsigned MaxTotalNum = std::min<unsigned>(
18716 Operands.size(),
18717 static_cast<unsigned>(
18718 End -
18719 std::distance(
18720 RangeSizes.begin(),
18721 find_if(RangeSizes, std::bind(IsNotVectorized, true,
18722 std::placeholders::_1))) +
18723 1));
18724 unsigned VF = bit_ceil(CandidateVFs.front()) * 2;
18725 unsigned Limit =
18726 getFloorFullVectorNumberOfElements(*TTI, StoreTy, MaxTotalNum);
18727 CandidateVFs.clear();
18728 if (bit_floor(Limit) == VF)
18729 CandidateVFs.push_back(Limit);
18730 if (VF > MaxTotalNum || VF >= StoresLimit)
18731 break;
18732 for_each(RangeSizes, [&](std::pair<unsigned, unsigned> &P) {
18733 if (P.first != 0)
18734 P.first = std::max(P.second, P.first);
18736 // Make a last attempt to vectorize the maximum number of elements, in case
18737 // all previous attempts were unsuccessful because of cost.
18738 CandidateVFs.push_back(VF);
18743 // Stores a pair (first: index of the store in the Stores array whose address
18744 // is taken as the base, second: sorted set of {index, dist} pairs, which are
18745 // the indices of stores in the set and their location distances relative to
18746 // the base address).
18748 // The index of the very first store is kept separately, since the set may be
18749 // reordered after insertion and the first store may be moved. This container
18750 // reduces the number of calls to the getPointersDiff() function.
18751 SmallVector<std::pair<unsigned, StoreIndexToDistSet>> SortedStores;
18752 // Inserts the specified store SI with the given index Idx into the set of
18753 // stores. If a store with the same distance has already been found - stop the
18754 // insertion and try to vectorize the stores found so far. If some stores from
18755 // this sequence were not vectorized - try to vectorize them together with the
18756 // new store later. This logic is applied only to the stores that come before
18757 // the previous store with the same distance.
18758 // Example:
18759 // 1. store x, %p
18760 // 2. store y, %p+1
18761 // 3. store z, %p+2
18762 // 4. store a, %p
18763 // 5. store b, %p+3
18764 // - Scan this from the last to first store. The very first bunch of stores is
18765 // {5, {{4, -3}, {2, -2}, {3, -1}, {5, 0}}} (the element in SortedStores
18766 // vector).
18767 // - The next store in the list - #1 - has the same distance from store #5 as
18768 // the store #4.
18769 // - Try to vectorize sequence of stores 4,2,3,5.
18770 // - If all these stores are vectorized - just drop them.
18771 // - If some of them are not vectorized (say, #3 and #5), do extra analysis.
18772 // - Start new stores sequence.
18773 // The new bunch of stores is {1, {1, 0}}.
18774 // - Add the stores from previous sequence, that were not vectorized.
18775 // Here we consider the stores in reverse order relative to how they appear in
18776 // the IR (Stores are reversed already, see vectorizeStoreChains() function).
18777 // Store #3 can be added -> it comes after store #4 with the same distance as
18778 // store #1.
18779 // Store #5 cannot be added - it comes before store #4.
18780 // This logic improves compile time: we assume that the stores after a
18781 // previous store with the same distance most likely have memory dependencies,
18782 // so there is no need to waste compile time trying to vectorize them.
18783 // - Try to vectorize the sequence {1, {1, 0}, {3, 2}}.
18784 auto FillStoresSet = [&](unsigned Idx, StoreInst *SI) {
18785 for (std::pair<unsigned, StoreIndexToDistSet> &Set : SortedStores) {
18786 std::optional<int> Diff = getPointersDiff(
18787 Stores[Set.first]->getValueOperand()->getType(),
18788 Stores[Set.first]->getPointerOperand(),
18789 SI->getValueOperand()->getType(), SI->getPointerOperand(), *DL, *SE,
18790 /*StrictCheck=*/true);
18791 if (!Diff)
18792 continue;
18793 auto It = Set.second.find(std::make_pair(Idx, *Diff));
18794 if (It == Set.second.end()) {
18795 Set.second.emplace(Idx, *Diff);
18796 return;
18798 // Try to vectorize the first found set to avoid duplicate analysis.
18799 TryToVectorize(Set.second);
18800 unsigned ItIdx = It->first;
18801 int ItDist = It->second;
18802 StoreIndexToDistSet PrevSet;
18803 copy_if(Set.second, std::inserter(PrevSet, PrevSet.end()),
18804 [&](const std::pair<unsigned, int> &Pair) {
18805 return Pair.first > ItIdx;
18807 Set.second.clear();
18808 Set.first = Idx;
18809 Set.second.emplace(Idx, 0);
18810 // Insert the stores that followed the previous match to try to vectorize
18811 // them with this store.
18812 unsigned StartIdx = ItIdx + 1;
18813 SmallBitVector UsedStores(Idx - StartIdx);
18814 // Distances to the previously found duplicate store (or this store, since
18815 // they store to the same address).
18816 SmallVector<int> Dists(Idx - StartIdx, 0);
18817 for (const std::pair<unsigned, int> &Pair : reverse(PrevSet)) {
18818 // Do not try to vectorize sequences we have already tried.
18819 if (VectorizedStores.contains(Stores[Pair.first]))
18820 break;
18821 unsigned BI = Pair.first - StartIdx;
18822 UsedStores.set(BI);
18823 Dists[BI] = Pair.second - ItDist;
18825 for (unsigned I = StartIdx; I < Idx; ++I) {
18826 unsigned BI = I - StartIdx;
18827 if (UsedStores.test(BI))
18828 Set.second.emplace(I, Dists[BI]);
18830 return;
18832 auto &Res = SortedStores.emplace_back();
18833 Res.first = Idx;
18834 Res.second.emplace(Idx, 0);
18836 Type *PrevValTy = nullptr;
18837 for (auto [I, SI] : enumerate(Stores)) {
18838 if (R.isDeleted(SI))
18839 continue;
18840 if (!PrevValTy)
18841 PrevValTy = SI->getValueOperand()->getType();
18842 // Check that we do not try to vectorize stores of different types.
18843 if (PrevValTy != SI->getValueOperand()->getType()) {
18844 for (auto &Set : SortedStores)
18845 TryToVectorize(Set.second);
18846 SortedStores.clear();
18847 PrevValTy = SI->getValueOperand()->getType();
18849 FillStoresSet(I, SI);
18852 // Final vectorization attempt.
18853 for (auto &Set : SortedStores)
18854 TryToVectorize(Set.second);
18856 return Changed;
18859 void SLPVectorizerPass::collectSeedInstructions(BasicBlock *BB) {
18860 // Initialize the collections. We will make a single pass over the block.
18861 Stores.clear();
18862 GEPs.clear();
18864 // Visit the store and getelementptr instructions in BB and organize them in
18865 // Stores and GEPs according to the underlying objects of their pointer
18866 // operands.
18867 for (Instruction &I : *BB) {
18868 // Ignore store instructions that are not simple (e.g. volatile or atomic) or
18869 // whose stored value does not have a valid vectorizable element type.
18870 if (auto *SI = dyn_cast<StoreInst>(&I)) {
18871 if (!SI->isSimple())
18872 continue;
18873 if (!isValidElementType(SI->getValueOperand()->getType()))
18874 continue;
18875 Stores[getUnderlyingObject(SI->getPointerOperand())].push_back(SI);
18878 // Ignore getelementptr instructions that have more than one index, a
18879 // constant index, an index whose type is not a valid element type, or a
18880 // vector result type.
18881 else if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
18882 if (GEP->getNumIndices() != 1)
18883 continue;
18884 Value *Idx = GEP->idx_begin()->get();
18885 if (isa<Constant>(Idx))
18886 continue;
18887 if (!isValidElementType(Idx->getType()))
18888 continue;
18889 if (GEP->getType()->isVectorTy())
18890 continue;
18891 GEPs[GEP->getPointerOperand()].push_back(GEP);
18896 bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
18897 bool MaxVFOnly) {
18898 if (VL.size() < 2)
18899 return false;
18901 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize a list of length = "
18902 << VL.size() << ".\n");
18904 // Check that all of the parts are instructions of the same type;
18905 // we permit an alternate opcode via InstructionsState.
18906 InstructionsState S = getSameOpcode(VL, *TLI);
18907 if (!S.getOpcode())
18908 return false;
18910 Instruction *I0 = S.getMainOp();
18911 // Make sure invalid types (including vector type) are rejected before
18912 // determining vectorization factor for scalar instructions.
18913 for (Value *V : VL) {
18914 Type *Ty = V->getType();
18915 if (!isa<InsertElementInst>(V) && !isValidElementType(Ty)) {
18916 // NOTE: the following will print the internal LLVM type name to the user,
18917 // which may not be useful.
18918 R.getORE()->emit([&]() {
18919 std::string TypeStr;
18920 llvm::raw_string_ostream rso(TypeStr);
18921 Ty->print(rso);
18922 return OptimizationRemarkMissed(SV_NAME, "UnsupportedType", I0)
18923 << "Cannot SLP vectorize list: type "
18924 << TypeStr + " is unsupported by vectorizer";
18926 return false;
18930 unsigned Sz = R.getVectorElementSize(I0);
18931 unsigned MinVF = R.getMinVF(Sz);
18932 unsigned MaxVF = std::max<unsigned>(llvm::bit_floor(VL.size()), MinVF);
18933 MaxVF = std::min(R.getMaximumVF(Sz, S.getOpcode()), MaxVF);
18934 if (MaxVF < 2) {
18935 R.getORE()->emit([&]() {
18936 return OptimizationRemarkMissed(SV_NAME, "SmallVF", I0)
18937 << "Cannot SLP vectorize list: vectorization factor "
18938 << "less than 2 is not supported";
18940 return false;
18943 bool Changed = false;
18944 bool CandidateFound = false;
18945 InstructionCost MinCost = SLPCostThreshold.getValue();
18946 Type *ScalarTy = getValueType(VL[0]);
18948 unsigned NextInst = 0, MaxInst = VL.size();
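// Try decreasing vectorization factors, sliding a window of at most VF values
// over the list and vectorizing each profitable bundle.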
18949 for (unsigned VF = MaxVF; NextInst + 1 < MaxInst && VF >= MinVF; VF /= 2) {
18950 // No actual vectorization should happen if the number of parts is the same
18951 // as the provided vectorization factor (i.e. the scalar type would be used
18952 // for the vector code during codegen).
18953 auto *VecTy = getWidenedType(ScalarTy, VF);
18954 if (TTI->getNumberOfParts(VecTy) == VF)
18955 continue;
18956 for (unsigned I = NextInst; I < MaxInst; ++I) {
18957 unsigned ActualVF = std::min(MaxInst - I, VF);
18959 if (!hasFullVectorsOrPowerOf2(*TTI, ScalarTy, ActualVF))
18960 continue;
18962 if (MaxVFOnly && ActualVF < MaxVF)
18963 break;
18964 if ((VF > MinVF && ActualVF <= VF / 2) || (VF == MinVF && ActualVF < 2))
18965 break;
18967 SmallVector<Value *> Ops(ActualVF, nullptr);
18968 unsigned Idx = 0;
18969 for (Value *V : VL.drop_front(I)) {
18970 // Check that a previous iteration of this loop did not delete the
18971 // Value.
18972 if (auto *Inst = dyn_cast<Instruction>(V);
18973 !Inst || !R.isDeleted(Inst)) {
18974 Ops[Idx] = V;
18975 ++Idx;
18976 if (Idx == ActualVF)
18977 break;
18980 // Not enough vectorizable instructions - exit.
18981 if (Idx != ActualVF)
18982 break;
18984 LLVM_DEBUG(dbgs() << "SLP: Analyzing " << ActualVF << " operations "
18985 << "\n");
18987 R.buildTree(Ops);
18988 if (R.isTreeTinyAndNotFullyVectorizable())
18989 continue;
18990 R.reorderTopToBottom();
18991 R.reorderBottomToTop(
18992 /*IgnoreReorder=*/!isa<InsertElementInst>(Ops.front()) &&
18993 !R.doesRootHaveInTreeUses());
18994 R.transformNodes();
18995 R.buildExternalUses();
18997 R.computeMinimumValueSizes();
18998 InstructionCost Cost = R.getTreeCost();
18999 CandidateFound = true;
19000 MinCost = std::min(MinCost, Cost);
19002 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
19003 << " for VF=" << ActualVF << "\n");
19004 if (Cost < -SLPCostThreshold) {
19005 LLVM_DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
19006 R.getORE()->emit(OptimizationRemark(SV_NAME, "VectorizedList",
19007 cast<Instruction>(Ops[0]))
19008 << "SLP vectorized with cost " << ore::NV("Cost", Cost)
19009 << " and with tree size "
19010 << ore::NV("TreeSize", R.getTreeSize()));
19012 R.vectorizeTree();
19013 // Move to the next bundle.
19014 I += VF - 1;
19015 NextInst = I + 1;
19016 Changed = true;
19021 if (!Changed && CandidateFound) {
19022 R.getORE()->emit([&]() {
19023 return OptimizationRemarkMissed(SV_NAME, "NotBeneficial", I0)
19024 << "List vectorization was possible but not beneficial with cost "
19025 << ore::NV("Cost", MinCost) << " >= "
19026 << ore::NV("Treshold", -SLPCostThreshold);
19028 } else if (!Changed) {
19029 R.getORE()->emit([&]() {
19030 return OptimizationRemarkMissed(SV_NAME, "NotPossible", I0)
19031 << "Cannot SLP vectorize list: vectorization was impossible"
19032 << " with available vectorization factors";
19035 return Changed;
19038 bool SLPVectorizerPass::tryToVectorize(Instruction *I, BoUpSLP &R) {
19039 if (!I)
19040 return false;
19042 if (!isa<BinaryOperator, CmpInst>(I) || isa<VectorType>(I->getType()))
19043 return false;
19045 Value *P = I->getParent();
19047 // Vectorize in current basic block only.
19048 auto *Op0 = dyn_cast<Instruction>(I->getOperand(0));
19049 auto *Op1 = dyn_cast<Instruction>(I->getOperand(1));
19050 if (!Op0 || !Op1 || Op0->getParent() != P || Op1->getParent() != P ||
19051 R.isDeleted(Op0) || R.isDeleted(Op1))
19052 return false;
19054 // First collect all possible candidates
19055 SmallVector<std::pair<Value *, Value *>, 4> Candidates;
19056 Candidates.emplace_back(Op0, Op1);
19058 auto *A = dyn_cast<BinaryOperator>(Op0);
19059 auto *B = dyn_cast<BinaryOperator>(Op1);
19060 // Try to skip B.
19061 if (A && B && B->hasOneUse()) {
19062 auto *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
19063 auto *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
19064 if (B0 && B0->getParent() == P && !R.isDeleted(B0))
19065 Candidates.emplace_back(A, B0);
19066 if (B1 && B1->getParent() == P && !R.isDeleted(B1))
19067 Candidates.emplace_back(A, B1);
19069 // Try to skip A.
19070 if (B && A && A->hasOneUse()) {
19071 auto *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
19072 auto *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
19073 if (A0 && A0->getParent() == P && !R.isDeleted(A0))
19074 Candidates.emplace_back(A0, B);
19075 if (A1 && A1->getParent() == P && !R.isDeleted(A1))
19076 Candidates.emplace_back(A1, B);
19079 if (Candidates.size() == 1)
19080 return tryToVectorizeList({Op0, Op1}, R);
19082 // We have multiple options. Try to pick the single best.
19083 std::optional<int> BestCandidate = R.findBestRootPair(Candidates);
19084 if (!BestCandidate)
19085 return false;
19086 return tryToVectorizeList(
19087 {Candidates[*BestCandidate].first, Candidates[*BestCandidate].second}, R);
19090 namespace {
19092 /// Model horizontal reductions.
19094 /// A horizontal reduction is a tree of reduction instructions that has values
19095 /// that can be put into a vector as its leaves. For example:
19097 /// mul mul mul mul
19098 /// \ / \ /
19099 /// + +
19100 /// \ /
19101 /// +
19102 /// This tree has "mul" as its leaf values and "+" as its reduction
19103 /// instructions. A reduction can feed into a store or a binary operation
19104 /// feeding a phi.
19105 /// ...
19106 /// \ /
19107 /// +
19108 /// |
19109 /// phi +=
19111 /// Or:
19112 /// ...
19113 /// \ /
19114 /// +
19115 /// |
19116 /// *p =
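///
/// For illustration only (hypothetical scalars a, b and sum), a source
/// pattern such as
///   sum += a[0] * b[0];
///   sum += a[1] * b[1];
///   sum += a[2] * b[2];
///   sum += a[3] * b[3];
/// has this shape: the four "mul" results are the leaves and the chain of
/// "+" instructions is the reduction, rooted at the final add that feeds
/// "sum".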
19118 class HorizontalReduction {
19119 using ReductionOpsType = SmallVector<Value *, 16>;
19120 using ReductionOpsListType = SmallVector<ReductionOpsType, 2>;
19121 ReductionOpsListType ReductionOps;
19122 /// List of possibly reduced values.
19123 SmallVector<SmallVector<Value *>> ReducedVals;
19124 /// Maps reduced value to the corresponding reduction operation.
19125 SmallDenseMap<Value *, SmallVector<Instruction *>, 16> ReducedValsToOps;
19126 WeakTrackingVH ReductionRoot;
19127 /// The type of reduction operation.
19128 RecurKind RdxKind;
19129 /// Checks if the optimization of original scalar identity operations on
19130 /// matched horizontal reductions is enabled and allowed.
19131 bool IsSupportedHorRdxIdentityOp = false;
19133 static bool isCmpSelMinMax(Instruction *I) {
19134 return match(I, m_Select(m_Cmp(), m_Value(), m_Value())) &&
19135 RecurrenceDescriptor::isMinMaxRecurrenceKind(getRdxKind(I));
19138 // And/or are potentially poison-safe logical patterns like:
19139 // select x, y, false
19140 // select x, true, y
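// For reference: "select x, y, false" computes the same value as "and x, y",
// and "select x, true, y" the same as "or x, y", but the select forms do not
// propagate poison from y when x already decides the result.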
19141 static bool isBoolLogicOp(Instruction *I) {
19142 return isa<SelectInst>(I) &&
19143 (match(I, m_LogicalAnd()) || match(I, m_LogicalOr()));
19146 /// Checks if instruction is associative and can be vectorized.
19147 static bool isVectorizable(RecurKind Kind, Instruction *I) {
19148 if (Kind == RecurKind::None)
19149 return false;
19151 // Integer ops that map to select instructions or intrinsics are fine.
19152 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(Kind) ||
19153 isBoolLogicOp(I))
19154 return true;
19156 if (Kind == RecurKind::FMax || Kind == RecurKind::FMin) {
19157 // FP min/max are associative except for NaN and -0.0. We do not
19158 // have to rule out -0.0 here because the intrinsic semantics do not
19159 // specify a fixed result for it.
19160 return I->getFastMathFlags().noNaNs();
19163 if (Kind == RecurKind::FMaximum || Kind == RecurKind::FMinimum)
19164 return true;
19166 return I->isAssociative();
19169 static Value *getRdxOperand(Instruction *I, unsigned Index) {
19170 // Poison-safe 'or' takes the form: select X, true, Y
19171 // To make that work with the normal operand processing, we skip the
19172 // true value operand.
19173 // TODO: Change the code and data structures to handle this without a hack.
19174 if (getRdxKind(I) == RecurKind::Or && isa<SelectInst>(I) && Index == 1)
19175 return I->getOperand(2);
19176 return I->getOperand(Index);
19179 /// Creates reduction operation with the current opcode.
19180 static Value *createOp(IRBuilderBase &Builder, RecurKind Kind, Value *LHS,
19181 Value *RHS, const Twine &Name, bool UseSelect) {
19182 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(Kind);
19183 switch (Kind) {
19184 case RecurKind::Or:
19185 if (UseSelect &&
19186 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
19187 return Builder.CreateSelect(LHS, Builder.getTrue(), RHS, Name);
19188 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
19189 Name);
19190 case RecurKind::And:
19191 if (UseSelect &&
19192 LHS->getType() == CmpInst::makeCmpResultType(LHS->getType()))
19193 return Builder.CreateSelect(LHS, RHS, Builder.getFalse(), Name);
19194 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
19195 Name);
19196 case RecurKind::Add:
19197 case RecurKind::Mul:
19198 case RecurKind::Xor:
19199 case RecurKind::FAdd:
19200 case RecurKind::FMul:
19201 return Builder.CreateBinOp((Instruction::BinaryOps)RdxOpcode, LHS, RHS,
19202 Name);
19203 case RecurKind::FMax:
19204 return Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS);
19205 case RecurKind::FMin:
19206 return Builder.CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS);
19207 case RecurKind::FMaximum:
19208 return Builder.CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS);
19209 case RecurKind::FMinimum:
19210 return Builder.CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS);
19211 case RecurKind::SMax:
19212 if (UseSelect) {
19213 Value *Cmp = Builder.CreateICmpSGT(LHS, RHS, Name);
19214 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19216 return Builder.CreateBinaryIntrinsic(Intrinsic::smax, LHS, RHS);
19217 case RecurKind::SMin:
19218 if (UseSelect) {
19219 Value *Cmp = Builder.CreateICmpSLT(LHS, RHS, Name);
19220 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19222 return Builder.CreateBinaryIntrinsic(Intrinsic::smin, LHS, RHS);
19223 case RecurKind::UMax:
19224 if (UseSelect) {
19225 Value *Cmp = Builder.CreateICmpUGT(LHS, RHS, Name);
19226 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19228 return Builder.CreateBinaryIntrinsic(Intrinsic::umax, LHS, RHS);
19229 case RecurKind::UMin:
19230 if (UseSelect) {
19231 Value *Cmp = Builder.CreateICmpULT(LHS, RHS, Name);
19232 return Builder.CreateSelect(Cmp, LHS, RHS, Name);
19234 return Builder.CreateBinaryIntrinsic(Intrinsic::umin, LHS, RHS);
19235 default:
19236 llvm_unreachable("Unknown reduction operation.");
19240 /// Creates reduction operation with the current opcode with the IR flags
19241 /// from \p ReductionOps, dropping nuw/nsw flags.
19242 static Value *createOp(IRBuilderBase &Builder, RecurKind RdxKind, Value *LHS,
19243 Value *RHS, const Twine &Name,
19244 const ReductionOpsListType &ReductionOps) {
19245 bool UseSelect = ReductionOps.size() == 2 ||
19246 // Logical or/and.
19247 (ReductionOps.size() == 1 &&
19248 any_of(ReductionOps.front(), IsaPred<SelectInst>));
19249 assert((!UseSelect || ReductionOps.size() != 2 ||
19250 isa<SelectInst>(ReductionOps[1][0])) &&
19251 "Expected cmp + select pairs for reduction");
19252 Value *Op = createOp(Builder, RdxKind, LHS, RHS, Name, UseSelect);
19253 if (RecurrenceDescriptor::isIntMinMaxRecurrenceKind(RdxKind)) {
19254 if (auto *Sel = dyn_cast<SelectInst>(Op)) {
19255 propagateIRFlags(Sel->getCondition(), ReductionOps[0], nullptr,
19256 /*IncludeWrapFlags=*/false);
19257 propagateIRFlags(Op, ReductionOps[1], nullptr,
19258 /*IncludeWrapFlags=*/false);
19259 return Op;
19262 propagateIRFlags(Op, ReductionOps[0], nullptr, /*IncludeWrapFlags=*/false);
19263 return Op;
19266 public:
19267 static RecurKind getRdxKind(Value *V) {
19268 auto *I = dyn_cast<Instruction>(V);
19269 if (!I)
19270 return RecurKind::None;
19271 if (match(I, m_Add(m_Value(), m_Value())))
19272 return RecurKind::Add;
19273 if (match(I, m_Mul(m_Value(), m_Value())))
19274 return RecurKind::Mul;
19275 if (match(I, m_And(m_Value(), m_Value())) ||
19276 match(I, m_LogicalAnd(m_Value(), m_Value())))
19277 return RecurKind::And;
19278 if (match(I, m_Or(m_Value(), m_Value())) ||
19279 match(I, m_LogicalOr(m_Value(), m_Value())))
19280 return RecurKind::Or;
19281 if (match(I, m_Xor(m_Value(), m_Value())))
19282 return RecurKind::Xor;
19283 if (match(I, m_FAdd(m_Value(), m_Value())))
19284 return RecurKind::FAdd;
19285 if (match(I, m_FMul(m_Value(), m_Value())))
19286 return RecurKind::FMul;
19288 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_Value())))
19289 return RecurKind::FMax;
19290 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_Value())))
19291 return RecurKind::FMin;
19293 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(), m_Value())))
19294 return RecurKind::FMaximum;
19295 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(), m_Value())))
19296 return RecurKind::FMinimum;
19297 // This matches either cmp+select or intrinsics. SLP is expected to handle
19298 // either form.
19299 // TODO: If we are canonicalizing to intrinsics, we can remove several
19300 // special-case paths that deal with selects.
19301 if (match(I, m_SMax(m_Value(), m_Value())))
19302 return RecurKind::SMax;
19303 if (match(I, m_SMin(m_Value(), m_Value())))
19304 return RecurKind::SMin;
19305 if (match(I, m_UMax(m_Value(), m_Value())))
19306 return RecurKind::UMax;
19307 if (match(I, m_UMin(m_Value(), m_Value())))
19308 return RecurKind::UMin;
19310 if (auto *Select = dyn_cast<SelectInst>(I)) {
19311 // Try harder: look for min/max pattern based on instructions producing
19312 // same values such as: select ((cmp Inst1, Inst2), Inst1, Inst2).
19313 // During the intermediate stages of SLP, it's very common to have
19314 // pattern like this (since optimizeGatherSequence is run only once
19315 // at the end):
19316 // %1 = extractelement <2 x i32> %a, i32 0
19317 // %2 = extractelement <2 x i32> %a, i32 1
19318 // %cond = icmp sgt i32 %1, %2
19319 // %3 = extractelement <2 x i32> %a, i32 0
19320 // %4 = extractelement <2 x i32> %a, i32 1
19321 // %select = select i1 %cond, i32 %3, i32 %4
19322 CmpInst::Predicate Pred;
19323 Instruction *L1;
19324 Instruction *L2;
19326 Value *LHS = Select->getTrueValue();
19327 Value *RHS = Select->getFalseValue();
19328 Value *Cond = Select->getCondition();
19330 // TODO: Support inverse predicates.
19331 if (match(Cond, m_Cmp(Pred, m_Specific(LHS), m_Instruction(L2)))) {
19332 if (!isa<ExtractElementInst>(RHS) ||
19333 !L2->isIdenticalTo(cast<Instruction>(RHS)))
19334 return RecurKind::None;
19335 } else if (match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Specific(RHS)))) {
19336 if (!isa<ExtractElementInst>(LHS) ||
19337 !L1->isIdenticalTo(cast<Instruction>(LHS)))
19338 return RecurKind::None;
19339 } else {
19340 if (!isa<ExtractElementInst>(LHS) || !isa<ExtractElementInst>(RHS))
19341 return RecurKind::None;
19342 if (!match(Cond, m_Cmp(Pred, m_Instruction(L1), m_Instruction(L2))) ||
19343 !L1->isIdenticalTo(cast<Instruction>(LHS)) ||
19344 !L2->isIdenticalTo(cast<Instruction>(RHS)))
19345 return RecurKind::None;
19348 switch (Pred) {
19349 default:
19350 return RecurKind::None;
19351 case CmpInst::ICMP_SGT:
19352 case CmpInst::ICMP_SGE:
19353 return RecurKind::SMax;
19354 case CmpInst::ICMP_SLT:
19355 case CmpInst::ICMP_SLE:
19356 return RecurKind::SMin;
19357 case CmpInst::ICMP_UGT:
19358 case CmpInst::ICMP_UGE:
19359 return RecurKind::UMax;
19360 case CmpInst::ICMP_ULT:
19361 case CmpInst::ICMP_ULE:
19362 return RecurKind::UMin;
19365 return RecurKind::None;
19368 /// Get the index of the first operand.
19369 static unsigned getFirstOperandIndex(Instruction *I) {
19370 return isCmpSelMinMax(I) ? 1 : 0;
19373 private:
19374 /// Total number of operands in the reduction operation.
19375 static unsigned getNumberOfOperands(Instruction *I) {
19376 return isCmpSelMinMax(I) ? 3 : 2;
19379 /// Checks if the instruction is in basic block \p BB.
19380 /// For a cmp+sel min/max reduction check that both ops are in \p BB.
19381 static bool hasSameParent(Instruction *I, BasicBlock *BB) {
19382 if (isCmpSelMinMax(I) || isBoolLogicOp(I)) {
19383 auto *Sel = cast<SelectInst>(I);
19384 auto *Cmp = dyn_cast<Instruction>(Sel->getCondition());
19385 return Sel->getParent() == BB && Cmp && Cmp->getParent() == BB;
19387 return I->getParent() == BB;
19390 /// Expected number of uses for reduction operations/reduced values.
19391 static bool hasRequiredNumberOfUses(bool IsCmpSelMinMax, Instruction *I) {
19392 if (IsCmpSelMinMax) {
19393 // SelectInst must be used twice while the condition op must have only a
19394 // single use.
19395 if (auto *Sel = dyn_cast<SelectInst>(I))
19396 return Sel->hasNUses(2) && Sel->getCondition()->hasOneUse();
19397 return I->hasNUses(2);
19400 // Arithmetic reduction operation must be used once only.
19401 return I->hasOneUse();
19404 /// Initializes the list of reduction operations.
19405 void initReductionOps(Instruction *I) {
19406 if (isCmpSelMinMax(I))
19407 ReductionOps.assign(2, ReductionOpsType());
19408 else
19409 ReductionOps.assign(1, ReductionOpsType());
19412 /// Add all reduction operations for the reduction instruction \p I.
19413 void addReductionOps(Instruction *I) {
19414 if (isCmpSelMinMax(I)) {
19415 ReductionOps[0].emplace_back(cast<SelectInst>(I)->getCondition());
19416 ReductionOps[1].emplace_back(I);
19417 } else {
19418 ReductionOps[0].emplace_back(I);
19422 static bool isGoodForReduction(ArrayRef<Value *> Data) {
19423 int Sz = Data.size();
19424 auto *I = dyn_cast<Instruction>(Data.front());
19425 return Sz > 1 || isConstant(Data.front()) ||
19426 (I && !isa<LoadInst>(I) && isValidForAlternation(I->getOpcode()));
19429 public:
19430 HorizontalReduction() = default;
19432 /// Try to find a reduction tree.
19433 bool matchAssociativeReduction(BoUpSLP &R, Instruction *Root,
19434 ScalarEvolution &SE, const DataLayout &DL,
19435 const TargetLibraryInfo &TLI) {
19436 RdxKind = HorizontalReduction::getRdxKind(Root);
19437 if (!isVectorizable(RdxKind, Root))
19438 return false;
19440 // Analyze "regular" integer/FP types for reductions - no target-specific
19441 // types or pointers.
19442 Type *Ty = Root->getType();
19443 if (!isValidElementType(Ty) || Ty->isPointerTy())
19444 return false;
19446 // Though the ultimate reduction may have multiple uses, its condition must
19447 // have only a single use.
19448 if (auto *Sel = dyn_cast<SelectInst>(Root))
19449 if (!Sel->getCondition()->hasOneUse())
19450 return false;
19452 ReductionRoot = Root;
19454 // Iterate through all the operands of the possible reduction tree and
19455 // gather all the reduced values, sorting them by their value id.
19456 BasicBlock *BB = Root->getParent();
19457 bool IsCmpSelMinMax = isCmpSelMinMax(Root);
19458 SmallVector<std::pair<Instruction *, unsigned>> Worklist(
19459 1, std::make_pair(Root, 0));
19460 // Checks if the operands of the \p TreeN instruction are also reduction
19461 // operations or should be treated as reduced values or an extra argument,
19462 // which is not part of the reduction.
19463 auto CheckOperands = [&](Instruction *TreeN,
19464 SmallVectorImpl<Value *> &PossibleReducedVals,
19465 SmallVectorImpl<Instruction *> &ReductionOps,
19466 unsigned Level) {
19467 for (int I : reverse(seq<int>(getFirstOperandIndex(TreeN),
19468 getNumberOfOperands(TreeN)))) {
19469 Value *EdgeVal = getRdxOperand(TreeN, I);
19470 ReducedValsToOps[EdgeVal].push_back(TreeN);
19471 auto *EdgeInst = dyn_cast<Instruction>(EdgeVal);
19472 // If the edge is not an instruction, differs from the main reduction
19473 // opcode, or has too many uses, treat it as a possible reduced value.
19474 // Also, do not try to reduce constant values if the operation is not
19475 // foldable.
19476 if (!EdgeInst || Level > RecursionMaxDepth ||
19477 getRdxKind(EdgeInst) != RdxKind ||
19478 IsCmpSelMinMax != isCmpSelMinMax(EdgeInst) ||
19479 !hasRequiredNumberOfUses(IsCmpSelMinMax, EdgeInst) ||
19480 !isVectorizable(RdxKind, EdgeInst) ||
19481 (R.isAnalyzedReductionRoot(EdgeInst) &&
19482 all_of(EdgeInst->operands(), IsaPred<Constant>))) {
19483 PossibleReducedVals.push_back(EdgeVal);
19484 continue;
19486 ReductionOps.push_back(EdgeInst);
19489 // Try to regroup reduced values so that it becomes more profitable to
19490 // reduce them. Values are grouped by their value ids, instructions by their
19491 // opcode id and/or alternate opcode id, with extra analysis for loads
19492 // (grouping them by the distance between pointers) and cmp instructions
19493 // (grouping them by the predicate).
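// For illustration (hypothetical values): for reduced loads %a, %b and %c,
// where %a and %c read from the same underlying object at a constant offset
// from each other and %b reads from an unrelated pointer, %a and %c get the
// same subkey (derived below from the pointer operand) and are grouped
// together, while %b ends up in a separate group that is tried on its own.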
19494 SmallMapVector<
19495 size_t, SmallMapVector<size_t, SmallMapVector<Value *, unsigned, 2>, 2>,
19497 PossibleReducedVals;
19498 initReductionOps(Root);
19499 DenseMap<std::pair<size_t, Value *>, SmallVector<LoadInst *>> LoadsMap;
19500 SmallSet<size_t, 2> LoadKeyUsed;
19502 auto GenerateLoadsSubkey = [&](size_t Key, LoadInst *LI) {
19503 Key = hash_combine(hash_value(LI->getParent()), Key);
19504 Value *Ptr =
19505 getUnderlyingObject(LI->getPointerOperand(), RecursionMaxDepth);
19506 if (!LoadKeyUsed.insert(Key).second) {
19507 auto LIt = LoadsMap.find(std::make_pair(Key, Ptr));
19508 if (LIt != LoadsMap.end()) {
19509 for (LoadInst *RLI : LIt->second) {
19510 if (getPointersDiff(RLI->getType(), RLI->getPointerOperand(),
19511 LI->getType(), LI->getPointerOperand(), DL, SE,
19512 /*StrictCheck=*/true))
19513 return hash_value(RLI->getPointerOperand());
19515 for (LoadInst *RLI : LIt->second) {
19516 if (arePointersCompatible(RLI->getPointerOperand(),
19517 LI->getPointerOperand(), TLI)) {
19518 hash_code SubKey = hash_value(RLI->getPointerOperand());
19519 return SubKey;
19522 if (LIt->second.size() > 2) {
19523 hash_code SubKey =
19524 hash_value(LIt->second.back()->getPointerOperand());
19525 return SubKey;
19529 LoadsMap.try_emplace(std::make_pair(Key, Ptr))
19530 .first->second.push_back(LI);
19531 return hash_value(LI->getPointerOperand());
19534 while (!Worklist.empty()) {
19535 auto [TreeN, Level] = Worklist.pop_back_val();
19536 SmallVector<Value *> PossibleRedVals;
19537 SmallVector<Instruction *> PossibleReductionOps;
19538 CheckOperands(TreeN, PossibleRedVals, PossibleReductionOps, Level);
19539 addReductionOps(TreeN);
19540 // Add reduction values. The values are sorted for better vectorization
19541 // results.
19542 for (Value *V : PossibleRedVals) {
19543 size_t Key, Idx;
19544 std::tie(Key, Idx) = generateKeySubkey(V, &TLI, GenerateLoadsSubkey,
19545 /*AllowAlternate=*/false);
19546 ++PossibleReducedVals[Key][Idx]
19547 .insert(std::make_pair(V, 0))
19548 .first->second;
19550 for (Instruction *I : reverse(PossibleReductionOps))
19551 Worklist.emplace_back(I, I->getParent() == BB ? 0 : Level + 1);
19553 auto PossibleReducedValsVect = PossibleReducedVals.takeVector();
19554 // Sort values by the total number of value kinds to start the reduction
19555 // from the longest possible sequences of reduced values.
19556 for (auto &PossibleReducedVals : PossibleReducedValsVect) {
19557 auto PossibleRedVals = PossibleReducedVals.second.takeVector();
19558 SmallVector<SmallVector<Value *>> PossibleRedValsVect;
19559 for (auto It = PossibleRedVals.begin(), E = PossibleRedVals.end();
19560 It != E; ++It) {
19561 PossibleRedValsVect.emplace_back();
19562 auto RedValsVect = It->second.takeVector();
19563 stable_sort(RedValsVect, llvm::less_second());
19564 for (const std::pair<Value *, unsigned> &Data : RedValsVect)
19565 PossibleRedValsVect.back().append(Data.second, Data.first);
19567 stable_sort(PossibleRedValsVect, [](const auto &P1, const auto &P2) {
19568 return P1.size() > P2.size();
19570 int NewIdx = -1;
19571 for (ArrayRef<Value *> Data : PossibleRedValsVect) {
19572 if (NewIdx < 0 ||
19573 (!isGoodForReduction(Data) &&
19574 (!isa<LoadInst>(Data.front()) ||
19575 !isa<LoadInst>(ReducedVals[NewIdx].front()) ||
19576 getUnderlyingObject(
19577 cast<LoadInst>(Data.front())->getPointerOperand()) !=
19578 getUnderlyingObject(
19579 cast<LoadInst>(ReducedVals[NewIdx].front())
19580 ->getPointerOperand())))) {
19581 NewIdx = ReducedVals.size();
19582 ReducedVals.emplace_back();
19584 ReducedVals[NewIdx].append(Data.rbegin(), Data.rend());
19587 // Sort the reduced values by number of same/alternate opcode and/or pointer
19588 // operand.
19589 stable_sort(ReducedVals, [](ArrayRef<Value *> P1, ArrayRef<Value *> P2) {
19590 return P1.size() > P2.size();
19592 return true;
19595 /// Attempt to vectorize the tree found by matchAssociativeReduction.
19596 Value *tryToReduce(BoUpSLP &V, const DataLayout &DL, TargetTransformInfo *TTI,
19597 const TargetLibraryInfo &TLI) {
19598 const unsigned ReductionLimit = VectorizeNonPowerOf2 ? 3 : 4;
19599 constexpr unsigned RegMaxNumber = 4;
19600 constexpr unsigned RedValsMaxNumber = 128;
19601 // If there are a sufficient number of reduction values, reduce
19602 // to a nearby power-of-2. We can safely generate oversized
19603 // vectors and rely on the backend to split them to legal sizes.
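// E.g. (hypothetical target) a reduction of 16 x i32 values can be built as
// a single <16 x i32> reduction even if the target only has 128-bit vector
// registers; type legalization later splits it into four <4 x i32> parts.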
19604 if (unsigned NumReducedVals = std::accumulate(
19605 ReducedVals.begin(), ReducedVals.end(), 0,
19606 [](unsigned Num, ArrayRef<Value *> Vals) -> unsigned {
19607 if (!isGoodForReduction(Vals))
19608 return Num;
19609 return Num + Vals.size();
19611 NumReducedVals < ReductionLimit &&
19612 all_of(ReducedVals, [](ArrayRef<Value *> RedV) {
19613 return RedV.size() < 2 || !allConstant(RedV) || !isSplat(RedV);
19614 })) {
19615 for (ReductionOpsType &RdxOps : ReductionOps)
19616 for (Value *RdxOp : RdxOps)
19617 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
19618 return nullptr;
19621 IRBuilder<TargetFolder> Builder(ReductionRoot->getContext(),
19622 TargetFolder(DL));
19623 Builder.SetInsertPoint(cast<Instruction>(ReductionRoot));
19625 // Track the reduced values in case they are replaced by extractelement
19626 // instructions because of the vectorization.
19627 DenseMap<Value *, WeakTrackingVH> TrackedVals(ReducedVals.size() *
19628 ReducedVals.front().size());
19630 // The compare instruction of a min/max is the insertion point for new
19631 // instructions and may be replaced with a new compare instruction.
19632 auto &&GetCmpForMinMaxReduction = [](Instruction *RdxRootInst) {
19633 assert(isa<SelectInst>(RdxRootInst) &&
19634 "Expected min/max reduction to have select root instruction");
19635 Value *ScalarCond = cast<SelectInst>(RdxRootInst)->getCondition();
19636 assert(isa<Instruction>(ScalarCond) &&
19637 "Expected min/max reduction to have compare condition");
19638 return cast<Instruction>(ScalarCond);
19641 // Return new VectorizedTree, based on previous value.
19642 auto GetNewVectorizedTree = [&](Value *VectorizedTree, Value *Res) {
19643 if (VectorizedTree) {
19644 // Update the final value in the reduction.
19645 Builder.SetCurrentDebugLocation(
19646 cast<Instruction>(ReductionOps.front().front())->getDebugLoc());
19647 if ((isa<PoisonValue>(VectorizedTree) && !isa<PoisonValue>(Res)) ||
19648 (isGuaranteedNotToBePoison(Res) &&
19649 !isGuaranteedNotToBePoison(VectorizedTree))) {
19650 auto It = ReducedValsToOps.find(Res);
19651 if (It != ReducedValsToOps.end() &&
19652 any_of(It->getSecond(),
19653 [](Instruction *I) { return isBoolLogicOp(I); }))
19654 std::swap(VectorizedTree, Res);
19657 return createOp(Builder, RdxKind, VectorizedTree, Res, "op.rdx",
19658 ReductionOps);
19660 // Initialize the final value in the reduction.
19661 return Res;
19663 bool AnyBoolLogicOp = any_of(ReductionOps.back(), [](Value *V) {
19664 return isBoolLogicOp(cast<Instruction>(V));
19666 SmallDenseSet<Value *> IgnoreList(ReductionOps.size() *
19667 ReductionOps.front().size());
19668 for (ReductionOpsType &RdxOps : ReductionOps)
19669 for (Value *RdxOp : RdxOps) {
19670 if (!RdxOp)
19671 continue;
19672 IgnoreList.insert(RdxOp);
19674 // Intersect the fast-math-flags from all reduction operations.
19675 FastMathFlags RdxFMF;
19676 RdxFMF.set();
19677 for (Value *U : IgnoreList)
19678 if (auto *FPMO = dyn_cast<FPMathOperator>(U))
19679 RdxFMF &= FPMO->getFastMathFlags();
19680 bool IsCmpSelMinMax = isCmpSelMinMax(cast<Instruction>(ReductionRoot));
19682 // Need to track reduced vals, they may be changed during vectorization of
19683 // subvectors.
19684 for (ArrayRef<Value *> Candidates : ReducedVals)
19685 for (Value *V : Candidates)
19686 TrackedVals.try_emplace(V, V);
19688 auto At = [](SmallMapVector<Value *, unsigned, 16> &MV,
19689 Value *V) -> unsigned & {
19690 auto *It = MV.find(V);
19691 assert(It != MV.end() && "Unable to find given key.");
19692 return It->second;
19695 DenseMap<Value *, unsigned> VectorizedVals(ReducedVals.size());
19696 // List of the values that were reduced in other trees as part of gather
19697 // nodes and thus require an extract if fully vectorized in other trees.
19698 SmallPtrSet<Value *, 4> RequiredExtract;
19699 WeakTrackingVH VectorizedTree = nullptr;
19700 bool CheckForReusedReductionOps = false;
19701 // Try to vectorize elements based on their type.
19702 SmallVector<InstructionsState> States;
19703 for (ArrayRef<Value *> RV : ReducedVals)
19704 States.push_back(getSameOpcode(RV, TLI));
19705 for (unsigned I = 0, E = ReducedVals.size(); I < E; ++I) {
19706 ArrayRef<Value *> OrigReducedVals = ReducedVals[I];
19707 InstructionsState S = States[I];
19708 SmallVector<Value *> Candidates;
19709 Candidates.reserve(2 * OrigReducedVals.size());
19710 DenseMap<Value *, Value *> TrackedToOrig(2 * OrigReducedVals.size());
19711 for (unsigned Cnt = 0, Sz = OrigReducedVals.size(); Cnt < Sz; ++Cnt) {
19712 Value *RdxVal = TrackedVals.at(OrigReducedVals[Cnt]);
19713 // Check if the reduction value was not overridden by the extractelement
19714 // instruction because of the vectorization and exclude it, if it is not
19715 // compatible with other values.
19716 // Also check if the instruction was folded to constant/other value.
19717 auto *Inst = dyn_cast<Instruction>(RdxVal);
19718 if ((Inst && isVectorLikeInstWithConstOps(Inst) &&
19719 (!S.getOpcode() || !S.isOpcodeOrAlt(Inst))) ||
19720 (S.getOpcode() && !Inst))
19721 continue;
19722 Candidates.push_back(RdxVal);
19723 TrackedToOrig.try_emplace(RdxVal, OrigReducedVals[Cnt]);
19725 bool ShuffledExtracts = false;
19726 // Try to handle shuffled extractelements.
19727 if (S.getOpcode() == Instruction::ExtractElement && !S.isAltShuffle() &&
19728 I + 1 < E) {
19729 SmallVector<Value *> CommonCandidates(Candidates);
19730 for (Value *RV : ReducedVals[I + 1]) {
19731 Value *RdxVal = TrackedVals.at(RV);
19732 // Check if the reduction value was not overridden by the
19733 // extractelement instruction because of the vectorization and
19734 // exclude it, if it is not compatible with other values.
19735 auto *Inst = dyn_cast<ExtractElementInst>(RdxVal);
19736 if (!Inst)
19737 continue;
19738 CommonCandidates.push_back(RdxVal);
19739 TrackedToOrig.try_emplace(RdxVal, RV);
19741 SmallVector<int> Mask;
19742 if (isFixedVectorShuffle(CommonCandidates, Mask)) {
19743 ++I;
19744 Candidates.swap(CommonCandidates);
19745 ShuffledExtracts = true;
19749 // Emit code for constant values.
19750 if (Candidates.size() > 1 && allConstant(Candidates)) {
19751 Value *Res = Candidates.front();
19752 Value *OrigV = TrackedToOrig.at(Candidates.front());
19753 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
19754 for (Value *VC : ArrayRef(Candidates).drop_front()) {
19755 Res = createOp(Builder, RdxKind, Res, VC, "const.rdx", ReductionOps);
19756 Value *OrigV = TrackedToOrig.at(VC);
19757 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
19758 if (auto *ResI = dyn_cast<Instruction>(Res))
19759 V.analyzedReductionRoot(ResI);
19761 VectorizedTree = GetNewVectorizedTree(VectorizedTree, Res);
19762 continue;
19765 unsigned NumReducedVals = Candidates.size();
19766 if (NumReducedVals < ReductionLimit &&
19767 (NumReducedVals < 2 || !isSplat(Candidates)))
19768 continue;
19770 // Check if we support repeated scalar values processing (optimization of
19771 // original scalar identity operations on matched horizontal reductions).
19772 IsSupportedHorRdxIdentityOp = RdxKind != RecurKind::Mul &&
19773 RdxKind != RecurKind::FMul &&
19774 RdxKind != RecurKind::FMulAdd;
19775 // Gather same values.
19776 SmallMapVector<Value *, unsigned, 16> SameValuesCounter;
19777 if (IsSupportedHorRdxIdentityOp)
19778 for (Value *V : Candidates) {
19779 Value *OrigV = TrackedToOrig.at(V);
19780 ++SameValuesCounter.try_emplace(OrigV).first->second;
19782 // Used to check if the reduced values are used the same number of times.
19783 // In this case the compiler may produce better code. E.g. if the reduced
19784 // values are aabbccdd (8 x values), then the first node of the tree will
19785 // have a node for 4 x abcd + shuffle <4 x abcd>, <0, 0, 1, 1, 2, 2, 3, 3>.
19786 // Plus, the final reduction will be performed on <8 x aabbccdd>.
19787 // Instead, the compiler may build the <4 x abcd> tree immediately, +
19788 // reduction (4 x abcd) * 2.
19789 // Currently it only handles add/fadd/xor; and/or/min/max do not require
19790 // this analysis, and other operations may require an extra estimation of
19791 // the profitability.
19792 bool SameScaleFactor = false;
19793 bool OptReusedScalars = IsSupportedHorRdxIdentityOp &&
19794 SameValuesCounter.size() != Candidates.size();
19795 BoUpSLP::ExtraValueToDebugLocsMap ExternallyUsedValues;
19796 if (OptReusedScalars) {
19797 SameScaleFactor =
19798 (RdxKind == RecurKind::Add || RdxKind == RecurKind::FAdd ||
19799 RdxKind == RecurKind::Xor) &&
19800 all_of(drop_begin(SameValuesCounter),
19801 [&SameValuesCounter](const std::pair<Value *, unsigned> &P) {
19802 return P.second == SameValuesCounter.front().second;
19804 Candidates.resize(SameValuesCounter.size());
19805 transform(SameValuesCounter, Candidates.begin(),
19806 [&](const auto &P) { return TrackedVals.at(P.first); });
19807 NumReducedVals = Candidates.size();
19808 // Have a reduction of the same element.
19809 if (NumReducedVals == 1) {
19810 Value *OrigV = TrackedToOrig.at(Candidates.front());
19811 unsigned Cnt = At(SameValuesCounter, OrigV);
19812 Value *RedVal =
19813 emitScaleForReusedOps(Candidates.front(), Builder, Cnt);
19814 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
19815 VectorizedVals.try_emplace(OrigV, Cnt);
19816 ExternallyUsedValues.insert(OrigV);
19817 continue;
19821 unsigned MaxVecRegSize = V.getMaxVecRegSize();
19822 unsigned EltSize = V.getVectorElementSize(Candidates[0]);
19823 const unsigned MaxElts = std::clamp<unsigned>(
19824 llvm::bit_floor(MaxVecRegSize / EltSize), RedValsMaxNumber,
19825 RegMaxNumber * RedValsMaxNumber);
19827 unsigned ReduxWidth = NumReducedVals;
19828 auto GetVectorFactor = [&, &TTI = *TTI](unsigned ReduxWidth) {
19829 unsigned NumParts, NumRegs;
19830 Type *ScalarTy = Candidates.front()->getType();
19831 ReduxWidth =
19832 getFloorFullVectorNumberOfElements(TTI, ScalarTy, ReduxWidth);
19833 VectorType *Tp = getWidenedType(ScalarTy, ReduxWidth);
19834 NumParts = TTI.getNumberOfParts(Tp);
19835 NumRegs =
19836 TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true, Tp));
19837 while (NumParts > NumRegs) {
19838 ReduxWidth = bit_floor(ReduxWidth - 1);
19839 VectorType *Tp = getWidenedType(ScalarTy, ReduxWidth);
19840 NumParts = TTI.getNumberOfParts(Tp);
19841 NumRegs =
19842 TTI.getNumberOfRegisters(TTI.getRegisterClassForType(true, Tp));
19844 if (NumParts > NumRegs / 2)
19845 ReduxWidth = bit_floor(ReduxWidth);
19846 return ReduxWidth;
19848 if (!VectorizeNonPowerOf2 || !has_single_bit(ReduxWidth + 1))
19849 ReduxWidth = GetVectorFactor(ReduxWidth);
19850 ReduxWidth = std::min(ReduxWidth, MaxElts);
19852 unsigned Start = 0;
19853 unsigned Pos = Start;
19854 // Restarts vectorization attempt with lower vector factor.
19855 unsigned PrevReduxWidth = ReduxWidth;
19856 bool CheckForReusedReductionOpsLocal = false;
19857 auto AdjustReducedVals = [&](bool IgnoreVL = false) {
19858 bool IsAnyRedOpGathered = !IgnoreVL && V.isAnyGathered(IgnoreList);
19859 if (!CheckForReusedReductionOpsLocal && PrevReduxWidth == ReduxWidth) {
19860 // Check if any of the reduction ops are gathered. If so, it is worth
19861 // trying again with a smaller number of reduction ops.
19862 CheckForReusedReductionOpsLocal |= IsAnyRedOpGathered;
19864 ++Pos;
19865 if (Pos < NumReducedVals - ReduxWidth + 1)
19866 return IsAnyRedOpGathered;
19867 Pos = Start;
19868 --ReduxWidth;
19869 if (ReduxWidth > 1)
19870 ReduxWidth = GetVectorFactor(ReduxWidth);
19871 return IsAnyRedOpGathered;
19873 bool AnyVectorized = false;
19874 SmallDenseSet<std::pair<unsigned, unsigned>, 8> IgnoredCandidates;
19875 while (Pos < NumReducedVals - ReduxWidth + 1 &&
19876 ReduxWidth >= ReductionLimit) {
19877 // Dependency in tree of the reduction ops - drop this attempt, try
19878 // later.
19879 if (CheckForReusedReductionOpsLocal && PrevReduxWidth != ReduxWidth &&
19880 Start == 0) {
19881 CheckForReusedReductionOps = true;
19882 break;
19884 PrevReduxWidth = ReduxWidth;
19885 ArrayRef<Value *> VL(std::next(Candidates.begin(), Pos), ReduxWidth);
19886 // Been analyzed already - skip.
19887 if (IgnoredCandidates.contains(std::make_pair(Pos, ReduxWidth)) ||
19888 (!has_single_bit(ReduxWidth) &&
19889 (IgnoredCandidates.contains(
19890 std::make_pair(Pos, bit_floor(ReduxWidth))) ||
19891 IgnoredCandidates.contains(
19892 std::make_pair(Pos + (ReduxWidth - bit_floor(ReduxWidth)),
19893 bit_floor(ReduxWidth))))) ||
19894 V.areAnalyzedReductionVals(VL)) {
19895 (void)AdjustReducedVals(/*IgnoreVL=*/true);
19896 continue;
19898 // Early exit if any of the reduction values were deleted during
19899 // previous vectorization attempts.
19900 if (any_of(VL, [&V](Value *RedVal) {
19901 auto *RedValI = dyn_cast<Instruction>(RedVal);
19902 if (!RedValI)
19903 return false;
19904 return V.isDeleted(RedValI);
19906 break;
19907 V.buildTree(VL, IgnoreList);
19908 if (V.isTreeTinyAndNotFullyVectorizable(/*ForReduction=*/true)) {
19909 if (!AdjustReducedVals())
19910 V.analyzedReductionVals(VL);
19911 continue;
19913 if (V.isLoadCombineReductionCandidate(RdxKind)) {
19914 if (!AdjustReducedVals())
19915 V.analyzedReductionVals(VL);
19916 continue;
19918 V.reorderTopToBottom();
19919 // No need to reorder the root node at all.
19920 V.reorderBottomToTop(/*IgnoreReorder=*/true);
19921 // Keep extracted other reduction values, if they are used in the
19922 // vectorization trees.
19923 BoUpSLP::ExtraValueToDebugLocsMap LocalExternallyUsedValues(
19924 ExternallyUsedValues);
19925 // The reduction root is used as the insertion point for new
19926 // instructions, so set it as externally used to prevent it from being
19927 // deleted.
19928 LocalExternallyUsedValues.insert(ReductionRoot);
19929 for (unsigned Cnt = 0, Sz = ReducedVals.size(); Cnt < Sz; ++Cnt) {
19930 if (Cnt == I || (ShuffledExtracts && Cnt == I - 1))
19931 continue;
19932 for (Value *V : ReducedVals[Cnt])
19933 if (isa<Instruction>(V))
19934 LocalExternallyUsedValues.insert(TrackedVals[V]);
19936 if (!IsSupportedHorRdxIdentityOp) {
19937 // Number of uses of the candidates in the vector of values.
19938 assert(SameValuesCounter.empty() &&
19939 "Reused values counter map is not empty");
19940 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) {
19941 if (Cnt >= Pos && Cnt < Pos + ReduxWidth)
19942 continue;
19943 Value *V = Candidates[Cnt];
19944 Value *OrigV = TrackedToOrig.at(V);
19945 ++SameValuesCounter.try_emplace(OrigV).first->second;
19948 V.transformNodes();
19949 SmallPtrSet<Value *, 4> VLScalars(VL.begin(), VL.end());
19950 // Gather externally used values.
19951 SmallPtrSet<Value *, 4> Visited;
19952 for (unsigned Cnt = 0; Cnt < NumReducedVals; ++Cnt) {
19953 if (Cnt >= Pos && Cnt < Pos + ReduxWidth)
19954 continue;
19955 Value *RdxVal = Candidates[Cnt];
19956 if (auto It = TrackedVals.find(RdxVal); It != TrackedVals.end())
19957 RdxVal = It->second;
19958 if (!Visited.insert(RdxVal).second)
19959 continue;
19960 // Check if the scalar was vectorized as part of the vectorization
19961 // tree but not the top node.
19962 if (!VLScalars.contains(RdxVal) && V.isVectorized(RdxVal)) {
19963 LocalExternallyUsedValues.insert(RdxVal);
19964 continue;
19966 Value *OrigV = TrackedToOrig.at(RdxVal);
19967 unsigned NumOps =
19968 VectorizedVals.lookup(OrigV) + At(SameValuesCounter, OrigV);
19969 if (NumOps != ReducedValsToOps.at(OrigV).size())
19970 LocalExternallyUsedValues.insert(RdxVal);
19972 // Do not need the list of reused scalars in regular mode anymore.
19973 if (!IsSupportedHorRdxIdentityOp)
19974 SameValuesCounter.clear();
19975 for (Value *RdxVal : VL)
19976 if (RequiredExtract.contains(RdxVal))
19977 LocalExternallyUsedValues.insert(RdxVal);
19978 V.buildExternalUses(LocalExternallyUsedValues);
19980 V.computeMinimumValueSizes();
19982 // Estimate cost.
19983 InstructionCost TreeCost = V.getTreeCost(VL);
19984 InstructionCost ReductionCost =
19985 getReductionCost(TTI, VL, IsCmpSelMinMax, RdxFMF, V);
19986 InstructionCost Cost = TreeCost + ReductionCost;
19987 LLVM_DEBUG(dbgs() << "SLP: Found cost = " << Cost
19988 << " for reduction\n");
19989 if (!Cost.isValid())
19990 break;
19991 if (Cost >= -SLPCostThreshold) {
19992 V.getORE()->emit([&]() {
19993 return OptimizationRemarkMissed(SV_NAME, "HorSLPNotBeneficial",
19994 ReducedValsToOps.at(VL[0]).front())
19995 << "Vectorizing horizontal reduction is possible "
19996 << "but not beneficial with cost " << ore::NV("Cost", Cost)
19997 << " and threshold "
19998 << ore::NV("Threshold", -SLPCostThreshold);
20000 if (!AdjustReducedVals()) {
20001 V.analyzedReductionVals(VL);
20002 unsigned Offset = Pos == Start ? Pos : Pos - 1;
20003 if (ReduxWidth > ReductionLimit && V.isTreeNotExtendable()) {
20004 // Add subvectors of VL to the list of the analyzed values.
20005 for (unsigned VF = getFloorFullVectorNumberOfElements(
20006 *TTI, VL.front()->getType(), ReduxWidth - 1);
20007 VF >= ReductionLimit;
20008 VF = getFloorFullVectorNumberOfElements(
20009 *TTI, VL.front()->getType(), VF - 1)) {
20010 if (has_single_bit(VF) &&
20011 V.getCanonicalGraphSize() != V.getTreeSize())
20012 continue;
20013 for (unsigned Idx : seq<unsigned>(ReduxWidth - VF))
20014 IgnoredCandidates.insert(std::make_pair(Offset + Idx, VF));
20018 continue;
20021 LLVM_DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:"
20022 << Cost << ". (HorRdx)\n");
20023 V.getORE()->emit([&]() {
20024 return OptimizationRemark(SV_NAME, "VectorizedHorizontalReduction",
20025 ReducedValsToOps.at(VL[0]).front())
20026 << "Vectorized horizontal reduction with cost "
20027 << ore::NV("Cost", Cost) << " and with tree size "
20028 << ore::NV("TreeSize", V.getTreeSize());
20031 Builder.setFastMathFlags(RdxFMF);
20033 // Emit a reduction. If the root is a select (min/max idiom), the insert
20034 // point is the compare condition of that select.
20035 Instruction *RdxRootInst = cast<Instruction>(ReductionRoot);
20036 Instruction *InsertPt = RdxRootInst;
20037 if (IsCmpSelMinMax)
20038 InsertPt = GetCmpForMinMaxReduction(RdxRootInst);
20040 // Vectorize a tree.
20041 Value *VectorizedRoot =
20042 V.vectorizeTree(LocalExternallyUsedValues, InsertPt);
20043 // Update TrackedToOrig mapping, since the tracked values might be
20044 // updated.
20045 for (Value *RdxVal : Candidates) {
20046 Value *OrigVal = TrackedToOrig.at(RdxVal);
20047 Value *TransformedRdxVal = TrackedVals.at(OrigVal);
20048 if (TransformedRdxVal != RdxVal)
20049 TrackedToOrig.try_emplace(TransformedRdxVal, OrigVal);
20052 Builder.SetInsertPoint(InsertPt);
20054 // To prevent poison from leaking across what used to be sequential,
20055 // safe, scalar boolean logic operations, the reduction operand must be
20056 // frozen.
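// E.g. (illustrative) for the scalar chain
//   %s0 = select i1 %a, i1 %b, i1 false
//   %s1 = select i1 %s0, i1 %c, i1 false
// %c never influences the result when %a is false, but a plain vector "and"
// over all the operands would let poison in %c leak through; freezing the
// vectorized operand preserves the original poison semantics.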
20057 if (AnyBoolLogicOp && !isGuaranteedNotToBePoison(VectorizedRoot))
20058 VectorizedRoot = Builder.CreateFreeze(VectorizedRoot);
20060 // Emit code to correctly handle reused reduced values, if required.
20061 if (OptReusedScalars && !SameScaleFactor) {
20062 VectorizedRoot = emitReusedOps(VectorizedRoot, Builder, V,
20063 SameValuesCounter, TrackedToOrig);
20066 Value *ReducedSubTree;
20067 Type *ScalarTy = VL.front()->getType();
20068 if (isa<FixedVectorType>(ScalarTy)) {
20069 assert(SLPReVec && "FixedVectorType is not expected.");
20070 unsigned ScalarTyNumElements = getNumElements(ScalarTy);
20071 ReducedSubTree = PoisonValue::get(FixedVectorType::get(
20072 VectorizedRoot->getType()->getScalarType(), ScalarTyNumElements));
20073 for (unsigned I : seq<unsigned>(ScalarTyNumElements)) {
20074 // Do reduction for each lane.
20075 // e.g., do reduce add for
20076 // VL[0] = <4 x Ty> <a, b, c, d>
20077 // VL[1] = <4 x Ty> <e, f, g, h>
20078 // Lane[0] = <2 x Ty> <a, e>
20079 // Lane[1] = <2 x Ty> <b, f>
20080 // Lane[2] = <2 x Ty> <c, g>
20081 // Lane[3] = <2 x Ty> <d, h>
20082 // result[0] = reduce add Lane[0]
20083 // result[1] = reduce add Lane[1]
20084 // result[2] = reduce add Lane[2]
20085 // result[3] = reduce add Lane[3]
20086 SmallVector<int, 16> Mask =
20087 createStrideMask(I, ScalarTyNumElements, VL.size());
20088 Value *Lane = Builder.CreateShuffleVector(VectorizedRoot, Mask);
20089 ReducedSubTree = Builder.CreateInsertElement(
20090 ReducedSubTree,
20091 emitReduction(Lane, Builder, TTI, RdxRootInst->getType()), I);
20093 } else {
20094 ReducedSubTree = emitReduction(VectorizedRoot, Builder, TTI,
20095 RdxRootInst->getType());
20097 if (ReducedSubTree->getType() != VL.front()->getType()) {
20098 assert(ReducedSubTree->getType() != VL.front()->getType() &&
20099 "Expected different reduction type.");
20100 ReducedSubTree =
20101 Builder.CreateIntCast(ReducedSubTree, VL.front()->getType(),
20102 V.isSignedMinBitwidthRootNode());
20105 // Improved analysis for add/fadd/xor reductions with same scale factor
20106 // for all operands of reductions. We can emit scalar ops for them
20107 // instead.
20108 if (OptReusedScalars && SameScaleFactor)
20109 ReducedSubTree = emitScaleForReusedOps(
20110 ReducedSubTree, Builder, SameValuesCounter.front().second);
20112 VectorizedTree = GetNewVectorizedTree(VectorizedTree, ReducedSubTree);
20113 // Count vectorized reduced values to exclude them from final reduction.
20114 for (Value *RdxVal : VL) {
20115 Value *OrigV = TrackedToOrig.at(RdxVal);
20116 if (IsSupportedHorRdxIdentityOp) {
20117 VectorizedVals.try_emplace(OrigV, At(SameValuesCounter, OrigV));
20118 continue;
20120 ++VectorizedVals.try_emplace(OrigV).first->getSecond();
20121 if (!V.isVectorized(RdxVal))
20122 RequiredExtract.insert(RdxVal);
20124 Pos += ReduxWidth;
20125 Start = Pos;
20126 ReduxWidth = NumReducedVals - Pos;
20127 if (ReduxWidth > 1)
20128 ReduxWidth = GetVectorFactor(NumReducedVals - Pos);
20129 AnyVectorized = true;
20131 if (OptReusedScalars && !AnyVectorized) {
20132 for (const std::pair<Value *, unsigned> &P : SameValuesCounter) {
20133 Value *RdxVal = TrackedVals.at(P.first);
20134 Value *RedVal = emitScaleForReusedOps(RdxVal, Builder, P.second);
20135 VectorizedTree = GetNewVectorizedTree(VectorizedTree, RedVal);
20136 VectorizedVals.try_emplace(P.first, P.second);
20138 continue;
20141 if (VectorizedTree) {
20142 // Reorder operands of bool logical op in the natural order to avoid
20143 // possible problem with poison propagation. If not possible to reorder
20144 // (both operands are originally RHS), emit an extra freeze instruction
20145 // for the LHS operand.
20146 // I.e., if we have original code like this:
20147 // RedOp1 = select i1 ?, i1 LHS, i1 false
20148 // RedOp2 = select i1 RHS, i1 ?, i1 false
20150 // Then, we swap LHS/RHS to create a new op that matches the poison
20151 // semantics of the original code.
20153 // If we have original code like this and both values could be poison:
20154 // RedOp1 = select i1 ?, i1 LHS, i1 false
20155 // RedOp2 = select i1 ?, i1 RHS, i1 false
20157 // Then, we must freeze LHS in the new op.
20158 auto FixBoolLogicalOps = [&, VectorizedTree](Value *&LHS, Value *&RHS,
20159 Instruction *RedOp1,
20160 Instruction *RedOp2,
20161 bool InitStep) {
20162 if (!AnyBoolLogicOp)
20163 return;
20164 if (isBoolLogicOp(RedOp1) &&
20165 ((!InitStep && LHS == VectorizedTree) ||
20166 getRdxOperand(RedOp1, 0) == LHS || isGuaranteedNotToBePoison(LHS)))
20167 return;
20168 if (isBoolLogicOp(RedOp2) && ((!InitStep && RHS == VectorizedTree) ||
20169 getRdxOperand(RedOp2, 0) == RHS ||
20170 isGuaranteedNotToBePoison(RHS))) {
20171 std::swap(LHS, RHS);
20172 return;
20174 if (LHS != VectorizedTree)
20175 LHS = Builder.CreateFreeze(LHS);
20177 // Finish the reduction.
20178 // Need to add extra arguments and the possible reduction values that
20179 // were not vectorized.
20180 // Try to avoid dependencies between the scalar remainders after
20181 // reductions.
20182 auto FinalGen =
20183 [&](ArrayRef<std::pair<Instruction *, Value *>> InstVals,
20184 bool InitStep) {
20185 unsigned Sz = InstVals.size();
20186 SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
20187 Sz % 2);
20188 for (unsigned I = 0, E = (Sz / 2) * 2; I < E; I += 2) {
20189 Instruction *RedOp = InstVals[I + 1].first;
20190 Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
20191 Value *RdxVal1 = InstVals[I].second;
20192 Value *StableRdxVal1 = RdxVal1;
20193 auto It1 = TrackedVals.find(RdxVal1);
20194 if (It1 != TrackedVals.end())
20195 StableRdxVal1 = It1->second;
20196 Value *RdxVal2 = InstVals[I + 1].second;
20197 Value *StableRdxVal2 = RdxVal2;
20198 auto It2 = TrackedVals.find(RdxVal2);
20199 if (It2 != TrackedVals.end())
20200 StableRdxVal2 = It2->second;
20201 // To prevent poison from leaking across what used to be
20202 // sequential, safe, scalar boolean logic operations, the
20203 // reduction operand must be frozen.
20204 FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first,
20205 RedOp, InitStep);
20206 Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
20207 StableRdxVal2, "op.rdx", ReductionOps);
20208 ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
20210 if (Sz % 2 == 1)
20211 ExtraReds[Sz / 2] = InstVals.back();
20212 return ExtraReds;
20214 SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
20215 ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot),
20216 VectorizedTree);
20217 SmallPtrSet<Value *, 8> Visited;
20218 for (ArrayRef<Value *> Candidates : ReducedVals) {
20219 for (Value *RdxVal : Candidates) {
20220 if (!Visited.insert(RdxVal).second)
20221 continue;
20222 unsigned NumOps = VectorizedVals.lookup(RdxVal);
20223 for (Instruction *RedOp :
20224 ArrayRef(ReducedValsToOps.at(RdxVal)).drop_back(NumOps))
20225 ExtraReductions.emplace_back(RedOp, RdxVal);
20228 // Iterate through all non-vectorized reduction values/extra arguments.
20229 bool InitStep = true;
20230 while (ExtraReductions.size() > 1) {
20231 SmallVector<std::pair<Instruction *, Value *>> NewReds =
20232 FinalGen(ExtraReductions, InitStep);
20233 ExtraReductions.swap(NewReds);
20234 InitStep = false;
20236 VectorizedTree = ExtraReductions.front().second;
20238 ReductionRoot->replaceAllUsesWith(VectorizedTree);
20240 // The original scalar reduction is expected to have no remaining
20241 // uses outside the reduction tree itself. Assert that we got this
20242 // correct, replace internal uses with poison, and mark for eventual
20243 // deletion.
20244 #ifndef NDEBUG
20245 SmallSet<Value *, 4> IgnoreSet;
20246 for (ArrayRef<Value *> RdxOps : ReductionOps)
20247 IgnoreSet.insert(RdxOps.begin(), RdxOps.end());
20248 #endif
20249 for (ArrayRef<Value *> RdxOps : ReductionOps) {
20250 for (Value *Ignore : RdxOps) {
20251 if (!Ignore)
20252 continue;
20253 #ifndef NDEBUG
20254 for (auto *U : Ignore->users()) {
20255 assert(IgnoreSet.count(U) &&
20256 "All users must be either in the reduction ops list.");
20258 #endif
20259 if (!Ignore->use_empty()) {
20260 Value *P = PoisonValue::get(Ignore->getType());
20261 Ignore->replaceAllUsesWith(P);
20264 V.removeInstructionsAndOperands(RdxOps);
20266 } else if (!CheckForReusedReductionOps) {
20267 for (ReductionOpsType &RdxOps : ReductionOps)
20268 for (Value *RdxOp : RdxOps)
20269 V.analyzedReductionRoot(cast<Instruction>(RdxOp));
20271 return VectorizedTree;
20274 private:
20275 /// Calculate the cost of a reduction.
20276 InstructionCost getReductionCost(TargetTransformInfo *TTI,
20277 ArrayRef<Value *> ReducedVals,
20278 bool IsCmpSelMinMax, FastMathFlags FMF,
20279 const BoUpSLP &R) {
20280 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
20281 Type *ScalarTy = ReducedVals.front()->getType();
20282 unsigned ReduxWidth = ReducedVals.size();
20283 FixedVectorType *VectorTy = R.getReductionType();
20284 InstructionCost VectorCost = 0, ScalarCost;
20285 // If all of the reduced values are constant, the vector cost is 0, since
20286 // the reduction value can be calculated at compile time.
20287 bool AllConsts = allConstant(ReducedVals);
20288 auto EvaluateScalarCost = [&](function_ref<InstructionCost()> GenCostFn) {
20289 InstructionCost Cost = 0;
20290 // Scalar cost is repeated for N-1 elements.
20291 int Cnt = ReducedVals.size();
20292 for (Value *RdxVal : ReducedVals) {
20293 if (Cnt == 1)
20294 break;
20295 --Cnt;
20296 if (RdxVal->hasNUsesOrMore(IsCmpSelMinMax ? 3 : 2)) {
20297 Cost += GenCostFn();
20298 continue;
20300 InstructionCost ScalarCost = 0;
20301 for (User *U : RdxVal->users()) {
20302 auto *RdxOp = cast<Instruction>(U);
20303 if (hasRequiredNumberOfUses(IsCmpSelMinMax, RdxOp)) {
20304 ScalarCost += TTI->getInstructionCost(RdxOp, CostKind);
20305 continue;
20307 ScalarCost = InstructionCost::getInvalid();
20308 break;
20310 if (ScalarCost.isValid())
20311 Cost += ScalarCost;
20312 else
20313 Cost += GenCostFn();
20315 return Cost;
20317 switch (RdxKind) {
20318 case RecurKind::Add:
20319 case RecurKind::Mul:
20320 case RecurKind::Or:
20321 case RecurKind::And:
20322 case RecurKind::Xor:
20323 case RecurKind::FAdd:
20324 case RecurKind::FMul: {
20325 unsigned RdxOpcode = RecurrenceDescriptor::getOpcode(RdxKind);
20326 if (!AllConsts) {
20327 if (auto *VecTy = dyn_cast<FixedVectorType>(ScalarTy)) {
20328 assert(SLPReVec && "FixedVectorType is not expected.");
20329 unsigned ScalarTyNumElements = VecTy->getNumElements();
20330 for (unsigned I : seq<unsigned>(ReducedVals.size())) {
20331 VectorCost += TTI->getShuffleCost(
20332 TTI::SK_PermuteSingleSrc, VectorTy,
20333 createStrideMask(I, ScalarTyNumElements, ReducedVals.size()));
20334 VectorCost += TTI->getArithmeticReductionCost(RdxOpcode, VecTy, FMF,
20335 CostKind);
20337 VectorCost += TTI->getScalarizationOverhead(
20338 VecTy, APInt::getAllOnes(ScalarTyNumElements), /*Insert*/ true,
20339 /*Extract*/ false, TTI::TCK_RecipThroughput);
20340 } else {
20341 Type *RedTy = VectorTy->getElementType();
20342 auto [RType, IsSigned] = R.getRootNodeTypeWithNoCast().value_or(
20343 std::make_pair(RedTy, true));
20344 if (RType == RedTy) {
20345 VectorCost = TTI->getArithmeticReductionCost(RdxOpcode, VectorTy,
20346 FMF, CostKind);
20347 } else {
20348 VectorCost = TTI->getExtendedReductionCost(
20349 RdxOpcode, !IsSigned, RedTy, getWidenedType(RType, ReduxWidth),
20350 FMF, CostKind);
20354 ScalarCost = EvaluateScalarCost([&]() {
20355 return TTI->getArithmeticInstrCost(RdxOpcode, ScalarTy, CostKind);
20357 break;
20359 case RecurKind::FMax:
20360 case RecurKind::FMin:
20361 case RecurKind::FMaximum:
20362 case RecurKind::FMinimum:
20363 case RecurKind::SMax:
20364 case RecurKind::SMin:
20365 case RecurKind::UMax:
20366 case RecurKind::UMin: {
20367 Intrinsic::ID Id = getMinMaxReductionIntrinsicOp(RdxKind);
20368 if (!AllConsts)
20369 VectorCost = TTI->getMinMaxReductionCost(Id, VectorTy, FMF, CostKind);
20370 ScalarCost = EvaluateScalarCost([&]() {
20371 IntrinsicCostAttributes ICA(Id, ScalarTy, {ScalarTy, ScalarTy}, FMF);
20372 return TTI->getIntrinsicInstrCost(ICA, CostKind);
20374 break;
20376 default:
20377 llvm_unreachable("Expected arithmetic or min/max reduction operation");
20380 LLVM_DEBUG(dbgs() << "SLP: Adding cost " << VectorCost - ScalarCost
20381 << " for reduction of " << shortBundleName(ReducedVals)
20382 << " (It is a splitting reduction)\n");
20383 return VectorCost - ScalarCost;
20386 /// Emit a horizontal reduction of the vectorized value.
20387 Value *emitReduction(Value *VectorizedValue, IRBuilderBase &Builder,
20388 const TargetTransformInfo *TTI, Type *DestTy) {
20389 assert(VectorizedValue && "Need to have a vectorized tree node");
20390 assert(RdxKind != RecurKind::FMulAdd &&
20391 "A call to the llvm.fmuladd intrinsic is not handled yet");
20393 auto *FTy = cast<FixedVectorType>(VectorizedValue->getType());
20394 if (FTy->getScalarType() == Builder.getInt1Ty() &&
20395 RdxKind == RecurKind::Add &&
20396 DestTy->getScalarType() != FTy->getScalarType()) {
20397 // Convert vector_reduce_add(ZExt(<n x i1>)) to
20398 // ZExtOrTrunc(ctpop(bitcast <n x i1> to iN)).
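// E.g. (illustrative) for a mask %v of type <8 x i1>:
//   %z = zext <8 x i1> %v to <8 x i32>
//   %r = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %z)
// becomes
//   %bc  = bitcast <8 x i1> %v to i8
//   %pop = call i8 @llvm.ctpop.i8(i8 %bc)
//   %r   = zext i8 %pop to i32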
20399 Value *V = Builder.CreateBitCast(
20400 VectorizedValue, Builder.getIntNTy(FTy->getNumElements()));
20401 ++NumVectorInstructions;
20402 return Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
20404 ++NumVectorInstructions;
20405 return createSimpleReduction(Builder, VectorizedValue, RdxKind);
20408 /// Emits optimized code for unique scalar value reused \p Cnt times.
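/// E.g. (illustrative) for a single value x reused 4 times this emits
/// "x * 4" for an integer add reduction, "x * 4.0" for fadd, 0 for xor
/// (even repeat count), and x itself for and/or/min/max, matching the
/// cases below.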
20409 Value *emitScaleForReusedOps(Value *VectorizedValue, IRBuilderBase &Builder,
20410 unsigned Cnt) {
20411 assert(IsSupportedHorRdxIdentityOp &&
20412 "The optimization of matched scalar identity horizontal reductions "
20413 "must be supported.");
20414 if (Cnt == 1)
20415 return VectorizedValue;
20416 switch (RdxKind) {
20417 case RecurKind::Add: {
20418 // res = mul vv, n
20419 Value *Scale = ConstantInt::get(VectorizedValue->getType(), Cnt);
20420 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Cnt << " of "
20421 << VectorizedValue << ". (HorRdx)\n");
20422 return Builder.CreateMul(VectorizedValue, Scale);
20424 case RecurKind::Xor: {
20425 // res = n % 2 ? 0 : vv
20426 LLVM_DEBUG(dbgs() << "SLP: Xor " << Cnt << " of " << VectorizedValue
20427 << ". (HorRdx)\n");
20428 if (Cnt % 2 == 0)
20429 return Constant::getNullValue(VectorizedValue->getType());
20430 return VectorizedValue;
20432 case RecurKind::FAdd: {
20433 // res = fmul v, n
20434 Value *Scale = ConstantFP::get(VectorizedValue->getType(), Cnt);
20435 LLVM_DEBUG(dbgs() << "SLP: FAdd (to-fmul) " << Cnt << " of "
20436 << VectorizedValue << ". (HorRdx)\n");
20437 return Builder.CreateFMul(VectorizedValue, Scale);
20439 case RecurKind::And:
20440 case RecurKind::Or:
20441 case RecurKind::SMax:
20442 case RecurKind::SMin:
20443 case RecurKind::UMax:
20444 case RecurKind::UMin:
20445 case RecurKind::FMax:
20446 case RecurKind::FMin:
20447 case RecurKind::FMaximum:
20448 case RecurKind::FMinimum:
20449 // res = vv
20450 return VectorizedValue;
20451 case RecurKind::Mul:
20452 case RecurKind::FMul:
20453 case RecurKind::FMulAdd:
20454 case RecurKind::IAnyOf:
20455 case RecurKind::FAnyOf:
20456 case RecurKind::IFindLastIV:
20457 case RecurKind::FFindLastIV:
20458 case RecurKind::None:
20459 llvm_unreachable("Unexpected reduction kind for repeated scalar.");
20461 return nullptr;
20464 /// Emits actual operation for the scalar identity values, found during
20465 /// horizontal reduction analysis.
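/// E.g. (illustrative) for an add reduction of the scalars <a, a, b, c>, the
/// vectorized root <a, b, c> is multiplied element-wise by <2, 1, 1> before
/// the final reduction, as in the RecurKind::Add case below.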
20466 Value *
20467 emitReusedOps(Value *VectorizedValue, IRBuilderBase &Builder, BoUpSLP &R,
20468 const SmallMapVector<Value *, unsigned, 16> &SameValuesCounter,
20469 const DenseMap<Value *, Value *> &TrackedToOrig) {
20470 assert(IsSupportedHorRdxIdentityOp &&
20471 "The optimization of matched scalar identity horizontal reductions "
20472 "must be supported.");
20473 ArrayRef<Value *> VL = R.getRootNodeScalars();
20474 auto *VTy = cast<FixedVectorType>(VectorizedValue->getType());
20475 if (VTy->getElementType() != VL.front()->getType()) {
20476 VectorizedValue = Builder.CreateIntCast(
20477 VectorizedValue,
20478 getWidenedType(VL.front()->getType(), VTy->getNumElements()),
20479 R.isSignedMinBitwidthRootNode());
20481 switch (RdxKind) {
20482 case RecurKind::Add: {
20483 // root = mul prev_root, <1, 1, n, 1>
20484 SmallVector<Constant *> Vals;
20485 for (Value *V : VL) {
20486 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20487 Vals.push_back(ConstantInt::get(V->getType(), Cnt, /*IsSigned=*/false));
20489 auto *Scale = ConstantVector::get(Vals);
20490 LLVM_DEBUG(dbgs() << "SLP: Add (to-mul) " << Scale << " of "
20491 << VectorizedValue << ". (HorRdx)\n");
20492 return Builder.CreateMul(VectorizedValue, Scale);
20494 case RecurKind::And:
20495 case RecurKind::Or:
20496 // No need for multiple or/and(s).
20497 LLVM_DEBUG(dbgs() << "SLP: And/or of same " << VectorizedValue
20498 << ". (HorRdx)\n");
20499 return VectorizedValue;
20500 case RecurKind::SMax:
20501 case RecurKind::SMin:
20502 case RecurKind::UMax:
20503 case RecurKind::UMin:
20504 case RecurKind::FMax:
20505 case RecurKind::FMin:
20506 case RecurKind::FMaximum:
20507 case RecurKind::FMinimum:
20508 // No need for multiple min/max(s) of the same value.
20509 LLVM_DEBUG(dbgs() << "SLP: Max/min of same " << VectorizedValue
20510 << ". (HorRdx)\n");
20511 return VectorizedValue;
20512 case RecurKind::Xor: {
20513 // Replace values with an even number of repeats with 0, since
20514 // x xor x = 0.
20515 // root = shuffle prev_root, zeroinitializer, <0, 1, 2, vf, 4, vf, 5, 6,
20516 // 7>, if the 4th and 6th elements have an even number of repeats.
20517 SmallVector<int> Mask(
20518 cast<FixedVectorType>(VectorizedValue->getType())->getNumElements(),
20519 PoisonMaskElem);
20520 std::iota(Mask.begin(), Mask.end(), 0);
20521 bool NeedShuffle = false;
20522 for (unsigned I = 0, VF = VL.size(); I < VF; ++I) {
20523 Value *V = VL[I];
20524 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20525 if (Cnt % 2 == 0) {
20526 Mask[I] = VF;
20527 NeedShuffle = true;
20530 LLVM_DEBUG(dbgs() << "SLP: Xor <";
20531 for (int I : Mask)
20532 dbgs() << I << " ";
20533 dbgs() << "> of " << VectorizedValue << ". (HorRdx)\n");
20534 if (NeedShuffle)
20535 VectorizedValue = Builder.CreateShuffleVector(
20536 VectorizedValue,
20537 ConstantVector::getNullValue(VectorizedValue->getType()), Mask);
20538 return VectorizedValue;
20540 case RecurKind::FAdd: {
20541 // root = fmul prev_root, <1.0, 1.0, n.0, 1.0>
20542 SmallVector<Constant *> Vals;
20543 for (Value *V : VL) {
20544 unsigned Cnt = SameValuesCounter.lookup(TrackedToOrig.at(V));
20545 Vals.push_back(ConstantFP::get(V->getType(), Cnt));
20547 auto *Scale = ConstantVector::get(Vals);
20548 return Builder.CreateFMul(VectorizedValue, Scale);
20550 case RecurKind::Mul:
20551 case RecurKind::FMul:
20552 case RecurKind::FMulAdd:
20553 case RecurKind::IAnyOf:
20554 case RecurKind::FAnyOf:
20555 case RecurKind::IFindLastIV:
20556 case RecurKind::FFindLastIV:
20557 case RecurKind::None:
20558 llvm_unreachable("Unexpected reduction kind for reused scalars.");
20560 return nullptr;
20563 } // end anonymous namespace
20565 /// Gets recurrence kind from the specified value.
20566 static RecurKind getRdxKind(Value *V) {
20567 return HorizontalReduction::getRdxKind(V);
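/// \returns the total number of scalar elements in the homogeneous aggregate
/// built by \p InsertInst, e.g. 4 for <4 x float>, for {<2 x float>, <2 x float>}
/// and for [2 x {float, float}], or std::nullopt if the aggregate is not
/// homogeneous or not supported.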
20569 static std::optional<unsigned> getAggregateSize(Instruction *InsertInst) {
20570 if (auto *IE = dyn_cast<InsertElementInst>(InsertInst))
20571 return cast<FixedVectorType>(IE->getType())->getNumElements();
20573 unsigned AggregateSize = 1;
20574 auto *IV = cast<InsertValueInst>(InsertInst);
20575 Type *CurrentType = IV->getType();
20576 do {
20577 if (auto *ST = dyn_cast<StructType>(CurrentType)) {
20578 for (auto *Elt : ST->elements())
20579 if (Elt != ST->getElementType(0)) // check homogeneity
20580 return std::nullopt;
20581 AggregateSize *= ST->getNumElements();
20582 CurrentType = ST->getElementType(0);
20583 } else if (auto *AT = dyn_cast<ArrayType>(CurrentType)) {
20584 AggregateSize *= AT->getNumElements();
20585 CurrentType = AT->getElementType();
20586 } else if (auto *VT = dyn_cast<FixedVectorType>(CurrentType)) {
20587 AggregateSize *= VT->getNumElements();
20588 return AggregateSize;
20589 } else if (CurrentType->isSingleValueType()) {
20590 return AggregateSize;
20591 } else {
20592 return std::nullopt;
20594 } while (true);
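/// Walks a chain of insertelement/insertvalue instructions bottom-up and
/// records, for each position in the aggregate, the inserted scalar operand in
/// \p BuildVectorOpds and the corresponding insert instruction in
/// \p InsertElts. Nested inserts (e.g. when building {<2 x float>, <2 x float>})
/// are followed recursively.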
20597 static void findBuildAggregate_rec(Instruction *LastInsertInst,
20598 TargetTransformInfo *TTI,
20599 SmallVectorImpl<Value *> &BuildVectorOpds,
20600 SmallVectorImpl<Value *> &InsertElts,
20601 unsigned OperandOffset, const BoUpSLP &R) {
20602 do {
20603 Value *InsertedOperand = LastInsertInst->getOperand(1);
20604 std::optional<unsigned> OperandIndex =
20605 getElementIndex(LastInsertInst, OperandOffset);
20606 if (!OperandIndex || R.isDeleted(LastInsertInst))
20607 return;
20608 if (isa<InsertElementInst, InsertValueInst>(InsertedOperand)) {
20609 findBuildAggregate_rec(cast<Instruction>(InsertedOperand), TTI,
20610 BuildVectorOpds, InsertElts, *OperandIndex, R);
20612 } else {
20613 BuildVectorOpds[*OperandIndex] = InsertedOperand;
20614 InsertElts[*OperandIndex] = LastInsertInst;
20616 LastInsertInst = dyn_cast<Instruction>(LastInsertInst->getOperand(0));
20617 } while (LastInsertInst != nullptr &&
20618 isa<InsertValueInst, InsertElementInst>(LastInsertInst) &&
20619 LastInsertInst->hasOneUse());
20622 /// Recognize construction of vectors like
20623 /// %ra = insertelement <4 x float> poison, float %s0, i32 0
20624 /// %rb = insertelement <4 x float> %ra, float %s1, i32 1
20625 /// %rc = insertelement <4 x float> %rb, float %s2, i32 2
20626 /// %rd = insertelement <4 x float> %rc, float %s3, i32 3
20627 /// starting from the last insertelement or insertvalue instruction.
20629 /// Also recognize homogeneous aggregates like {<2 x float>, <2 x float>},
20630 /// {{float, float}, {float, float}}, [2 x {float, float}] and so on.
20631 /// See llvm/test/Transforms/SLPVectorizer/X86/pr42022.ll for examples.
20633 /// Assume LastInsertInst is of InsertElementInst or InsertValueInst type.
20635 /// \return true if it matches.
20636 static bool findBuildAggregate(Instruction *LastInsertInst,
20637 TargetTransformInfo *TTI,
20638 SmallVectorImpl<Value *> &BuildVectorOpds,
20639 SmallVectorImpl<Value *> &InsertElts,
20640 const BoUpSLP &R) {
20642 assert((isa<InsertElementInst>(LastInsertInst) ||
20643 isa<InsertValueInst>(LastInsertInst)) &&
20644 "Expected insertelement or insertvalue instruction!");
20646 assert((BuildVectorOpds.empty() && InsertElts.empty()) &&
20647 "Expected empty result vectors!");
20649 std::optional<unsigned> AggregateSize = getAggregateSize(LastInsertInst);
20650 if (!AggregateSize)
20651 return false;
20652 BuildVectorOpds.resize(*AggregateSize);
20653 InsertElts.resize(*AggregateSize);
20655 findBuildAggregate_rec(LastInsertInst, TTI, BuildVectorOpds, InsertElts, 0,
20657 llvm::erase(BuildVectorOpds, nullptr);
20658 llvm::erase(InsertElts, nullptr);
20659 if (BuildVectorOpds.size() >= 2)
20660 return true;
20662 return false;
20665 /// Try and get a reduction instruction from a phi node.
20667 /// Given a phi node \p P in a block \p ParentBB, consider possible reductions
20668 /// if they come from either \p ParentBB or a containing loop latch.
20670 /// \returns A candidate reduction value if possible, or \code nullptr \endcode
20671 /// if not possible.
20672 static Instruction *getReductionInstr(const DominatorTree *DT, PHINode *P,
20673 BasicBlock *ParentBB, LoopInfo *LI) {
20674 // There are situations where the reduction value is not dominated by the
20675 // reduction phi. Vectorizing such cases has been reported to cause
20676 // miscompiles. See PR25787.
20677 auto DominatedReduxValue = [&](Value *R) {
20678 return isa<Instruction>(R) &&
20679 DT->dominates(P->getParent(), cast<Instruction>(R)->getParent());
20682 Instruction *Rdx = nullptr;
20684 // Return the incoming value if it comes from the same BB as the phi node.
20685 if (P->getIncomingBlock(0) == ParentBB) {
20686 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0));
20687 } else if (P->getIncomingBlock(1) == ParentBB) {
20688 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1));
20691 if (Rdx && DominatedReduxValue(Rdx))
20692 return Rdx;
20694 // Otherwise, check whether we have a loop latch to look at.
20695 Loop *BBL = LI->getLoopFor(ParentBB);
20696 if (!BBL)
20697 return nullptr;
20698 BasicBlock *BBLatch = BBL->getLoopLatch();
20699 if (!BBLatch)
20700 return nullptr;
20702 // There is a loop latch, return the incoming value if it comes from
20703 // that. This reduction pattern occasionally turns up.
20704 if (P->getIncomingBlock(0) == BBLatch) {
20705 Rdx = dyn_cast<Instruction>(P->getIncomingValue(0));
20706 } else if (P->getIncomingBlock(1) == BBLatch) {
20707 Rdx = dyn_cast<Instruction>(P->getIncomingValue(1));
20710 if (Rdx && DominatedReduxValue(Rdx))
20711 return Rdx;
20713 return nullptr;
20716 static bool matchRdxBop(Instruction *I, Value *&V0, Value *&V1) {
20717 if (match(I, m_BinOp(m_Value(V0), m_Value(V1))))
20718 return true;
20719 if (match(I, m_Intrinsic<Intrinsic::maxnum>(m_Value(V0), m_Value(V1))))
20720 return true;
20721 if (match(I, m_Intrinsic<Intrinsic::minnum>(m_Value(V0), m_Value(V1))))
20722 return true;
20723 if (match(I, m_Intrinsic<Intrinsic::maximum>(m_Value(V0), m_Value(V1))))
20724 return true;
20725 if (match(I, m_Intrinsic<Intrinsic::minimum>(m_Value(V0), m_Value(V1))))
20726 return true;
20727 if (match(I, m_Intrinsic<Intrinsic::smax>(m_Value(V0), m_Value(V1))))
20728 return true;
20729 if (match(I, m_Intrinsic<Intrinsic::smin>(m_Value(V0), m_Value(V1))))
20730 return true;
20731 if (match(I, m_Intrinsic<Intrinsic::umax>(m_Value(V0), m_Value(V1))))
20732 return true;
20733 if (match(I, m_Intrinsic<Intrinsic::umin>(m_Value(V0), m_Value(V1))))
20734 return true;
20735 return false;
20738 /// We could have an initial reduction that is not an add.
20739 /// r *= v1 + v2 + v3 + v4
20740 /// In such a case start looking for a tree rooted in the first '+'.
20741 /// \returns the new root if found, which may be nullptr if not an instruction.
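/// For instance (hypothetical IR), with the reduction phi %r:
///   %add1 = fadd fast float %v1, %v2
///   %add2 = fadd fast float %add1, %v3
///   %mul  = fmul fast float %r, %add2
/// the original root is %mul and the secondary root returned here is %add2.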
20742 static Instruction *tryGetSecondaryReductionRoot(PHINode *Phi,
20743 Instruction *Root) {
20744 assert((isa<BinaryOperator>(Root) || isa<SelectInst>(Root) ||
20745 isa<IntrinsicInst>(Root)) &&
20746 "Expected binop, select, or intrinsic for reduction matching");
20747 Value *LHS =
20748 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root));
20749 Value *RHS =
20750 Root->getOperand(HorizontalReduction::getFirstOperandIndex(Root) + 1);
20751 if (LHS == Phi)
20752 return dyn_cast<Instruction>(RHS);
20753 if (RHS == Phi)
20754 return dyn_cast<Instruction>(LHS);
20755 return nullptr;
20758 /// \returns the first operand of \p I that does not match \p Phi. If the
20759 /// operand is not an instruction it returns nullptr.
20760 static Instruction *getNonPhiOperand(Instruction *I, PHINode *Phi) {
20761 Value *Op0 = nullptr;
20762 Value *Op1 = nullptr;
20763 if (!matchRdxBop(I, Op0, Op1))
20764 return nullptr;
20765 return dyn_cast<Instruction>(Op0 == Phi ? Op1 : Op0);
20768 /// \returns true if \p I is a candidate instruction for reduction vectorization.
20769 static bool isReductionCandidate(Instruction *I) {
20770 bool IsSelect = match(I, m_Select(m_Value(), m_Value(), m_Value()));
20771 Value *B0 = nullptr, *B1 = nullptr;
20772 bool IsBinop = matchRdxBop(I, B0, B1);
20773 return IsBinop || IsSelect;
20776 bool SLPVectorizerPass::vectorizeHorReduction(
20777 PHINode *P, Instruction *Root, BasicBlock *BB, BoUpSLP &R,
20778 SmallVectorImpl<WeakTrackingVH> &PostponedInsts) {
20779 if (!ShouldVectorizeHor)
20780 return false;
20781 bool TryOperandsAsNewSeeds = P && isa<BinaryOperator>(Root);
20783 if (Root->getParent() != BB || isa<PHINode>(Root))
20784 return false;
20786 // If we can find a secondary reduction root, use that instead.
20787 auto SelectRoot = [&]() {
20788 if (TryOperandsAsNewSeeds && isReductionCandidate(Root) &&
20789 HorizontalReduction::getRdxKind(Root) != RecurKind::None)
20790 if (Instruction *NewRoot = tryGetSecondaryReductionRoot(P, Root))
20791 return NewRoot;
20792 return Root;
20795 // Start the analysis from the Root instruction. If a horizontal reduction is
20796 // found, try to vectorize it. If it is not a horizontal reduction, or
20797 // vectorization is not possible or not effective, and the currently analyzed
20798 // instruction is a binary operation, try to vectorize the operands, using
20799 // pre-order DFS traversal order. If the operands were not vectorized, repeat
20800 // the same procedure considering each operand as a possible root of the
20801 // horizontal reduction.
20802 // Interrupt the process if the Root instruction itself was vectorized or all
20803 // sub-trees no deeper than RecursionMaxDepth were analyzed/vectorized.
20804 // If a horizontal reduction was not matched or vectorized, we collect
20805 // instructions for possible later attempts for vectorization.
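// For example (illustrative), if the root %x = add i32 %a, %b does not match a
// profitable horizontal reduction, %a and %b are pushed onto the worklist (up
// to RecursionMaxDepth) and are later re-examined as potential reduction roots
// or collected as postponed seeds.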
20806 std::queue<std::pair<Instruction *, unsigned>> Stack;
20807 Stack.emplace(SelectRoot(), 0);
20808 SmallPtrSet<Value *, 8> VisitedInstrs;
20809 bool Res = false;
20810 auto &&TryToReduce = [this, &R](Instruction *Inst) -> Value * {
20811 if (R.isAnalyzedReductionRoot(Inst))
20812 return nullptr;
20813 if (!isReductionCandidate(Inst))
20814 return nullptr;
20815 HorizontalReduction HorRdx;
20816 if (!HorRdx.matchAssociativeReduction(R, Inst, *SE, *DL, *TLI))
20817 return nullptr;
20818 return HorRdx.tryToReduce(R, *DL, TTI, *TLI);
20820 auto TryAppendToPostponedInsts = [&](Instruction *FutureSeed) {
20821 if (TryOperandsAsNewSeeds && FutureSeed == Root) {
20822 FutureSeed = getNonPhiOperand(Root, P);
20823 if (!FutureSeed)
20824 return false;
20826 // Do not collect CmpInst or InsertElementInst/InsertValueInst as their
20827 // analysis is done separately.
20828 if (!isa<CmpInst, InsertElementInst, InsertValueInst>(FutureSeed))
20829 PostponedInsts.push_back(FutureSeed);
20830 return true;
20833 while (!Stack.empty()) {
20834 Instruction *Inst;
20835 unsigned Level;
20836 std::tie(Inst, Level) = Stack.front();
20837 Stack.pop();
20838 // Do not try to analyze instruction that has already been vectorized.
20839 // This may happen when we vectorize instruction operands on a previous
20840 // iteration, while the stack was populated before that happened.
20841 if (R.isDeleted(Inst))
20842 continue;
20843 if (Value *VectorizedV = TryToReduce(Inst)) {
20844 Res = true;
20845 if (auto *I = dyn_cast<Instruction>(VectorizedV)) {
20846 // Try to find another reduction.
20847 Stack.emplace(I, Level);
20848 continue;
20850 if (R.isDeleted(Inst))
20851 continue;
20852 } else {
20853 // We could not vectorize `Inst` so try to use it as a future seed.
20854 if (!TryAppendToPostponedInsts(Inst)) {
20855 assert(Stack.empty() && "Expected empty stack");
20856 break;
20860 // Try to vectorize operands.
20861 // Continue analysis for the instruction from the same basic block only to
20862 // save compile time.
20863 if (++Level < RecursionMaxDepth)
20864 for (auto *Op : Inst->operand_values())
20865 if (VisitedInstrs.insert(Op).second)
20866 if (auto *I = dyn_cast<Instruction>(Op))
20867 // Do not try to vectorize CmpInst operands, this is done
20868 // separately.
20869 if (!isa<PHINode, CmpInst, InsertElementInst, InsertValueInst>(I) &&
20870 !R.isDeleted(I) && I->getParent() == BB)
20871 Stack.emplace(I, Level);
20873 return Res;
20876 bool SLPVectorizerPass::vectorizeRootInstruction(PHINode *P, Instruction *Root,
20877 BasicBlock *BB, BoUpSLP &R) {
20878 SmallVector<WeakTrackingVH> PostponedInsts;
20879 bool Res = vectorizeHorReduction(P, Root, BB, R, PostponedInsts);
20880 Res |= tryToVectorize(PostponedInsts, R);
20881 return Res;
20884 bool SLPVectorizerPass::tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
20885 BoUpSLP &R) {
20886 bool Res = false;
20887 for (Value *V : Insts)
20888 if (auto *Inst = dyn_cast<Instruction>(V); Inst && !R.isDeleted(Inst))
20889 Res |= tryToVectorize(Inst, R);
20890 return Res;
20893 bool SLPVectorizerPass::vectorizeInsertValueInst(InsertValueInst *IVI,
20894 BasicBlock *BB, BoUpSLP &R,
20895 bool MaxVFOnly) {
20896 if (!R.canMapToVector(IVI->getType()))
20897 return false;
20899 SmallVector<Value *, 16> BuildVectorOpds;
20900 SmallVector<Value *, 16> BuildVectorInsts;
20901 if (!findBuildAggregate(IVI, TTI, BuildVectorOpds, BuildVectorInsts, R))
20902 return false;
20904 if (MaxVFOnly && BuildVectorOpds.size() == 2) {
20905 R.getORE()->emit([&]() {
20906 return OptimizationRemarkMissed(SV_NAME, "NotPossible", IVI)
20907 << "Cannot SLP vectorize list: only 2 elements of buildvalue, "
20908 "trying reduction first.";
20910 return false;
20912 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IVI << "\n");
20913 // Aggregate value is unlikely to be processed in a vector register.
20914 return tryToVectorizeList(BuildVectorOpds, R, MaxVFOnly);
20917 bool SLPVectorizerPass::vectorizeInsertElementInst(InsertElementInst *IEI,
20918 BasicBlock *BB, BoUpSLP &R,
20919 bool MaxVFOnly) {
20920 SmallVector<Value *, 16> BuildVectorInsts;
20921 SmallVector<Value *, 16> BuildVectorOpds;
20922 SmallVector<int> Mask;
20923 if (!findBuildAggregate(IEI, TTI, BuildVectorOpds, BuildVectorInsts, R) ||
20924 (llvm::all_of(BuildVectorOpds, IsaPred<ExtractElementInst, UndefValue>) &&
20925 isFixedVectorShuffle(BuildVectorOpds, Mask)))
20926 return false;
20928 if (MaxVFOnly && BuildVectorInsts.size() == 2) {
20929 R.getORE()->emit([&]() {
20930 return OptimizationRemarkMissed(SV_NAME, "NotPossible", IEI)
20931 << "Cannot SLP vectorize list: only 2 elements of buildvector, "
20932 "trying reduction first.";
20934 return false;
20936 LLVM_DEBUG(dbgs() << "SLP: array mappable to vector: " << *IEI << "\n");
20937 return tryToVectorizeList(BuildVectorInsts, R, MaxVFOnly);
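/// Groups the (sorted) values in \p Incoming into runs of compatible
/// instructions, as defined by \p Comparator and \p AreCompatible, and tries to
/// vectorize each run via \p TryToVectorizeHelper: first with the \p MaxVFOnly
/// restriction, then, for the remaining same-typed candidates, without it.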
20940 template <typename T>
20941 static bool tryToVectorizeSequence(
20942 SmallVectorImpl<T *> &Incoming, function_ref<bool(T *, T *)> Comparator,
20943 function_ref<bool(T *, T *)> AreCompatible,
20944 function_ref<bool(ArrayRef<T *>, bool)> TryToVectorizeHelper,
20945 bool MaxVFOnly, BoUpSLP &R) {
20946 bool Changed = false;
20947 // Sort by type, parent, operands.
20948 stable_sort(Incoming, Comparator);
20950 // Try to vectorize elements based on their type.
20951 SmallVector<T *> Candidates;
20952 SmallVector<T *> VL;
20953 for (auto *IncIt = Incoming.begin(), *E = Incoming.end(); IncIt != E;
20954 VL.clear()) {
20955 // Look for the next elements with the same type, parent and operand
20956 // kinds.
20957 auto *I = dyn_cast<Instruction>(*IncIt);
20958 if (!I || R.isDeleted(I)) {
20959 ++IncIt;
20960 continue;
20962 auto *SameTypeIt = IncIt;
20963 while (SameTypeIt != E && (!isa<Instruction>(*SameTypeIt) ||
20964 R.isDeleted(cast<Instruction>(*SameTypeIt)) ||
20965 AreCompatible(*SameTypeIt, *IncIt))) {
20966 auto *I = dyn_cast<Instruction>(*SameTypeIt);
20967 ++SameTypeIt;
20968 if (I && !R.isDeleted(I))
20969 VL.push_back(cast<T>(I));
20972 // Try to vectorize them.
20973 unsigned NumElts = VL.size();
20974 LLVM_DEBUG(dbgs() << "SLP: Trying to vectorize starting at nodes ("
20975 << NumElts << ")\n");
20976 // The vectorization is a three-step attempt:
20977 // 1. Try to vectorize instructions with the same/alternate opcodes with the
20978 // size of the maximal register at first.
20979 // 2. Try to vectorize the remaining instructions with the same type, if
20980 // possible. This may result in better vectorization results than trying to
20981 // vectorize only instructions with the same/alternate opcodes.
20982 // 3. Finally, try to vectorize all instructions with the
20983 // same/alternate opcodes only; this may result in some extra final
20984 // vectorization.
20985 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(VL), MaxVFOnly)) {
20986 // Success; start over because instructions might have been changed.
20987 Changed = true;
20988 VL.swap(Candidates);
20989 Candidates.clear();
20990 for (T *V : VL) {
20991 if (auto *I = dyn_cast<Instruction>(V); I && !R.isDeleted(I))
20992 Candidates.push_back(V);
20994 } else {
20995 /// \returns the minimum number of elements that we will attempt to
20996 /// vectorize.
20997 auto GetMinNumElements = [&R](Value *V) {
20998 unsigned EltSize = R.getVectorElementSize(V);
20999 return std::max(2U, R.getMaxVecRegSize() / EltSize);
21001 if (NumElts < GetMinNumElements(*IncIt) &&
21002 (Candidates.empty() ||
21003 Candidates.front()->getType() == (*IncIt)->getType())) {
21004 for (T *V : VL) {
21005 if (auto *I = dyn_cast<Instruction>(V); I && !R.isDeleted(I))
21006 Candidates.push_back(V);
21010 // Final attempt to vectorize instructions with the same types.
21011 if (Candidates.size() > 1 &&
21012 (SameTypeIt == E || (*SameTypeIt)->getType() != (*IncIt)->getType())) {
21013 if (TryToVectorizeHelper(Candidates, /*MaxVFOnly=*/false)) {
21014 // Success; start over because instructions might have been changed.
21015 Changed = true;
21016 } else if (MaxVFOnly) {
21017 // Try to vectorize using small vectors.
21018 SmallVector<T *> VL;
21019 for (auto *It = Candidates.begin(), *End = Candidates.end(); It != End;
21020 VL.clear()) {
21021 auto *I = dyn_cast<Instruction>(*It);
21022 if (!I || R.isDeleted(I)) {
21023 ++It;
21024 continue;
21026 auto *SameTypeIt = It;
21027 while (SameTypeIt != End &&
21028 (!isa<Instruction>(*SameTypeIt) ||
21029 R.isDeleted(cast<Instruction>(*SameTypeIt)) ||
21030 AreCompatible(*SameTypeIt, *It))) {
21031 auto *I = dyn_cast<Instruction>(*SameTypeIt);
21032 ++SameTypeIt;
21033 if (I && !R.isDeleted(I))
21034 VL.push_back(cast<T>(I));
21036 unsigned NumElts = VL.size();
21037 if (NumElts > 1 && TryToVectorizeHelper(ArrayRef(VL),
21038 /*MaxVFOnly=*/false))
21039 Changed = true;
21040 It = SameTypeIt;
21043 Candidates.clear();
21046 // Start over at the next instruction of a different type (or the end).
21047 IncIt = SameTypeIt;
21049 return Changed;
21052 /// Compare two cmp instructions. If IsCompatibility is true, the function
21053 /// returns true if the 2 cmps have same/swapped predicates and compatible
21054 /// corresponding operands. If IsCompatibility is false, it implements a strict weak
21055 /// ordering relation between two cmp instructions, returning true if the first
21056 /// instruction is "less" than the second, i.e. its predicate is less than the
21057 /// predicate of the second or the operands IDs are less than the operands IDs
21058 /// of the second cmp instruction.
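/// For example (illustrative), %c1 = icmp slt i32 %a, %b and
/// %c2 = icmp sgt i32 %b, %a use swapped predicates and the same operands after
/// commuting, so compareCmp<true> treats them as compatible.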
21059 template <bool IsCompatibility>
21060 static bool compareCmp(Value *V, Value *V2, TargetLibraryInfo &TLI,
21061 const DominatorTree &DT) {
21062 assert(isValidElementType(V->getType()) &&
21063 isValidElementType(V2->getType()) &&
21064 "Expected valid element types only.");
21065 if (V == V2)
21066 return IsCompatibility;
21067 auto *CI1 = cast<CmpInst>(V);
21068 auto *CI2 = cast<CmpInst>(V2);
21069 if (CI1->getOperand(0)->getType()->getTypeID() <
21070 CI2->getOperand(0)->getType()->getTypeID())
21071 return !IsCompatibility;
21072 if (CI1->getOperand(0)->getType()->getTypeID() >
21073 CI2->getOperand(0)->getType()->getTypeID())
21074 return false;
21075 if (CI1->getOperand(0)->getType()->getScalarSizeInBits() <
21076 CI2->getOperand(0)->getType()->getScalarSizeInBits())
21077 return !IsCompatibility;
21078 if (CI1->getOperand(0)->getType()->getScalarSizeInBits() >
21079 CI2->getOperand(0)->getType()->getScalarSizeInBits())
21080 return false;
21081 CmpInst::Predicate Pred1 = CI1->getPredicate();
21082 CmpInst::Predicate Pred2 = CI2->getPredicate();
21083 CmpInst::Predicate SwapPred1 = CmpInst::getSwappedPredicate(Pred1);
21084 CmpInst::Predicate SwapPred2 = CmpInst::getSwappedPredicate(Pred2);
21085 CmpInst::Predicate BasePred1 = std::min(Pred1, SwapPred1);
21086 CmpInst::Predicate BasePred2 = std::min(Pred2, SwapPred2);
21087 if (BasePred1 < BasePred2)
21088 return !IsCompatibility;
21089 if (BasePred1 > BasePred2)
21090 return false;
21091 // Compare operands.
21092 bool CI1Preds = Pred1 == BasePred1;
21093 bool CI2Preds = Pred2 == BasePred1;
21094 for (int I = 0, E = CI1->getNumOperands(); I < E; ++I) {
21095 auto *Op1 = CI1->getOperand(CI1Preds ? I : E - I - 1);
21096 auto *Op2 = CI2->getOperand(CI2Preds ? I : E - I - 1);
21097 if (Op1 == Op2)
21098 continue;
21099 if (Op1->getValueID() < Op2->getValueID())
21100 return !IsCompatibility;
21101 if (Op1->getValueID() > Op2->getValueID())
21102 return false;
21103 if (auto *I1 = dyn_cast<Instruction>(Op1))
21104 if (auto *I2 = dyn_cast<Instruction>(Op2)) {
21105 if (IsCompatibility) {
21106 if (I1->getParent() != I2->getParent())
21107 return false;
21108 } else {
21109 // Try to compare nodes with same parent.
21110 DomTreeNodeBase<BasicBlock> *NodeI1 = DT.getNode(I1->getParent());
21111 DomTreeNodeBase<BasicBlock> *NodeI2 = DT.getNode(I2->getParent());
21112 if (!NodeI1)
21113 return NodeI2 != nullptr;
21114 if (!NodeI2)
21115 return false;
21116 assert((NodeI1 == NodeI2) ==
21117 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
21118 "Different nodes should have different DFS numbers");
21119 if (NodeI1 != NodeI2)
21120 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
21122 InstructionsState S = getSameOpcode({I1, I2}, TLI);
21123 if (S.getOpcode() && (IsCompatibility || !S.isAltShuffle()))
21124 continue;
21125 if (IsCompatibility)
21126 return false;
21127 if (I1->getOpcode() != I2->getOpcode())
21128 return I1->getOpcode() < I2->getOpcode();
21131 return IsCompatibility;
21134 template <typename ItT>
21135 bool SLPVectorizerPass::vectorizeCmpInsts(iterator_range<ItT> CmpInsts,
21136 BasicBlock *BB, BoUpSLP &R) {
21137 bool Changed = false;
21138 // Try to find reductions first.
21139 for (CmpInst *I : CmpInsts) {
21140 if (R.isDeleted(I))
21141 continue;
21142 for (Value *Op : I->operands())
21143 if (auto *RootOp = dyn_cast<Instruction>(Op))
21144 Changed |= vectorizeRootInstruction(nullptr, RootOp, BB, R);
21146 // Try to vectorize operands as vector bundles.
21147 for (CmpInst *I : CmpInsts) {
21148 if (R.isDeleted(I))
21149 continue;
21150 Changed |= tryToVectorize(I, R);
21152 // Try to vectorize list of compares.
21153 // Sort by type, compare predicate, etc.
21154 auto CompareSorter = [&](Value *V, Value *V2) {
21155 if (V == V2)
21156 return false;
21157 return compareCmp<false>(V, V2, *TLI, *DT);
21160 auto AreCompatibleCompares = [&](Value *V1, Value *V2) {
21161 if (V1 == V2)
21162 return true;
21163 return compareCmp<true>(V1, V2, *TLI, *DT);
21166 SmallVector<Value *> Vals;
21167 for (Instruction *V : CmpInsts)
21168 if (!R.isDeleted(V) && isValidElementType(getValueType(V)))
21169 Vals.push_back(V);
21170 if (Vals.size() <= 1)
21171 return Changed;
21172 Changed |= tryToVectorizeSequence<Value>(
21173 Vals, CompareSorter, AreCompatibleCompares,
21174 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) {
21175 // Exclude possible reductions from other blocks.
21176 bool ArePossiblyReducedInOtherBlock = any_of(Candidates, [](Value *V) {
21177 return any_of(V->users(), [V](User *U) {
21178 auto *Select = dyn_cast<SelectInst>(U);
21179 return Select &&
21180 Select->getParent() != cast<Instruction>(V)->getParent();
21183 if (ArePossiblyReducedInOtherBlock)
21184 return false;
21185 return tryToVectorizeList(Candidates, R, MaxVFOnly);
21187 /*MaxVFOnly=*/true, R);
21188 return Changed;
21191 bool SLPVectorizerPass::vectorizeInserts(InstSetVector &Instructions,
21192 BasicBlock *BB, BoUpSLP &R) {
21193 assert(all_of(Instructions, IsaPred<InsertElementInst, InsertValueInst>) &&
21194 "This function only accepts Insert instructions");
21195 bool OpsChanged = false;
21196 SmallVector<WeakTrackingVH> PostponedInsts;
21197 for (auto *I : reverse(Instructions)) {
21198 // pass1 - try to match and vectorize a buildvector sequence for MaxVF only.
21199 if (R.isDeleted(I) || isa<CmpInst>(I))
21200 continue;
21201 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
21202 OpsChanged |=
21203 vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/true);
21204 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
21205 OpsChanged |=
21206 vectorizeInsertElementInst(LastInsertElem, BB, R, /*MaxVFOnly=*/true);
21208 // pass2 - try to vectorize reductions only
21209 if (R.isDeleted(I))
21210 continue;
21211 OpsChanged |= vectorizeHorReduction(nullptr, I, BB, R, PostponedInsts);
21212 if (R.isDeleted(I) || isa<CmpInst>(I))
21213 continue;
21214 // pass3 - try to match and vectorize a buildvector sequence.
21215 if (auto *LastInsertValue = dyn_cast<InsertValueInst>(I)) {
21216 OpsChanged |=
21217 vectorizeInsertValueInst(LastInsertValue, BB, R, /*MaxVFOnly=*/false);
21218 } else if (auto *LastInsertElem = dyn_cast<InsertElementInst>(I)) {
21219 OpsChanged |= vectorizeInsertElementInst(LastInsertElem, BB, R,
21220 /*MaxVFOnly=*/false);
21223 // Now try to vectorize postponed instructions.
21224 OpsChanged |= tryToVectorize(PostponedInsts, R);
21226 Instructions.clear();
21227 return OpsChanged;
21230 bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
21231 bool Changed = false;
21232 SmallVector<Value *, 4> Incoming;
21233 SmallPtrSet<Value *, 16> VisitedInstrs;
21234 // Maps phi nodes to the non-phi nodes found in the use tree for each phi
21235 // node. This allows us to better identify the chains that can be
21236 // vectorized.
21237 DenseMap<Value *, SmallVector<Value *, 4>> PHIToOpcodes;
21238 auto PHICompare = [this, &PHIToOpcodes](Value *V1, Value *V2) {
21239 assert(isValidElementType(V1->getType()) &&
21240 isValidElementType(V2->getType()) &&
21241 "Expected vectorizable types only.");
21242 // It is fine to compare type IDs here, since we expect only vectorizable
21243 // types, like ints, floats and pointers; we don't care about other types.
21244 if (V1->getType()->getTypeID() < V2->getType()->getTypeID())
21245 return true;
21246 if (V1->getType()->getTypeID() > V2->getType()->getTypeID())
21247 return false;
21248 if (V1->getType()->getScalarSizeInBits() <
21249 V2->getType()->getScalarSizeInBits())
21250 return true;
21251 if (V1->getType()->getScalarSizeInBits() >
21252 V2->getType()->getScalarSizeInBits())
21253 return false;
21254 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
21255 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
21256 if (Opcodes1.size() < Opcodes2.size())
21257 return true;
21258 if (Opcodes1.size() > Opcodes2.size())
21259 return false;
21260 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
21262 // Instructions come first.
21263 auto *I1 = dyn_cast<Instruction>(Opcodes1[I]);
21264 auto *I2 = dyn_cast<Instruction>(Opcodes2[I]);
21265 if (I1 && I2) {
21266 DomTreeNodeBase<BasicBlock> *NodeI1 = DT->getNode(I1->getParent());
21267 DomTreeNodeBase<BasicBlock> *NodeI2 = DT->getNode(I2->getParent());
21268 if (!NodeI1)
21269 return NodeI2 != nullptr;
21270 if (!NodeI2)
21271 return false;
21272 assert((NodeI1 == NodeI2) ==
21273 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
21274 "Different nodes should have different DFS numbers");
21275 if (NodeI1 != NodeI2)
21276 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
21277 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21278 if (S.getOpcode() && !S.isAltShuffle())
21279 continue;
21280 return I1->getOpcode() < I2->getOpcode();
21282 if (I1)
21283 return true;
21284 if (I2)
21285 return false;
21288 // Non-undef constants come next.
21289 bool C1 = isa<Constant>(Opcodes1[I]) && !isa<UndefValue>(Opcodes1[I]);
21290 bool C2 = isa<Constant>(Opcodes2[I]) && !isa<UndefValue>(Opcodes2[I]);
21291 if (C1 && C2)
21292 continue;
21293 if (C1)
21294 return true;
21295 if (C2)
21296 return false;
21298 bool U1 = isa<UndefValue>(Opcodes1[I]);
21299 bool U2 = isa<UndefValue>(Opcodes2[I]);
21301 // Non-constant non-instructions come next.
21302 if (!U1 && !U2) {
21303 auto ValID1 = Opcodes1[I]->getValueID();
21304 auto ValID2 = Opcodes2[I]->getValueID();
21305 if (ValID1 == ValID2)
21306 continue;
21307 if (ValID1 < ValID2)
21308 return true;
21309 if (ValID1 > ValID2)
21310 return false;
21312 if (!U1)
21313 return true;
21314 if (!U2)
21315 return false;
21317 // Undefs come last.
21318 assert(U1 && U2 && "The only thing left should be undef & undef.");
21320 return false;
21322 auto AreCompatiblePHIs = [&PHIToOpcodes, this, &R](Value *V1, Value *V2) {
21323 if (V1 == V2)
21324 return true;
21325 if (V1->getType() != V2->getType())
21326 return false;
21327 ArrayRef<Value *> Opcodes1 = PHIToOpcodes[V1];
21328 ArrayRef<Value *> Opcodes2 = PHIToOpcodes[V2];
21329 if (Opcodes1.size() != Opcodes2.size())
21330 return false;
21331 for (int I = 0, E = Opcodes1.size(); I < E; ++I) {
21332 // Undefs are compatible with any other value.
21333 if (isa<UndefValue>(Opcodes1[I]) || isa<UndefValue>(Opcodes2[I]))
21334 continue;
21335 if (auto *I1 = dyn_cast<Instruction>(Opcodes1[I]))
21336 if (auto *I2 = dyn_cast<Instruction>(Opcodes2[I])) {
21337 if (R.isDeleted(I1) || R.isDeleted(I2))
21338 return false;
21339 if (I1->getParent() != I2->getParent())
21340 return false;
21341 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21342 if (S.getOpcode())
21343 continue;
21344 return false;
21346 if (isa<Constant>(Opcodes1[I]) && isa<Constant>(Opcodes2[I]))
21347 continue;
21348 if (Opcodes1[I]->getValueID() != Opcodes2[I]->getValueID())
21349 return false;
21351 return true;
21354 bool HaveVectorizedPhiNodes = false;
21355 do {
21356 // Collect the incoming values from the PHIs.
21357 Incoming.clear();
21358 for (Instruction &I : *BB) {
21359 auto *P = dyn_cast<PHINode>(&I);
21360 if (!P || P->getNumIncomingValues() > MaxPHINumOperands)
21361 break;
21363 // No need to analyze deleted, vectorized and non-vectorizable
21364 // instructions.
21365 if (!VisitedInstrs.count(P) && !R.isDeleted(P) &&
21366 isValidElementType(P->getType()))
21367 Incoming.push_back(P);
21370 if (Incoming.size() <= 1)
21371 break;
21373 // Find the corresponding non-phi nodes for better matching when trying to
21374 // build the tree.
21375 for (Value *V : Incoming) {
21376 SmallVectorImpl<Value *> &Opcodes =
21377 PHIToOpcodes.try_emplace(V).first->getSecond();
21378 if (!Opcodes.empty())
21379 continue;
21380 SmallVector<Value *, 4> Nodes(1, V);
21381 SmallPtrSet<Value *, 4> Visited;
21382 while (!Nodes.empty()) {
21383 auto *PHI = cast<PHINode>(Nodes.pop_back_val());
21384 if (!Visited.insert(PHI).second)
21385 continue;
21386 for (Value *V : PHI->incoming_values()) {
21387 if (auto *PHI1 = dyn_cast<PHINode>((V))) {
21388 Nodes.push_back(PHI1);
21389 continue;
21391 Opcodes.emplace_back(V);
21396 HaveVectorizedPhiNodes = tryToVectorizeSequence<Value>(
21397 Incoming, PHICompare, AreCompatiblePHIs,
21398 [this, &R](ArrayRef<Value *> Candidates, bool MaxVFOnly) {
21399 return tryToVectorizeList(Candidates, R, MaxVFOnly);
21401 /*MaxVFOnly=*/true, R);
21402 Changed |= HaveVectorizedPhiNodes;
21403 if (HaveVectorizedPhiNodes && any_of(PHIToOpcodes, [&](const auto &P) {
21404 auto *PHI = dyn_cast<PHINode>(P.first);
21405 return !PHI || R.isDeleted(PHI);
21407 PHIToOpcodes.clear();
21408 VisitedInstrs.insert(Incoming.begin(), Incoming.end());
21409 } while (HaveVectorizedPhiNodes);
21411 VisitedInstrs.clear();
21413 InstSetVector PostProcessInserts;
21414 SmallSetVector<CmpInst *, 8> PostProcessCmps;
21415 // Vectorizes Inserts in `PostProcessInserts` and, if `VectorizeCmps` is true,
21416 // also vectorizes `PostProcessCmps`.
21417 auto VectorizeInsertsAndCmps = [&](bool VectorizeCmps) {
21418 bool Changed = vectorizeInserts(PostProcessInserts, BB, R);
21419 if (VectorizeCmps) {
21420 Changed |= vectorizeCmpInsts(reverse(PostProcessCmps), BB, R);
21421 PostProcessCmps.clear();
21423 PostProcessInserts.clear();
21424 return Changed;
21426 // Returns true if `I` is in `PostProcessInserts` or `PostProcessCmps`.
21427 auto IsInPostProcessInstrs = [&](Instruction *I) {
21428 if (auto *Cmp = dyn_cast<CmpInst>(I))
21429 return PostProcessCmps.contains(Cmp);
21430 return isa<InsertElementInst, InsertValueInst>(I) &&
21431 PostProcessInserts.contains(I);
21433 // Returns true if `I` is an instruction without users, like a terminator, a
21434 // store, or a function call with an ignored return value. Unused instructions
21435 // are recognized based on the instruction type, except for CallInst and InvokeInst.
21436 auto HasNoUsers = [](Instruction *I) {
21437 return I->use_empty() &&
21438 (I->getType()->isVoidTy() || isa<CallInst, InvokeInst>(I));
21440 for (BasicBlock::iterator It = BB->begin(), E = BB->end(); It != E; ++It) {
21441 // Skip instructions with scalable types. The number of elements is unknown
21442 // at compile time for scalable types.
21443 if (isa<ScalableVectorType>(It->getType()))
21444 continue;
21446 // Skip instructions marked for deletion.
21447 if (R.isDeleted(&*It))
21448 continue;
21449 // We may go through BB multiple times, so skip the ones we have already checked.
21450 if (!VisitedInstrs.insert(&*It).second) {
21451 if (HasNoUsers(&*It) &&
21452 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator())) {
21453 // We would like to start over since some instructions are deleted
21454 // and the iterator may have been invalidated.
21455 Changed = true;
21456 It = BB->begin();
21457 E = BB->end();
21459 continue;
21462 if (isa<DbgInfoIntrinsic>(It))
21463 continue;
21465 // Try to vectorize reductions that use PHINodes.
21466 if (PHINode *P = dyn_cast<PHINode>(It)) {
21467 // Check that the PHI is a reduction PHI.
21468 if (P->getNumIncomingValues() == 2) {
21469 // Try to match and vectorize a horizontal reduction.
21470 Instruction *Root = getReductionInstr(DT, P, BB, LI);
21471 if (Root && vectorizeRootInstruction(P, Root, BB, R)) {
21472 Changed = true;
21473 It = BB->begin();
21474 E = BB->end();
21475 continue;
21478 // Try to vectorize the incoming values of the PHI, to catch reductions
21479 // that feed into PHIs.
21480 for (unsigned I : seq<unsigned>(P->getNumIncomingValues())) {
21481 // Skip if the incoming block is the current BB for now. Also, bypass
21482 // unreachable IR for efficiency and to avoid crashing.
21483 // TODO: Collect the skipped incoming values and try to vectorize them
21484 // after processing BB.
21485 if (BB == P->getIncomingBlock(I) ||
21486 !DT->isReachableFromEntry(P->getIncomingBlock(I)))
21487 continue;
21489 // Postponed instructions should not be vectorized here, delay their
21490 // vectorization.
21491 if (auto *PI = dyn_cast<Instruction>(P->getIncomingValue(I));
21492 PI && !IsInPostProcessInstrs(PI)) {
21493 bool Res =
21494 vectorizeRootInstruction(nullptr, PI, P->getIncomingBlock(I), R);
21495 Changed |= Res;
21496 if (Res && R.isDeleted(P)) {
21497 It = BB->begin();
21498 E = BB->end();
21499 break;
21503 continue;
21506 if (HasNoUsers(&*It)) {
21507 bool OpsChanged = false;
21508 auto *SI = dyn_cast<StoreInst>(It);
21509 bool TryToVectorizeRoot = ShouldStartVectorizeHorAtStore || !SI;
21510 if (SI) {
21511 auto *I = Stores.find(getUnderlyingObject(SI->getPointerOperand()));
21512 // Try to vectorize the chain in the store, if this is the only store to the
21513 // address in the block.
21514 // TODO: This is just a temporary solution to save compile time. Need
21515 // to investigate if we can safely turn on slp-vectorize-hor-store
21516 // instead to allow lookup for reduction chains in all non-vectorized
21517 // stores (need to check side effects and compile time).
21518 TryToVectorizeRoot |= (I == Stores.end() || I->second.size() == 1) &&
21519 SI->getValueOperand()->hasOneUse();
21521 if (TryToVectorizeRoot) {
21522 for (auto *V : It->operand_values()) {
21523 // Postponed instructions should not be vectorized here, delay their
21524 // vectorization.
21525 if (auto *VI = dyn_cast<Instruction>(V);
21526 VI && !IsInPostProcessInstrs(VI))
21527 // Try to match and vectorize a horizontal reduction.
21528 OpsChanged |= vectorizeRootInstruction(nullptr, VI, BB, R);
21531 // Start vectorization of post-process list of instructions from the
21532 // top-tree instructions to try to vectorize as many instructions as
21533 // possible.
21534 OpsChanged |=
21535 VectorizeInsertsAndCmps(/*VectorizeCmps=*/It->isTerminator());
21536 if (OpsChanged) {
21537 // We would like to start over since some instructions are deleted
21538 // and the iterator may have been invalidated.
21539 Changed = true;
21540 It = BB->begin();
21541 E = BB->end();
21542 continue;
21546 if (isa<InsertElementInst, InsertValueInst>(It))
21547 PostProcessInserts.insert(&*It);
21548 else if (isa<CmpInst>(It))
21549 PostProcessCmps.insert(cast<CmpInst>(&*It));
21552 return Changed;
21555 bool SLPVectorizerPass::vectorizeGEPIndices(BasicBlock *BB, BoUpSLP &R) {
21556 auto Changed = false;
21557 for (auto &Entry : GEPs) {
21558 // If the getelementptr list has fewer than two elements, there's nothing
21559 // to do.
21560 if (Entry.second.size() < 2)
21561 continue;
21563 LLVM_DEBUG(dbgs() << "SLP: Analyzing a getelementptr list of length "
21564 << Entry.second.size() << ".\n");
21566 // Process the GEP list in chunks suitable for the target's supported
21567 // vector size. If a vector register can't hold 1 element, we are done. We
21568 // are trying to vectorize the index computations, so the maximum number of
21569 // elements is based on the size of the index expression, rather than the
21570 // size of the GEP itself (the target's pointer size).
21571 auto *It = find_if(Entry.second, [&](GetElementPtrInst *GEP) {
21572 return !R.isDeleted(GEP);
21574 if (It == Entry.second.end())
21575 continue;
21576 unsigned MaxVecRegSize = R.getMaxVecRegSize();
21577 unsigned EltSize = R.getVectorElementSize(*(*It)->idx_begin());
21578 if (MaxVecRegSize < EltSize)
21579 continue;
21581 unsigned MaxElts = MaxVecRegSize / EltSize;
21582 for (unsigned BI = 0, BE = Entry.second.size(); BI < BE; BI += MaxElts) {
21583 auto Len = std::min<unsigned>(BE - BI, MaxElts);
21584 ArrayRef<GetElementPtrInst *> GEPList(&Entry.second[BI], Len);
21586 // Initialize a set of candidate getelementptrs. Note that we use a
21587 // SetVector here to preserve program order. If the index computations
21588 // are vectorizable and begin with loads, we want to minimize the chance
21589 // of having to reorder them later.
21590 SetVector<Value *> Candidates(GEPList.begin(), GEPList.end());
21592 // Some of the candidates may have already been vectorized after we
21593 // initially collected them, or their index has been optimized to a constant value.
21594 // If so, they are marked as deleted, so remove them from the set of
21595 // candidates.
21596 Candidates.remove_if([&R](Value *I) {
21597 return R.isDeleted(cast<Instruction>(I)) ||
21598 isa<Constant>(cast<GetElementPtrInst>(I)->idx_begin()->get());
21601 // Remove from the set of candidates all pairs of getelementptrs with
21602 // constant differences. Such getelementptrs are likely not good
21603 // candidates for vectorization in a bottom-up phase since one can be
21604 // computed from the other. We also ensure all candidate getelementptr
21605 // indices are unique.
21606 for (int I = 0, E = GEPList.size(); I < E && Candidates.size() > 1; ++I) {
21607 auto *GEPI = GEPList[I];
21608 if (!Candidates.count(GEPI))
21609 continue;
21610 const SCEV *SCEVI = SE->getSCEV(GEPList[I]);
21611 for (int J = I + 1; J < E && Candidates.size() > 1; ++J) {
21612 auto *GEPJ = GEPList[J];
21613 const SCEV *SCEVJ = SE->getSCEV(GEPList[J]);
21614 if (isa<SCEVConstant>(SE->getMinusSCEV(SCEVI, SCEVJ))) {
21615 Candidates.remove(GEPI);
21616 Candidates.remove(GEPJ);
21617 } else if (GEPI->idx_begin()->get() == GEPJ->idx_begin()->get()) {
21618 Candidates.remove(GEPJ);
21623 // We break out of the above computation as soon as we know there are
21624 // fewer than two candidates remaining.
21625 if (Candidates.size() < 2)
21626 continue;
21628 // Add the single, non-constant index of each candidate to the bundle. We
21629 // ensured the indices met these constraints when we originally collected
21630 // the getelementptrs.
21631 SmallVector<Value *, 16> Bundle(Candidates.size());
21632 auto BundleIndex = 0u;
21633 for (auto *V : Candidates) {
21634 auto *GEP = cast<GetElementPtrInst>(V);
21635 auto *GEPIdx = GEP->idx_begin()->get();
21636 assert(GEP->getNumIndices() == 1 && !isa<Constant>(GEPIdx));
21637 Bundle[BundleIndex++] = GEPIdx;
21640 // Try and vectorize the indices. We are currently only interested in
21641 // gather-like cases of the form:
21643 // ... = g[a[0] - b[0]] + g[a[1] - b[1]] + ...
21645 // where the loads of "a", the loads of "b", and the subtractions can be
21646 // performed in parallel. It's likely that detecting this pattern in a
21647 // bottom-up phase will be simpler and less costly than building a
21648 // full-blown top-down phase beginning at the consecutive loads.
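// In the example above (illustrative), the bundle holds the two index
// expressions a[0] - b[0] and a[1] - b[1]; vectorizing that list lets the
// loads of a and b and the subtractions become vector operations, while the
// indexed loads from g remain scalar.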
21649 Changed |= tryToVectorizeList(Bundle, R);
21652 return Changed;
21655 bool SLPVectorizerPass::vectorizeStoreChains(BoUpSLP &R) {
21656 bool Changed = false;
21657 // Sort by type, base pointer and value operand. Value operands must be
21658 // compatible (have the same opcode, same parent), otherwise it is
21659 // definitely not profitable to try to vectorize them.
21660 auto &&StoreSorter = [this](StoreInst *V, StoreInst *V2) {
21661 if (V->getValueOperand()->getType()->getTypeID() <
21662 V2->getValueOperand()->getType()->getTypeID())
21663 return true;
21664 if (V->getValueOperand()->getType()->getTypeID() >
21665 V2->getValueOperand()->getType()->getTypeID())
21666 return false;
21667 if (V->getPointerOperandType()->getTypeID() <
21668 V2->getPointerOperandType()->getTypeID())
21669 return true;
21670 if (V->getPointerOperandType()->getTypeID() >
21671 V2->getPointerOperandType()->getTypeID())
21672 return false;
21673 if (V->getValueOperand()->getType()->getScalarSizeInBits() <
21674 V2->getValueOperand()->getType()->getScalarSizeInBits())
21675 return true;
21676 if (V->getValueOperand()->getType()->getScalarSizeInBits() >
21677 V2->getValueOperand()->getType()->getScalarSizeInBits())
21678 return false;
21679 // UndefValues are compatible with all other values.
21680 if (isa<UndefValue>(V->getValueOperand()) ||
21681 isa<UndefValue>(V2->getValueOperand()))
21682 return false;
21683 if (auto *I1 = dyn_cast<Instruction>(V->getValueOperand()))
21684 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
21685 DomTreeNodeBase<llvm::BasicBlock> *NodeI1 =
21686 DT->getNode(I1->getParent());
21687 DomTreeNodeBase<llvm::BasicBlock> *NodeI2 =
21688 DT->getNode(I2->getParent());
21689 assert(NodeI1 && "Should only process reachable instructions");
21690 assert(NodeI2 && "Should only process reachable instructions");
21691 assert((NodeI1 == NodeI2) ==
21692 (NodeI1->getDFSNumIn() == NodeI2->getDFSNumIn()) &&
21693 "Different nodes should have different DFS numbers");
21694 if (NodeI1 != NodeI2)
21695 return NodeI1->getDFSNumIn() < NodeI2->getDFSNumIn();
21696 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21697 if (S.getOpcode())
21698 return false;
21699 return I1->getOpcode() < I2->getOpcode();
21701 if (isa<Constant>(V->getValueOperand()) &&
21702 isa<Constant>(V2->getValueOperand()))
21703 return false;
21704 return V->getValueOperand()->getValueID() <
21705 V2->getValueOperand()->getValueID();
21708 auto &&AreCompatibleStores = [this](StoreInst *V1, StoreInst *V2) {
21709 if (V1 == V2)
21710 return true;
21711 if (V1->getValueOperand()->getType() != V2->getValueOperand()->getType())
21712 return false;
21713 if (V1->getPointerOperandType() != V2->getPointerOperandType())
21714 return false;
21715 // Undefs are compatible with any other value.
21716 if (isa<UndefValue>(V1->getValueOperand()) ||
21717 isa<UndefValue>(V2->getValueOperand()))
21718 return true;
21719 if (auto *I1 = dyn_cast<Instruction>(V1->getValueOperand()))
21720 if (auto *I2 = dyn_cast<Instruction>(V2->getValueOperand())) {
21721 if (I1->getParent() != I2->getParent())
21722 return false;
21723 InstructionsState S = getSameOpcode({I1, I2}, *TLI);
21724 return S.getOpcode() > 0;
21726 if (isa<Constant>(V1->getValueOperand()) &&
21727 isa<Constant>(V2->getValueOperand()))
21728 return true;
21729 return V1->getValueOperand()->getValueID() ==
21730 V2->getValueOperand()->getValueID();
21733 // Attempt to sort and vectorize each of the store-groups.
21734 DenseSet<std::tuple<Value *, Value *, Value *, Value *, unsigned>> Attempted;
21735 for (auto &Pair : Stores) {
21736 if (Pair.second.size() < 2)
21737 continue;
21739 LLVM_DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
21740 << Pair.second.size() << ".\n");
21742 if (!isValidElementType(Pair.second.front()->getValueOperand()->getType()))
21743 continue;
21745 // Reverse stores to do bottom-to-top analysis. This is important if there
21746 // are several stores to the same address; in this case we need to follow
21747 // the store order (reversed to meet the memory dependencies).
21748 SmallVector<StoreInst *> ReversedStores(Pair.second.rbegin(),
21749 Pair.second.rend());
21750 Changed |= tryToVectorizeSequence<StoreInst>(
21751 ReversedStores, StoreSorter, AreCompatibleStores,
21752 [&](ArrayRef<StoreInst *> Candidates, bool) {
21753 return vectorizeStores(Candidates, R, Attempted);
21755 /*MaxVFOnly=*/false, R);
21757 return Changed;