//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VECTORUTILS_H
#define LLVM_ANALYSIS_VECTORUTILS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/CheckedArithmetic.h"
namespace llvm {

template <typename T> class ArrayRef;
class DemandedBits;
class GetElementPtrInst;
template <typename InstTy> class InterleaveGroup;
class Loop;
class ScalarEvolution;
class TargetTransformInfo;
class Type;
class Value;

namespace Intrinsic {
enum ID : unsigned;
}
/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all scalars
/// for the scalar form of the intrinsic and all vectors (or scalars handled by
/// hasVectorInstrinsicScalarOpd) for the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);
/// Identifies if the vector form of the intrinsic has a scalar operand.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
/// Returns intrinsic ID for call.
/// For the given call instruction it finds the matching intrinsic and returns
/// its intrinsic ID; if no such intrinsic exists, it returns
/// Intrinsic::not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
                                          const TargetLibraryInfo *TLI);
/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
/// If a value has only one user that is a CastInst, return it.
Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
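///
/// For example, in a C loop such as
///
///   for (i = 0; i < n; ++i)
///     sum += a[i * stride];
///
/// calling this on the pointer operand of the load of a[i * stride] is
/// expected to return the `stride` value. The exact IR shape here is an
/// illustrative assumption, not part of this interface's contract.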
Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then
/// extracted from a vector.
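///
/// A minimal IR sketch (illustrative value names):
///
///   %v = insertelement <4 x float> %vec, float %x, i32 1
///
/// Here, findScalarElement(%v, 1) is expected to return %x.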
Value *findScalarElement(Value *V, unsigned EltNo);
/// Get splat value if the input is a splat vector or return nullptr.
/// The value may be extracted from a splat constants vector or from
/// a sequence of instructions that broadcast a single value into a vector.
const Value *getSplatValue(const Value *V);
/// Return true if the input value is known to be a vector with all identical
/// elements (potentially including undefined elements).
/// This may be more powerful than the related getSplatValue() because it is
/// not limited by finding a scalar source value to a splatted vector.
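///
/// For example, given the common insert + shuffle broadcast pattern
/// (illustrative names):
///
///   %ins = insertelement <4 x i32> undef, i32 %x, i32 0
///   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
///                          <4 x i32> zeroinitializer
///
/// isSplatValue(%splat) returns true, and getSplatValue(%splat) above is
/// expected to return %x.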
bool isSplatValue(const Value *V, unsigned Depth = 0);
/// Compute a map of integer instructions to their minimum legal type
/// size.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
/// type (e.g. i32) whenever arithmetic is performed on them.
///
/// For targets with native i8 or i16 operations, usually InstCombine can shrink
/// the arithmetic type down again. However InstCombine refuses to create
/// illegal types, so for targets without i8 or i16 registers, the lengthening
/// and shrinking remains.
///
/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
/// their scalar equivalents do not, so during vectorization it is important to
/// remove these lengthens and truncates when deciding the profitability of
/// vectorization.
///
/// This function analyzes the given range of instructions and determines the
/// minimum type size each can be converted to. It attempts to remove or
/// minimize type size changes across each def-use chain, so for example in the
/// following code:
///
///   %1 = load i8, i8*
///   %2 = add i8 %1, 2
///   %3 = load i16, i16*
///   %4 = zext i8 %2 to i32
///   %5 = zext i16 %3 to i32
///   %6 = add i32 %4, %5
///   %7 = trunc i32 %6 to i16
///
/// Instruction %6 must be done at least in i16, so computeMinimumValueSizes
/// will return: {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
///
/// If the optional TargetTransformInfo is provided, this function tries harder
/// to do less work by only looking at illegal types.
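///
/// A hedged usage sketch (the Loop `L`, DemandedBits `DB` and
/// TargetTransformInfo `TTI` are assumed to be available from the caller's
/// existing analyses):
///
///   MapVector<Instruction *, uint64_t> MinBWs =
///       computeMinimumValueSizes(L->getBlocks(), DB, &TTI);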
MapVector<Instruction *, uint64_t>
computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                         const TargetTransformInfo *TTI=nullptr);
/// Compute the union of two access-group lists.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);
/// Compute the access-group list of access groups that @p Inst1 and @p Inst2
/// are both in. If either instruction does not access memory at all, it is
/// considered to be in every list.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *intersectAccessGroups(const Instruction *Inst1,
                              const Instruction *Inst2);
/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
/// MD_nontemporal, MD_access_group].
/// For K in Kinds, we get the MDNode for K from each of the
/// elements of VL, compute their "intersection" (i.e., the most generic
/// metadata value that covers all of the individual values), and set I's
/// metadata for K equal to the intersection value.
///
/// This function always sets a (possibly null) value for each K in Kinds.
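///
/// A hedged usage sketch (illustrative names): after widening a group of
/// scalar memory instructions `Scalars` (held as Value pointers) into a new
/// vector instruction `VecInst`, the combined metadata can be attached with:
///
///   propagateMetadata(VecInst, Scalars);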
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
/// Create a mask that filters the members of an interleave group where there
/// are gaps.
///
/// For example, the mask for \p Group with interleave-factor 3
/// and \p VF 4, that has only its first member present is:
///
///   <1,0,0,1,0,0,1,0,0,1,0,0>
///
/// Note: The result is a mask of 0's and 1's, as opposed to the other
/// create[*]Mask() utilities which create a shuffle mask (mask that
/// consists of indices).
Constant *createBitMaskForGaps(IRBuilder<> &Builder, unsigned VF,
                               const InterleaveGroup<Instruction> &Group);
/// Create a mask with replicated elements.
///
/// This function creates a shuffle mask for replicating each of the \p VF
/// elements in a vector \p ReplicationFactor times. It can be used to
/// transform a mask of \p VF elements into a mask of
/// \p VF * \p ReplicationFactor elements used by a predicated
/// interleaved-group of loads/stores whose Interleaved-factor ==
/// \p ReplicationFactor.
///
/// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
///
///   <0,0,0,1,1,1,2,2,2,3,3,3>
Constant *createReplicatedMask(IRBuilder<> &Builder, unsigned ReplicationFactor,
                               unsigned VF);
/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
/// form:
///
///   <0, VF, VF * 2, ..., VF * (NumVecs - 1), 1, VF + 1, VF * 2 + 1, ...>
///
/// For example, the mask for VF = 4 and NumVecs = 2 is:
///
///   <0, 4, 1, 5, 2, 6, 3, 7>.
Constant *createInterleaveMask(IRBuilder<> &Builder, unsigned VF,
                               unsigned NumVecs);
/// Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
/// interleaved vector into separate vectors of vectorization factor \p VF. The
/// mask is of the form:
///
///   <Start, Start + Stride, ..., Start + Stride * (VF - 1)>
///
/// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
///
///   <0, 2, 4, 6>
Constant *createStrideMask(IRBuilder<> &Builder, unsigned Start,
                           unsigned Stride, unsigned VF);
/// Create a sequential shuffle mask.
///
/// This function creates a shuffle mask whose elements are sequential and
/// begin at \p Start. The mask contains \p NumInts integers and is padded with
/// \p NumUndefs undef values. The mask is of the form:
///
///   <Start, Start + 1, ... Start + NumInts - 1, undef_1, ... undef_NumUndefs>
///
/// For example, the mask for Start = 0, NumInts = 4, and NumUndefs = 4 is:
///
///   <0, 1, 2, 3, undef, undef, undef, undef>
Constant *createSequentialMask(IRBuilder<> &Builder, unsigned Start,
                               unsigned NumInts, unsigned NumUndefs);
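
/// A hedged usage sketch for the mask helpers above (assuming a live
/// IRBuilder<> `B`); the expected constants follow the examples given in the
/// comments:
///
///   Constant *IM = createInterleaveMask(B, /*VF=*/4, /*NumVecs=*/2);
///   // <0, 4, 1, 5, 2, 6, 3, 7>
///   Constant *SM = createStrideMask(B, /*Start=*/0, /*Stride=*/2, /*VF=*/4);
///   // <0, 2, 4, 6>
///   Constant *QM = createSequentialMask(B, /*Start=*/0, /*NumInts=*/4,
///                                       /*NumUndefs=*/4);
///   // <0, 1, 2, 3, undef, undef, undef, undef>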
/// Concatenate a list of vectors.
///
/// This function generates code that concatenates the vectors in \p Vecs into
/// a single large vector. The number of vectors should be greater than one,
/// and their element types should be the same. The number of elements in the
/// vectors should also be the same; however, if the last vector has fewer
/// elements, it will be padded with undefs.
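///
/// A hedged sketch (illustrative names): given two <4 x i32> values V0 and V1
/// and a live IRBuilder<> `B`, the following is expected to produce a single
/// <8 x i32> value:
///
///   Value *Wide = concatenateVectors(B, {V0, V1});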
Value *concatenateVectors(IRBuilder<> &Builder, ArrayRef<Value *> Vecs);
/// Given a mask vector of the form <Y x i1>, return true if all of the
/// elements of this predicate mask are false or undef. That is, return true
/// if all lanes can be assumed inactive.
bool maskIsAllZeroOrUndef(Value *Mask);
/// Given a mask vector of the form <Y x i1>, return true if all of the
/// elements of this predicate mask are true or undef. That is, return true
/// if all lanes can be assumed active.
bool maskIsAllOneOrUndef(Value *Mask);
/// Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y)
/// with a bit set for each lane which may be active.
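///
/// For example, for the constant mask <i1 1, i1 undef, i1 0, i1 1>, lanes 0,
/// 1 and 3 may be active (per the semantics above, an undef lane may be
/// active), so the returned APInt is expected to have bits 0, 1 and 3 set.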
APInt possiblyDemandedEltsInMask(Value *Mask);
/// The group of interleaved loads/stores sharing the same stride and
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than interleaved factor, which is equal to the absolute
/// value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
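///
/// A hedged sketch of how the load group above could be registered
/// (illustrative variable names):
///
///   InterleaveGroup<Instruction> Group(LoadA0, /*Stride=*/4, /*Align=*/4);
///   Group.insertMember(LoadA1, /*Index=*/1, /*NewAlign=*/4);
///   Group.insertMember(LoadA3, /*Index=*/3, /*NewAlign=*/4);
///   // getFactor() == 4, and getMember(2) == nullptr because index 2 is a gap.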
template <typename InstTy> class InterleaveGroup {
public:
  InterleaveGroup(uint32_t Factor, bool Reverse, uint32_t Align)
      : Factor(Factor), Reverse(Reverse), Align(Align), InsertPos(nullptr) {}
  InterleaveGroup(InstTy *Instr, int32_t Stride, uint32_t Align)
      : Align(Align), InsertPos(Instr) {
    assert(Align && "The alignment should be non-zero");

    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
  }
  bool isReverse() const { return Reverse; }
  uint32_t getFactor() const { return Factor; }
  uint32_t getAlignment() const { return Align; }
  uint32_t getNumMembers() const { return Members.size(); }
  /// Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is related to the leader and it could be
  /// negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(InstTy *Instr, int32_t Index, uint32_t NewAlign) {
    assert(NewAlign && "The new member's alignment should be non-zero");

    // Make sure the key fits in an int32_t.
    Optional<int32_t> MaybeKey = checkedAdd(Index, SmallestKey);
    if (!MaybeKey)
      return false;
    int32_t Key = *MaybeKey;

    // Skip if there is already a member with the same index.
    if (Members.find(Key) != Members.end())
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int32_t>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {
      // Make sure the largest index fits in an int32_t.
      Optional<int32_t> MaybeLargestIndex = checkedSub(LargestKey, Key);
      if (!MaybeLargestIndex)
        return false;

      // The largest index is always less than the interleave factor.
      if (*MaybeLargestIndex >= static_cast<int64_t>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Align = std::min(Align, NewAlign);
    Members[Key] = Instr;
    return true;
  }
  /// Get the member with the given index \p Index.
  ///
  /// \returns nullptr if contains no such member.
  InstTy *getMember(uint32_t Index) const {
    int32_t Key = SmallestKey + Index;
    auto Member = Members.find(Key);
    if (Member == Members.end())
      return nullptr;

    return Member->second;
  }
  /// Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  uint32_t getIndex(const InstTy *Instr) const {
    for (auto I : Members) {
      if (I.second == Instr)
        return I.first - SmallestKey;
    }

    llvm_unreachable("InterleaveGroup contains no such member");
  }
  InstTy *getInsertPos() const { return InsertPos; }
  void setInsertPos(InstTy *Inst) { InsertPos = Inst; }
  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata a'la
  /// addNewMedata. To do that we need to compute the intersection of the
  /// noalias info from all members.
  void addMetadata(InstTy *NewInst) const;
  /// Returns true if this Group requires a scalar iteration to handle gaps.
  bool requiresScalarEpilogue() const {
    // If the last member of the Group exists, then a scalar epilog is not
    // needed for this group.
    if (getMember(getFactor() - 1))
      return false;

    // We have a group with gaps. It therefore cannot be a group of stores,
    // and it can't be a reversed access, because such groups get invalidated.
    assert(!getMember(0)->mayWriteToMemory() &&
           "Group should have been invalidated");
    assert(!isReverse() && "Group should have been invalidated");

    // This is a group of loads, with gaps, and without a last member.
    return true;
  }
private:
  uint32_t Factor; // Interleave Factor.
  bool Reverse;
  uint32_t Align;
  DenseMap<int32_t, InstTy *> Members;
  int32_t SmallestKey = 0;
  int32_t LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  InstTy *InsertPos;
};
/// Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise it's meaningless to do analysis as the vectorization
/// on interleaved accesses is unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
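///
/// A hedged usage sketch (PSE, L, DT, LI and LAI are assumed to come from the
/// caller's existing analyses; names are illustrative):
///
///   InterleavedAccessInfo IAI(PSE, L, DT, LI, LAI);
///   IAI.analyzeInterleaving(/*EnableMaskedInterleavedGroup=*/false);
///   for (auto *Group : IAI.getInterleaveGroups())
///     (void)Group->getFactor(); // e.g. query each group's interleave factor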
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI,
                        const LoopAccessInfo *LAI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}
  ~InterleavedAccessInfo() { reset(); }
  /// Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  /// Consider also predicated loads/stores in the analysis if
  /// \p EnableMaskedInterleavedGroup is true.
  void analyzeInterleaving(bool EnableMaskedInterleavedGroup);
  /// Invalidate groups, e.g., in case all blocks in loop will be predicated
  /// contrary to original assumption. Although we currently prevent group
  /// formation for predicated accesses, we may be able to relax this limitation
  /// in the future once we handle more complicated blocks.
  void reset() {
    SmallPtrSet<InterleaveGroup<Instruction> *, 4> DelSet;
    // Avoid releasing a pointer twice.
    for (auto &I : InterleaveGroupMap)
      DelSet.insert(I.second);
    for (auto *Ptr : DelSet)
      delete Ptr;
    InterleaveGroupMap.clear();
    RequiresScalarEpilogue = false;
  }
  /// Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.find(Instr) != InterleaveGroupMap.end();
  }
  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if doesn't have such group.
  InterleaveGroup<Instruction> *
  getInterleaveGroup(const Instruction *Instr) const {
    if (InterleaveGroupMap.count(Instr))
      return InterleaveGroupMap.find(Instr)->second;
    return nullptr;
  }
  iterator_range<SmallPtrSetIterator<llvm::InterleaveGroup<Instruction> *>>
  getInterleaveGroups() {
    return make_range(InterleaveGroups.begin(), InterleaveGroups.end());
  }
  /// Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }
  /// Invalidate groups that require a scalar epilogue (due to gaps). This can
  /// happen when optimizing for size forbids a scalar epilogue, and the gap
  /// cannot be filtered by masking the load/store.
  void invalidateGroupsRequiringScalarEpilogue();
private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;
  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;
  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup<Instruction> *> InterleaveGroupMap;
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> InterleaveGroups;
  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;
  /// The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     unsigned Align)
        : Stride(Stride), Scev(Scev), Size(Size), Align(Align) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    unsigned Align = 0;
  };
  /// A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;
  /// Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Align.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup<Instruction> *
  createInterleaveGroup(Instruction *Instr, int Stride, unsigned Align) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] =
        new InterleaveGroup<Instruction>(Instr, Stride, Align);
    InterleaveGroups.insert(InterleaveGroupMap[Instr]);
    return InterleaveGroupMap[Instr];
  }
  /// Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup<Instruction> *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    InterleaveGroups.erase(Group);
    delete Group;
  }
  /// Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const ValueToValueMap &Strides);
  /// Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride);
  /// Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }
  /// Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }
  /// Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return false if reordering is
  /// not necessary or is prevented because \p A and \p B may be dependent.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.
    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;
    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;
    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;
    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;
    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;
    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return Dependences.find(Src) == Dependences.end() ||
           !Dependences.lookup(Src).count(Sink);
  }
  /// Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));