//===----------- VectorUtils.cpp - Vectorizer utility functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// hasVectorInstrinsicScalarOpd).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:   // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fshl:
  case Intrinsic::fshr:
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::sin:
  case Intrinsic::cos:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::log:
  case Intrinsic::log10:
  case Intrinsic::log2:
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::pow:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
                                                  unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  default:
    return false;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns Intrinsic::not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

/// Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
  const DataLayout &DL = Gep->getModule()->getDataLayout();
  unsigned LastOperand = Gep->getNumOperands() - 1;
  TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());

  // Walk backwards and try to peel off zeros.
  while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
    // Find the type we're currently indexing into.
    gep_type_iterator GEPTI = gep_type_begin(Gep);
    std::advance(GEPTI, LastOperand - 2);

    // If it's a type with the same allocation size as the result of the GEP we
    // can peel off the zero index.
    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
      break;
    --LastOperand;
  }

  return LastOperand;
}

/// If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP)
    return Ptr;

  unsigned InductionOperand = getGEPInductionOperand(GEP);

  // Check that all of the gep indices are uniform except for our induction
  // operand.
  for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
    if (i != InductionOperand &&
        !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
      return Ptr;
  return GEP->getOperand(InductionOperand);
}

/// If a value has only one user that is a CastInst, return it.
Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
  Value *UniqueCast = nullptr;
  for (User *U : Ptr->users()) {
    CastInst *CI = dyn_cast<CastInst>(U);
    if (CI && CI->getType() == Ty) {
      if (!UniqueCast)
        UniqueCast = CI;
      else
        return nullptr;
    }
  }
  return UniqueCast;
}

/// Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
Value *llvm::getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
  auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy || PtrTy->isAggregateType())
    return nullptr;

  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer, otherwise we are analyzing the index.
  Value *OrigPtr = Ptr;

  // The size of the pointer access.
  int64_t PtrAccessSize = 1;

  Ptr = stripGetElementPtr(Ptr, SE, Lp);
  const SCEV *V = SE->getSCEV(Ptr);

  if (Ptr != OrigPtr)
    // Strip off casts.
    while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
      V = C->getOperand();

  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
  if (!S)
    return nullptr;

  V = S->getStepRecurrence(*SE);
  if (!V)
    return nullptr;

  // Strip off the size of access multiplication if we are still analyzing the
  // pointer.
  if (OrigPtr == Ptr) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
      if (M->getOperand(0)->getSCEVType() != scConstant)
        return nullptr;

      const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();

      // Huge step value - give up.
      if (APStepVal.getBitWidth() > 64)
        return nullptr;

      int64_t StepVal = APStepVal.getSExtValue();
      if (PtrAccessSize != StepVal)
        return nullptr;
      V = M->getOperand(1);
    }
  }

  // Strip off casts.
  Type *StripedOffRecurrenceCast = nullptr;
  if (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V)) {
    StripedOffRecurrenceCast = C->getType();
    V = C->getOperand();
  }

  // Look for the loop invariant symbolic value.
  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
  if (!U)
    return nullptr;

  Value *Stride = U->getValue();
  if (!Lp->isLoopInvariant(Stride))
    return nullptr;

  // If we have stripped off the recurrence cast we have to make sure that we
  // return the value that is used in this loop so that we can replace it later.
  if (StripedOffRecurrenceCast)
    Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);

  return Stride;
}

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // Otherwise, we don't know.
  return nullptr;
}

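// An illustrative (hypothetical) use of the search above: for IR such as
//   %v = insertelement <4 x i32> %x, i32 %a, i32 1
// findScalarElement(%v, 1) returns %a directly, while findScalarElement(%v, 3)
// recurses into %x, since the insert does not touch element 3.
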
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

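// For example, getSplatIndex on the mask <3, -1, 3, 3> returns 3 (undef
// elements are ignored), while <0, 1, 0, 1> yields -1 because two different
// non-negative elements appear.
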
/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!is_splat(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0)
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

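// A worked example of the narrowing above: with Scale = 2 and Mask = <1, 2>,
// each wide element maps to two narrow ones, giving ScaledMask = <2, 3, 4, 5>;
// a negative (undef/sentinel) element is simply replicated Scale times.
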
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!is_splat(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}

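// A worked example of the widening above: with Scale = 2, Mask = <0, 1, 4, 5>
// maps cleanly onto ScaledMask = <0, 2> and the function returns true, whereas
// Mask = <0, 2, 4, 5> fails (the first slice is not consecutive) and returns
// false, leaving the caller to keep the original mask.
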
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }

  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (Visited.count(Val))
      continue;
    Visited.insert(Val);

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it, so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain of operations.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                     llvm::countLeadingZeros(LeaderDemandedBits);
    // Round up to a power of 2
    if (!isPowerOf2_64((uint64_t)MinBW))
      MinBW = NextPowerOf2(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      if (!isa<Instruction>(M))
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = cast<Instruction>(M)->getOperand(0)->getType();
      if (MinBW < Ty->getScalarSizeInBits())
        MinBWs[cast<Instruction>(M)] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

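// For example (hypothetical metadata): if Inst1 carries !access.group !0 and
// Inst2 carries !access.group !{!0, !1}, the intersection is the single node
// !0; with fully disjoint groups the result is null, dropping the marker.
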
/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

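// For example, with VF = 2 and an interleave group of factor 3 whose members
// sit at indices 0 and 2 (a gap at index 1), the generated mask would be
// <i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>.
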
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

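// For example, createReplicatedMask(/*ReplicationFactor=*/3, /*VF=*/2)
// produces <0, 0, 0, 1, 1, 1>: each source lane is repeated three times.
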
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

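// For example, createInterleaveMask(/*VF=*/4, /*NumVecs=*/2) produces
// <0, 4, 1, 5, 2, 6, 3, 7>, interleaving the lanes of two 4-element vectors.
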
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

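// For example, createStrideMask(/*Start=*/0, /*Stride=*/2, /*VF=*/4) produces
// <0, 2, 4, 6>, selecting every second lane beginning at lane 0.
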
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

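// For example, createSequentialMask(/*Start=*/0, /*NumInts=*/4,
// /*NumUndefs=*/2) produces <0, 1, 2, 3, -1, -1>, where -1 denotes an
// undefined lane.
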
/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector should not have fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

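// For example, concatenating three <4 x float> values proceeds in rounds:
// the first two are fused into an <8 x float>, the third is carried over,
// padded with undefs to match, and a final shuffle yields the <12 x float>
// result.
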
bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

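// For example, given the constant mask <i1 1, i1 0, i1 1, i1 1>, lane 1 is
// known false and gets cleared, leaving demanded elements {0, 2, 3}; undef
// lanes are conservatively kept as demanded.
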
bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

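// For example, a stride of -3 has factor 3 and is accepted under the default
// limit of 8, while stride 1 (a consecutive access) is not treated as
// interleaved.
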
void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const ValueToValueMap &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides,
                                    /*Assume=*/true, /*ShouldCheckWrap=*/false);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const ValueToValueMap &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *Group = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      Group = getInterleaveGroup(B);
      if (!Group) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        Group = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
      }
      if (B->mayWriteToMemory())
        StoreGroups.insert(Group);
      else
        LoadGroups.insert(Group);
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                    | A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI)) {
        // If a dependence exists and A is already in a group, we know that A
        // must be a store since A precedes B and WAR dependences are allowed.
        // Thus, A would be sunk below B. We release A's group to prevent this
        // illegal code motion. A will then be free to form another group with
        // instructions that precede it.
        if (isInterleaved(A)) {
          InterleaveGroup<Instruction> *StoreGroup = getInterleaveGroup(A);

          LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                               "dependence between "
                            << *A << " and " << *B << '\n');

          StoreGroups.remove(StoreGroup);
          releaseGroup(StoreGroup);
        }

        // If a dependence exists and A is not already in a group (or it was
        // and we just released it), B might be hoisted above A (if B is a
        // load) or another store might be sunk below A (if B is a store). In
        // either case, we can't add additional instructions to B's group. B
        // will only form a group with instructions that it precedes.
        break;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          Group->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (Group->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = Group;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          Group->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    if (getPtrStride(PSE, MemberPtr, TheLoop, Strides, /*Assume=*/false,
                     /*ShouldCheckWrap=*/true))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm

std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                       StringRef ScalarName, unsigned numArgs,
                                       ElementCount VF) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << "_ZGV" << VFABI::_LLVM_ << "N";
  if (VF.isScalable())
    Out << 'x';
  else
    Out << VF.getFixedValue();
  for (unsigned I = 0; I < numArgs; ++I)
    Out << "v";
  Out << "_" << ScalarName << "(" << VectorName << ")";
  return std::string(Out.str());
}

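// An illustrative (hypothetical) mangling, assuming VFABI::_LLVM_ expands to
// the string "_LLVM_": for a scalar name "sinf", a vector name "vsinf", one
// argument, and a fixed VF of 4, this would produce
// "_ZGV_LLVM_N4v_sinf(vsinf)".
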
void VFABI::getVectorVariantNames(
    const CallInst &CI, SmallVectorImpl<std::string> &VariantMappings) {
  const StringRef S = CI.getFnAttr(VFABI::MappingsAttrName).getValueAsString();
  if (S.empty())
    return;

  SmallVector<StringRef, 8> ListAttr;
  S.split(ListAttr, ",");

  for (auto &S : SetVector<StringRef>(ListAttr.begin(), ListAttr.end())) {
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "VFABI: adding mapping '" << S << "'\n");
    Optional<VFInfo> Info = VFABI::tryDemangleForVFABI(S, *(CI.getModule()));
    assert(Info.hasValue() && "Invalid name for a VFABI variant.");
    assert(CI.getModule()->getFunction(Info.getValue().VectorName) &&
           "Vector function is missing.");
#endif
    VariantMappings.push_back(std::string(S));
  }
}

bool VFShape::hasValidParameterList() const {
  for (unsigned Pos = 0, NumParams = Parameters.size(); Pos < NumParams;
       ++Pos) {
    assert(Parameters[Pos].ParamPos == Pos && "Broken parameter list.");

    switch (Parameters[Pos].ParamKind) {
    default: // Nothing to check.
      break;
    case VFParamKind::OMP_Linear:
    case VFParamKind::OMP_LinearRef:
    case VFParamKind::OMP_LinearVal:
    case VFParamKind::OMP_LinearUVal:
      // Compile time linear steps must be non-zero.
      if (Parameters[Pos].LinearStepOrPos == 0)
        return false;
      break;
    case VFParamKind::OMP_LinearPos:
    case VFParamKind::OMP_LinearRefPos:
    case VFParamKind::OMP_LinearValPos:
    case VFParamKind::OMP_LinearUValPos:
      // The runtime linear step must be referring to some other
      // parameters in the signature.
      if (Parameters[Pos].LinearStepOrPos >= int(NumParams))
        return false;
      // The linear step parameter must be marked as uniform.
      if (Parameters[Parameters[Pos].LinearStepOrPos].ParamKind !=
          VFParamKind::OMP_Uniform)
        return false;
      // The linear step parameter can't point at itself.
      if (Parameters[Pos].LinearStepOrPos == int(Pos))
        return false;
      break;
    case VFParamKind::GlobalPredicate:
      // The global predicate must be unique. It can be placed anywhere in the
      // signature.
      for (unsigned NextPos = Pos + 1; NextPos < NumParams; ++NextPos)
        if (Parameters[NextPos].ParamKind == VFParamKind::GlobalPredicate)
          return false;
      break;
    }
  }
  return true;
}