//===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of the
/// intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs: // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::atan2:
  case Intrinsic::exp10:
  case Intrinsic::log10:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return true;
  default:
    return false;
  }
}

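// For example, a trivially vectorizable intrinsic widens uniformly: the
// scalar form `float @llvm.sqrt.f32(float)` corresponds at VF = 4 to
// `<4 x float> @llvm.sqrt.v4f32(<4 x float>)`, with every operand and the
// return type becoming vectors.
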
bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
                                   const TargetTransformInfo *TTI) {
  if (isTriviallyVectorizable(ID))
    return true;

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicTriviallyScalarizable(ID);

  // TODO: Move frexp to isTriviallyVectorizable.
  // https://github.com/llvm/llvm-project/issues/112408
  switch (ID) {
  case Intrinsic::frexp:
    return true;
  }
  return false;
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx,
                                              const TargetTransformInfo *TTI) {
  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);

  switch (ID) {
  case Intrinsic::abs:
  case Intrinsic::vp_abs:
  case Intrinsic::ctlz:
  case Intrinsic::vp_ctlz:
  case Intrinsic::cttz:
  case Intrinsic::vp_cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

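// For example, the vector form of powi keeps its exponent scalar:
//   <4 x float> @llvm.powi.v4f32.i32(<4 x float> %x, i32 %n)
// so isVectorIntrinsicWithScalarOpAtArg(Intrinsic::powi, 1, TTI) is true,
// while the same query for operand index 0 is false.
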
bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(
    Intrinsic::ID ID, int OpdIdx, const TargetTransformInfo *TTI) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);

  if (VPCastIntrinsic::isVPCast(ID))
    return OpdIdx == -1 || OpdIdx == 0;

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
  case Intrinsic::vp_lrint:
  case Intrinsic::vp_llrint:
  case Intrinsic::ucmp:
  case Intrinsic::scmp:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::is_fpclass:
  case Intrinsic::vp_is_fpclass:
    return OpdIdx == 0;
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

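// For example, llvm.lrint is overloaded on both its result and its source
// operand (e.g. <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)), so this
// returns true for OpdIdx == -1 (the return type) and for OpdIdx == 0.
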
bool llvm::isVectorIntrinsicWithStructReturnOverloadAtField(
    Intrinsic::ID ID, int RetIdx, const TargetTransformInfo *TTI) {
  if (TTI && Intrinsic::isTargetIntrinsic(ID))
    return TTI->isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);

  switch (ID) {
  case Intrinsic::frexp:
    return RetIdx == 0 || RetIdx == 1;
  default:
    return RetIdx == 0;
  }
}

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

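// For example, assuming the TargetLibraryInfo and the call's attributes allow
// the mapping, a call to the libm function `sqrtf` maps to Intrinsic::sqrt,
// which is trivially vectorizable and is therefore returned; an arbitrary
// opaque call yields not_intrinsic.
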
/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return poison for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return PoisonValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return PoisonValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

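// For example, given
//   %v = insertelement <4 x i32> %w, i32 %s, i32 3
// findScalarElement(%v, 3) returns %s directly, while
// findScalarElement(%v, 1) recurses into %w.
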
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

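// For example, getSplatIndex({2, 2, 2, 2}) == 2, while {0, 1, 0, 1} and an
// all-undef mask both yield -1.
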
/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

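// For example, the canonical broadcast sequence
//   %ins = insertelement <4 x float> poison, float %s, i64 0
//   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
//                          <4 x i32> zeroinitializer
// returns %s, and the constant <4 x i32> <i32 7, i32 7, i32 7, i32 7>
// returns the i32 7 constant.
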
bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}

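// For example, with SrcWidth = 4, Mask = {0, 5, -1, 3} and
// DemandedElts = 0b1011 (result elements 0, 1 and 3), element 0 demands
// LHS[0], element 1 demands RHS[1] and element 3 demands LHS[3]; the undef
// element 2 is not demanded, so the function succeeds with
// DemandedLHS = 0b1001 and DemandedRHS = 0b0010.
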
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0) {
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    }
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

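// For example, narrowShuffleMaskElts(2, {1, -1}, Out) yields
// Out = {2, 3, -1, -1}: selecting wide element 1 is equivalent to selecting
// narrow elements 2 and 3, while sentinel values are replicated unchanged.
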
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with less elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal across
      // the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the elements
  // of a mask with wider elements.
  return true;
}

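// For example, widenShuffleMaskElts(2, {2, 3, 0, 1}, Out) succeeds with
// Out = {1, 0}, but a mask starting {0, 2, ...} fails because the elements of
// the first slice are not consecutive.
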
bool llvm::widenShuffleMaskElts(ArrayRef<int> M,
                                SmallVectorImpl<int> &NewMask) {
  unsigned NumElts = M.size();
  if (NumElts % 2 != 0)
    return false;

  NewMask.clear();
  for (unsigned i = 0; i < NumElts; i += 2) {
    int M0 = M[i];
    int M1 = M[i + 1];

    // If both elements are undef, new mask is undef too.
    if (M0 == -1 && M1 == -1) {
      NewMask.push_back(-1);
      continue;
    }

    if (M0 == -1 && M1 != -1 && (M1 % 2) == 1) {
      NewMask.push_back(M1 / 2);
      continue;
    }

    if (M0 != -1 && (M0 % 2) == 0 && ((M0 + 1) == M1 || M1 == -1)) {
      NewMask.push_back(M0 / 2);
      continue;
    }

    NewMask.clear();
    return false;
  }

  assert(NewMask.size() == NumElts / 2 && "Incorrect size for mask!");
  return true;
}

bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  unsigned NumSrcElts = Mask.size();
  assert(NumSrcElts > 0 && NumDstElts > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (NumSrcElts == NumDstElts) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // Ensure we can find a whole scale factor.
  assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
         "Unexpected scaling factor");

  if (NumSrcElts > NumDstElts) {
    int Scale = NumSrcElts / NumDstElts;
    return widenShuffleMaskElts(Scale, Mask, ScaledMask);
  }

  int Scale = NumDstElts / NumSrcElts;
  narrowShuffleMaskElts(Scale, Mask, ScaledMask);
  return true;
}

void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

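// For example, {0, 1, 2, 3} repeatedly widens (Scale = 2) to {0, 1} and then
// to {0}, so ScaledMask = {0}; {1, 0, 3, 2} cannot be widened at all and is
// returned unchanged.
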
void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned, bool)>
        ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(2 * NumOfSrcRegs, {});
    // Check that the values in dest registers are in the one src
    // register.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= 2 * Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int MaskIdx = Mask[Idx] % Sz;
      int SrcRegIdx = MaskIdx / SzSrc + (Mask[Idx] >= Sz ? NumOfSrcRegs : 0);
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source registers.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = MaskIdx % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I : seq<unsigned>(NumOfUsedRegs)) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only mask with at least single undef mask elem.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for 2 first registers
      // and generate a shuffle of 2 registers rather than the reordering of the
      // first register and then shuffle with the second register. Next,
      // generate the shuffles of the resulting register + the remaining
      // registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      bool NewReg = true;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I : seq<unsigned>(2 * NumOfSrcRegs)) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx, NewReg);
          NewReg = false;
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx, NewReg);
          NewReg = false;
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

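// For example, with Mask = {0, 1, 2, 3, 8, 9, 10, 11}, NumOfSrcRegs = 2 and
// NumOfDestRegs = 2, each destination register is fed by exactly one split
// source register (the first half of operand 0 and the first half of
// operand 1, respectively), so SingleInputAction fires twice and
// ManyInputsAction is never invoked.
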
void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
                                               const APInt &DemandedElts,
                                               APInt &DemandedLHS,
                                               APInt &DemandedRHS) {
  assert(VectorBitWidth >= 128 && "Vectors smaller than 128 bit not supported");
  int NumLanes = VectorBitWidth / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumEltsPerLane = NumElts / NumLanes;
  int HalfEltsPerLane = NumEltsPerLane / 2;

  DemandedLHS = APInt::getZero(NumElts);
  DemandedRHS = APInt::getZero(NumElts);

  // Map DemandedElts to the horizontal operands.
  for (int Idx = 0; Idx != NumElts; ++Idx) {
    if (!DemandedElts[Idx])
      continue;
    int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
    int LocalIdx = Idx % NumEltsPerLane;
    if (LocalIdx < HalfEltsPerLane) {
      DemandedLHS.setBit(LaneIdx + 2 * LocalIdx);
    } else {
      LocalIdx -= HalfEltsPerLane;
      DemandedRHS.setBit(LaneIdx + 2 * LocalIdx);
    }
  }
}

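// For example, for a single 128-bit lane of <4 x i32> (HalfEltsPerLane = 2),
// demanding result elements 0 and 2 (DemandedElts = 0b0101) demands element 0
// of the LHS and element 0 of the RHS, respectively.
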
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }

  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain of instructions safely.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2
    MinBW = llvm::bit_ceil(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      auto *MI = dyn_cast<Instruction>(M);
      if (!MI)
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = MI->getOperand(0)->getType();

      if (MinBW >= Ty->getScalarSizeInBits())
        continue;

      // If any of M's operands demand more bits than MinBW then M cannot be
      // performed safely in MinBW.
      if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
            auto *CI = dyn_cast<ConstantInt>(U);
            // For constant shift amounts, check if the shift would result in
            // poison.
            if (CI &&
                isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
                U.getOperandNo() == 1)
              return CI->uge(MinBW);
            uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
            return bit_ceil(BW) > MinBW;
          }))
        continue;

      MinBWs[MI] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

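// For example, uniting two distinct access groups !0 = distinct !{} and
// !1 = distinct !{} produces the list node !{!0, !1}, while uniting a group
// with itself returns the group unchanged.
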
MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();
  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  if (VL.empty())
    return Inst;
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group, LLVMContext::MD_mmra}) {
    MDNode *MD = I0->getMetadata(Kind);
    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);

      switch (Kind) {
      case LLVMContext::MD_mmra: {
        MD = MMRAMetadata::combine(Inst->getContext(), MD, IMD);
        break;
      }
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

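// For example, for a group with factor 2 whose only member is at index 0,
// VF = 2 yields the mask <i1 true, i1 false, i1 true, i1 false>, disabling
// the lanes that belong to the missing member.
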
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

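// For example, createInterleaveMask(4, 2) returns {0, 4, 1, 5, 2, 6, 3, 7},
// the mask that interleaves the lanes of two concatenated 4-element vectors.
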
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

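// For example, createSequentialMask(2, 3, 1) returns {2, 3, 4, -1}: three
// consecutive indices starting at 2 followed by one undef element.
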
llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from the
  // corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

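// For example, with NumElts = 4 the two-operand mask {0, 5, 2, 7} becomes the
// unary mask {0, 1, 2, 3}, folding operand-1 indices back onto operand 0.
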
/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Expect the first vector to have at least as many elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        return true;
  }
  return false;
}

/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

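// For example, for the constant mask <i1 1, i1 0, i1 undef, i1 1> only the
// known-zero lane is cleared, so the possibly-demanded set is {0, 2, 3}.
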
bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const DenseMap<Value *, const SCEV *> &Strides) {
  auto &DL = TheLoop->getHeader()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be overly
      // conservative. For full groups, wrapping should be ok since if we would
      // wrap around the address space we would do a memory access at nullptr
      // even without the transformation. The wrapping checks are therefore
      // deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];      // (1)
//                                A[i] = b;      // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const auto &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
  // Groups added to this set cannot have new members added.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *GroupB = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      GroupB = getInterleaveGroup(B);
      if (!GroupB) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
        if (B->mayWriteToMemory())
          StoreGroups.insert(GroupB);
        else
          LoadGroups.insert(GroupB);
      }
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a; // (1)
      //                    | A[i-1] = b; // (2) |
      //                      A[i-3] = c; // (3)
      //                    | A[i]   = d; // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
                                 StrideEntry *A) -> Instruction * {
        for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
          Instruction *MemberOfGroupB = Group->getMember(Index);
          if (MemberOfGroupB && !canReorderMemAccessesForInterleavedGroups(
                                    A, &*AccessStrideInfo.find(MemberOfGroupB)))
            return MemberOfGroupB;
        }
        return nullptr;
      };

      auto GroupA = getInterleaveGroup(A);
      // If A is a load, dependencies are tolerable, there's nothing to do here.
      // If both A and B belong to the same (store) group, they are independent,
      // even if dependencies have not been recorded.
      // If both GroupA and GroupB are null, there's nothing to do here.
      if (A->mayWriteToMemory() && GroupA != GroupB) {
        Instruction *DependentInst = nullptr;
        // If GroupB is a load group, we have to compare AI against all
        // members of GroupB because if any load within GroupB has a dependency
        // on AI, we need to mark GroupB as complete and also release the
        // store GroupA (if A belongs to one). The former prevents incorrect
        // hoisting of load B above store A while the latter prevents incorrect
        // sinking of store A below load B.
        if (GroupB && LoadGroups.contains(GroupB))
          DependentInst = DependentMember(GroupB, &*AI);
        else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
          DependentInst = B;

        if (DependentInst) {
          // A has a store dependence on B (or on some load within GroupB) and
          // is part of a store group. Release A's group to prevent illegal
          // sinking of A below B. A will then be free to form another group
          // with instructions that precede it.
          if (GroupA && StoreGroups.contains(GroupA)) {
            LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                                 "dependence between "
                              << *A << " and " << *DependentInst << '\n');
            StoreGroups.remove(GroupA);
            releaseGroup(GroupA);
          }
          // If B is a load and part of an interleave group, no earlier loads
          // can be added to B's interleave group, because this would mean the
          // DependentInst would move across store A. Mark the interleave group
          // as complete.
          if (GroupB && LoadGroups.contains(GroupB)) {
            LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
                              << " as complete.\n");
            CompletedLoadGroups.insert(GroupB);
          }
        }
      }
      if (CompletedLoadGroups.contains(GroupB)) {
        // Skip trying to add A to B, continue to look for other conflicting A's
        // in groups to be released.
        continue;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same predicate,
      // and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = GroupB;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          GroupB->setInsertPos(A);
      }
    } // Iteration over A accesses.
  }   // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            const char *FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer doesn't
  // wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. Can Skip the checks; For full groups, if the wide
    // load would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1, "last");
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. Can Skip the checks; For full groups, if the wide
    // store would wrap around the address space we would do a memory access at
    // nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, "first"))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, "last");
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  // Release groups requiring scalar epilogues. Note that this also removes them
  // from InterleaveGroups.
  bool ReleasedGroup = InterleaveGroups.remove_if([&](auto *Group) {
    if (!Group->requiresScalarEpilogue())
      return false;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroupWithoutRemovingFromSet(Group);
    return true;
  });
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm