//===----------- VectorUtils.cpp - Vectorizer utility functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"

#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;

/// Maximum factor for an interleaved memory access.
static cl::opt<unsigned> MaxInterleaveGroupFactor(
    "max-interleave-group-factor", cl::Hidden,
    cl::desc("Maximum factor for an interleaved access group (default = 8)"),
    cl::init(8));

/// Return true if all of the intrinsic's arguments and return type are scalars
/// for the scalar form of the intrinsic, and vectors for the vector form of
/// the intrinsic (except operands that are marked as always being scalar by
/// isVectorIntrinsicWithScalarOpAtArg).
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::abs:  // Begin integer bit-manipulation.
  case Intrinsic::bswap:
  case Intrinsic::bitreverse:
  case Intrinsic::ctpop:
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat:
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
  case Intrinsic::sqrt: // Begin floating-point.
  case Intrinsic::log10:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::minimum:
  case Intrinsic::maximum:
  case Intrinsic::copysign:
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::fmuladd:
  case Intrinsic::is_fpclass:
  case Intrinsic::canonicalize:
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    return true;
  default:
    return false;
  }
}

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                              unsigned ScalarOpdIdx) {
  switch (ID) {
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
    return (ScalarOpdIdx == 1);
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix:
  case Intrinsic::umul_fix_sat:
    return (ScalarOpdIdx == 2);
  default:
    return false;
  }
}

bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                                  int OpdIdx) {
  assert(ID != Intrinsic::not_intrinsic && "Not an intrinsic!");

  switch (ID) {
  case Intrinsic::fptosi_sat:
  case Intrinsic::fptoui_sat:
  case Intrinsic::lrint:
  case Intrinsic::llrint:
    return OpdIdx == -1 || OpdIdx == 0;
  case Intrinsic::is_fpclass:
  case Intrinsic::powi:
    return OpdIdx == -1 || OpdIdx == 1;
  default:
    return OpdIdx == -1;
  }
}

/// Returns the intrinsic ID for the call.
/// For the input call instruction it finds the mapped intrinsic and returns
/// its ID; if it does not find one, it returns not_intrinsic.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
                                                const TargetLibraryInfo *TLI) {
  Intrinsic::ID ID = getIntrinsicForCallSite(*CI, TLI);
  if (ID == Intrinsic::not_intrinsic)
    return Intrinsic::not_intrinsic;

  if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
      ID == Intrinsic::lifetime_end || ID == Intrinsic::assume ||
      ID == Intrinsic::experimental_noalias_scope_decl ||
      ID == Intrinsic::sideeffect || ID == Intrinsic::pseudoprobe)
    return ID;
  return Intrinsic::not_intrinsic;
}

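// Illustrative usage (assumed IR shape, not from the original source): given
//   %v = insertelement <4 x i32> %vec, i32 %s, i32 1
// findScalarElement(%v, 1) below returns %s without creating new IR, while
// findScalarElement(%v, 0) recurses into %vec.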
/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it was inserted then extracted
/// from the vector.
Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  // For fixed-length vector, return undef for out of range access.
  if (auto *FVTy = dyn_cast<FixedVectorType>(VTy)) {
    unsigned Width = FVTy->getNumElements();
    if (EltNo >= Width)
      return UndefValue::get(FVTy->getElementType());
  }

  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Guard against infinite loop on malformed, unreachable IR.
    if (III == III->getOperand(0))
      return nullptr;

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return findScalarElement(III->getOperand(0), EltNo);
  }

  ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V);
  // Restrict the following transformation to fixed-length vector.
  if (SVI && isa<FixedVectorType>(SVI->getType())) {
    unsigned LHSWidth =
        cast<FixedVectorType>(SVI->getOperand(0)->getType())->getNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    if (InEl < (int)LHSWidth)
      return findScalarElement(SVI->getOperand(0), InEl);
    return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  // TODO: Use getBinOpIdentity() to generalize this.
  Value *Val; Constant *C;
  if (match(V, m_Add(m_Value(Val), m_Constant(C))))
    if (Constant *Elt = C->getAggregateElement(EltNo))
      if (Elt->isNullValue())
        return findScalarElement(Val, EltNo);

  // If the vector is a splat then we can trivially find the scalar element.
  if (isa<ScalableVectorType>(VTy))
    if (Value *Splat = getSplatValue(V))
      if (EltNo < VTy->getElementCount().getKnownMinValue())
        return Splat;

  // Otherwise, we don't know.
  return nullptr;
}

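// Illustrative examples (not from the original source):
//   getSplatIndex({2, 2, 2, 2})   == 2   (all defined elements agree)
//   getSplatIndex({0, 1, 0, 1})   == -1  (two distinct element values)
//   getSplatIndex({-1, 3, -1, 3}) == 3   (undef elements are ignored)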
int llvm::getSplatIndex(ArrayRef<int> Mask) {
  int SplatIndex = -1;
  for (int M : Mask) {
    // Ignore invalid (undefined) mask elements.
    if (M < 0)
      continue;

    // There can be only 1 non-negative mask element value if this is a splat.
    if (SplatIndex != -1 && SplatIndex != M)
      return -1;

    // Initialize the splat index to the 1st non-negative mask element.
    SplatIndex = M;
  }
  assert((SplatIndex == -1 || SplatIndex >= 0) && "Negative index?");
  return SplatIndex;
}

/// Get splat value if the input is a splat vector or return nullptr.
/// This function is not fully general. It checks only 2 cases:
/// the input value is (1) a splat constant vector or (2) a sequence
/// of instructions that broadcasts a scalar at element 0.
Value *llvm::getSplatValue(const Value *V) {
  if (isa<VectorType>(V->getType()))
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue();

  // shuf (inselt ?, Splat, 0), ?, <0, undef, 0, ...>
  Value *Splat;
  if (match(V,
            m_Shuffle(m_InsertElt(m_Value(), m_Value(Splat), m_ZeroInt()),
                      m_Value(), m_ZeroMask())))
    return Splat;

  return nullptr;
}

bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) {
  assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");

  if (isa<VectorType>(V->getType())) {
    if (isa<UndefValue>(V))
      return true;
    // FIXME: We can allow undefs, but if Index was specified, we may want to
    //        check that the constant is defined at that index.
    if (auto *C = dyn_cast<Constant>(V))
      return C->getSplatValue() != nullptr;
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(V)) {
    // FIXME: We can safely allow undefs here. If Index was specified, we will
    //        check that the mask elt is defined at the required index.
    if (!all_equal(Shuf->getShuffleMask()))
      return false;

    // Match any index.
    if (Index == -1)
      return true;

    // Match a specific element. The mask should be defined at and match the
    // specified index.
    return Shuf->getMaskValue(Index) == Index;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxAnalysisRecursionDepth)
    return false;

  // If both operands of a binop are splats, the result is a splat.
  Value *X, *Y, *Z;
  if (match(V, m_BinOp(m_Value(X), m_Value(Y))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth);

  // If all operands of a select are splats, the result is a splat.
  if (match(V, m_Select(m_Value(X), m_Value(Y), m_Value(Z))))
    return isSplatValue(X, Index, Depth) && isSplatValue(Y, Index, Depth) &&
           isSplatValue(Z, Index, Depth);

  // TODO: Add support for unary ops (fneg), casts, intrinsics (overflow ops).

  return false;
}

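// Worked example (illustrative): with SrcWidth = 4, Mask = {0, 5, 2, 7} and
// all four output elements demanded, the function below sets DemandedLHS to
// elements {0, 2} and DemandedRHS to elements {1, 3}.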
bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                                  const APInt &DemandedElts, APInt &DemandedLHS,
                                  APInt &DemandedRHS, bool AllowUndefElts) {
  DemandedLHS = DemandedRHS = APInt::getZero(SrcWidth);

  // Early out if we don't demand any elements.
  if (DemandedElts.isZero())
    return true;

  // Simple case of a shuffle with zeroinitializer.
  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }

  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    int M = Mask[I];
    assert((-1 <= M) && (M < (SrcWidth * 2)) &&
           "Invalid shuffle mask constant");

    if (!DemandedElts[I] || (AllowUndefElts && (M < 0)))
      continue;

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M < 0)
      return false;

    if (M < SrcWidth)
      DemandedLHS.setBit(M);
    else
      DemandedRHS.setBit(M - SrcWidth);
  }

  return true;
}

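// Illustrative example: narrowing by Scale = 2 expands each mask element into
// a pair of adjacent narrow elements, e.g. {1, -1} becomes {2, 3, -1, -1}.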
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                 SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return;
  }

  ScaledMask.clear();
  for (int MaskElt : Mask) {
    if (MaskElt >= 0)
      assert(((uint64_t)Scale * MaskElt + (Scale - 1)) <= INT32_MAX &&
             "Overflowed 32-bits");
    for (int SliceElt = 0; SliceElt != Scale; ++SliceElt)
      ScaledMask.push_back(MaskElt < 0 ? MaskElt : Scale * MaskElt + SliceElt);
  }
}

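// Illustrative examples: widening by Scale = 2 succeeds for {0, 1, 6, 7}
// (producing {0, 3}), but fails for {0, 2, 4, 6} because the slice {0, 2} is
// not consecutive, and for {1, 2, 3, 4} because 1 is not divisible by 2.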
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &ScaledMask) {
  assert(Scale > 0 && "Unexpected scaling factor");

  // Fast-path: if no scaling, then it is just a copy.
  if (Scale == 1) {
    ScaledMask.assign(Mask.begin(), Mask.end());
    return true;
  }

  // We must map the original elements down evenly to a type with fewer
  // elements.
  int NumElts = Mask.size();
  if (NumElts % Scale != 0)
    return false;

  ScaledMask.clear();
  ScaledMask.reserve(NumElts / Scale);

  // Step through the input mask by splitting into Scale-sized slices.
  do {
    ArrayRef<int> MaskSlice = Mask.take_front(Scale);
    assert((int)MaskSlice.size() == Scale && "Expected Scale-sized slice.");

    // The first element of the slice determines how we evaluate this slice.
    int SliceFront = MaskSlice.front();
    if (SliceFront < 0) {
      // Negative values (undef or other "sentinel" values) must be equal
      // across the entire slice.
      if (!all_equal(MaskSlice))
        return false;
      ScaledMask.push_back(SliceFront);
    } else {
      // A positive mask element must be cleanly divisible.
      if (SliceFront % Scale != 0)
        return false;
      // Elements of the slice must be consecutive.
      for (int i = 1; i < Scale; ++i)
        if (MaskSlice[i] != SliceFront + i)
          return false;
      ScaledMask.push_back(SliceFront / Scale);
    }
    Mask = Mask.drop_front(Scale);
  } while (!Mask.empty());

  assert((int)ScaledMask.size() * Scale == NumElts && "Unexpected scaled mask");

  // All elements of the original mask can be scaled down to map to the
  // elements of a mask with wider elements.
  return true;
}

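// Illustrative example: {2, 3, 0, 1} widens once (by 2) to {1, 0}; a further
// widening attempt fails, so {1, 0} is the widest-element form returned.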
void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &ScaledMask) {
  std::array<SmallVector<int, 16>, 2> TmpMasks;
  SmallVectorImpl<int> *Output = &TmpMasks[0], *Tmp = &TmpMasks[1];
  ArrayRef<int> InputMask = Mask;
  for (unsigned Scale = 2; Scale <= InputMask.size(); ++Scale) {
    while (widenShuffleMaskElts(Scale, InputMask, *Output)) {
      InputMask = *Output;
      std::swap(Output, Tmp);
    }
  }
  ScaledMask.assign(InputMask.begin(), InputMask.end());
}

void llvm::processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
  SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
  // Try to perform better estimation of the permutation.
  // 1. Split the source/destination vectors into real registers.
  // 2. Do the mask analysis to identify which real registers are
  //    permuted.
  int Sz = Mask.size();
  unsigned SzDest = Sz / NumOfDestRegs;
  unsigned SzSrc = Sz / NumOfSrcRegs;
  for (unsigned I = 0; I < NumOfDestRegs; ++I) {
    auto &RegMasks = Res[I];
    RegMasks.assign(NumOfSrcRegs, {});
    // Check that the values in dest registers are in the one src
    // register.
    for (unsigned K = 0; K < SzDest; ++K) {
      int Idx = I * SzDest + K;
      if (Idx == Sz)
        break;
      if (Mask[Idx] >= Sz || Mask[Idx] == PoisonMaskElem)
        continue;
      int SrcRegIdx = Mask[Idx] / SzSrc;
      // Add a cost of PermuteTwoSrc for each new source register permute,
      // if we have more than one source registers.
      if (RegMasks[SrcRegIdx].empty())
        RegMasks[SrcRegIdx].assign(SzDest, PoisonMaskElem);
      RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
    }
  }
  // Process split mask.
  for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
    auto &Dest = Res[I];
    int NumSrcRegs =
        count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
    switch (NumSrcRegs) {
    case 0:
      // No input vectors were used!
      NoInputAction();
      break;
    case 1: {
      // Find the only mask with at least single undef mask elem.
      auto *It =
          find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
      unsigned SrcReg = std::distance(Dest.begin(), It);
      SingleInputAction(*It, SrcReg, I);
      break;
    }
    default: {
      // The first mask is a permutation of a single register. Since we have >2
      // input registers to shuffle, we merge the masks for 2 first registers
      // and generate a shuffle of 2 registers rather than the reordering of
      // the first register and then shuffle with the second register. Next,
      // generate the shuffles of the resulting register + the remaining
      // registers from the list.
      auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
                               ArrayRef<int> SecondMask) {
        for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
          if (SecondMask[Idx] != PoisonMaskElem) {
            assert(FirstMask[Idx] == PoisonMaskElem &&
                   "Expected undefined mask element.");
            FirstMask[Idx] = SecondMask[Idx] + VF;
          }
        }
      };
      auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
        for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
          if (Mask[Idx] != PoisonMaskElem)
            Mask[Idx] = Idx;
        }
      };
      int SecondIdx;
      do {
        int FirstIdx = -1;
        SecondIdx = -1;
        MutableArrayRef<int> FirstMask, SecondMask;
        for (unsigned I = 0; I < NumOfDestRegs; ++I) {
          SmallVectorImpl<int> &RegMask = Dest[I];
          if (RegMask.empty())
            continue;

          if (FirstIdx == SecondIdx) {
            FirstIdx = I;
            FirstMask = RegMask;
            continue;
          }
          SecondIdx = I;
          SecondMask = RegMask;
          CombineMasks(FirstMask, SecondMask);
          ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
          NormalizeMask(FirstMask);
          RegMask.clear();
          SecondMask = FirstMask;
          SecondIdx = FirstIdx;
        }
        if (FirstIdx != SecondIdx && SecondIdx >= 0) {
          CombineMasks(SecondMask, FirstMask);
          ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
          Dest[FirstIdx].clear();
          NormalizeMask(SecondMask);
        }
      } while (SecondIdx >= 0);
      break;
    }
    }
  }
}

MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
                               const TargetTransformInfo *TTI) {

  // DemandedBits will give us every value's live-out bits. But we want
  // to ensure no extra casts would need to be inserted, so every DAG
  // of connected values must have the same minimum bitwidth.
  EquivalenceClasses<Value *> ECs;
  SmallVector<Value *, 16> Worklist;
  SmallPtrSet<Value *, 4> Roots;
  SmallPtrSet<Value *, 16> Visited;
  DenseMap<Value *, uint64_t> DBits;
  SmallPtrSet<Instruction *, 4> InstructionSet;
  MapVector<Instruction *, uint64_t> MinBWs;

  // Determine the roots. We work bottom-up, from truncs or icmps.
  bool SeenExtFromIllegalType = false;
  for (auto *BB : Blocks)
    for (auto &I : *BB) {
      InstructionSet.insert(&I);

      if (TTI && (isa<ZExtInst>(&I) || isa<SExtInst>(&I)) &&
          !TTI->isTypeLegal(I.getOperand(0)->getType()))
        SeenExtFromIllegalType = true;

      // Only deal with non-vector integers up to 64-bits wide.
      if ((isa<TruncInst>(&I) || isa<ICmpInst>(&I)) &&
          !I.getType()->isVectorTy() &&
          I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) {
        // Don't make work for ourselves. If we know the loaded type is legal,
        // don't add it to the worklist.
        if (TTI && isa<TruncInst>(&I) && TTI->isTypeLegal(I.getType()))
          continue;

        Worklist.push_back(&I);
        Roots.insert(&I);
      }
    }
  // Early exit.
  if (Worklist.empty() || (TTI && !SeenExtFromIllegalType))
    return MinBWs;

  // Now proceed breadth-first, unioning values together.
  while (!Worklist.empty()) {
    Value *Val = Worklist.pop_back_val();
    Value *Leader = ECs.getOrInsertLeaderValue(Val);

    if (!Visited.insert(Val).second)
      continue;

    // Non-instructions terminate a chain successfully.
    if (!isa<Instruction>(Val))
      continue;
    Instruction *I = cast<Instruction>(Val);

    // If we encounter a type that is larger than 64 bits, we can't represent
    // it, so bail out.
    if (DB.getDemandedBits(I).getBitWidth() > 64)
      return MapVector<Instruction *, uint64_t>();

    uint64_t V = DB.getDemandedBits(I).getZExtValue();
    DBits[Leader] |= V;
    DBits[I] = V;

    // Casts, loads and instructions outside of our range terminate a chain
    // successfully.
    if (isa<SExtInst>(I) || isa<ZExtInst>(I) || isa<LoadInst>(I) ||
        !InstructionSet.count(I))
      continue;

    // Unsafe casts terminate a chain unsuccessfully. We can't do anything
    // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to
    // transform anything that relies on them.
    if (isa<BitCastInst>(I) || isa<PtrToIntInst>(I) || isa<IntToPtrInst>(I) ||
        !I->getType()->isIntegerTy()) {
      DBits[Leader] |= ~0ULL;
      continue;
    }

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    if (isa<PHINode>(I))
      continue;

    if (DBits[Leader] == ~0ULL)
      // All bits demanded, no point continuing.
      continue;

    for (Value *O : cast<User>(I)->operands()) {
      ECs.unionSets(Leader, O);
      Worklist.push_back(O);
    }
  }

  // Now we've discovered all values, walk them to see if there are
  // any users we didn't see. If there are, we can't optimize that
  // chain.
  for (auto &I : DBits)
    for (auto *U : I.first->users())
      if (U->getType()->isIntegerTy() && DBits.count(U) == 0)
        DBits[ECs.getOrInsertLeaderValue(I.first)] |= ~0ULL;

  for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
    uint64_t LeaderDemandedBits = 0;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      LeaderDemandedBits |= DBits[M];

    uint64_t MinBW = llvm::bit_width(LeaderDemandedBits);
    // Round up to a power of 2
    MinBW = llvm::bit_ceil(MinBW);

    // We don't modify the types of PHIs. Reductions will already have been
    // truncated if possible, and inductions' sizes will have been chosen by
    // indvars.
    // If we are required to shrink a PHI, abandon this entire equivalence
    // class.
    bool Abort = false;
    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
        Abort = true;
        break;
      }
    if (Abort)
      continue;

    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
      auto *MI = dyn_cast<Instruction>(M);
      if (!MI)
        continue;
      Type *Ty = M->getType();
      if (Roots.count(M))
        Ty = MI->getOperand(0)->getType();

      if (MinBW >= Ty->getScalarSizeInBits())
        continue;

      // If any of M's operands demand more bits than MinBW then M cannot be
      // performed safely in MinBW.
      if (any_of(MI->operands(), [&DB, MinBW](Use &U) {
            auto *CI = dyn_cast<ConstantInt>(U);
            // For constant shift amounts, check if the shift would result in
            // poison.
            if (CI &&
                isa<ShlOperator, LShrOperator, AShrOperator>(U.getUser()) &&
                U.getOperandNo() == 1)
              return CI->uge(MinBW);
            uint64_t BW = bit_width(DB.getDemandedBits(&U).getZExtValue());
            return bit_ceil(BW) > MinBW;
          }))
        continue;

      MinBWs[MI] = MinBW;
    }
  }

  return MinBWs;
}

/// Add all access groups in @p AccGroups to @p List.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) {
  // Interpret an access group as a list containing itself.
  if (AccGroups->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(AccGroups) && "Node must be an access group");
    List.insert(AccGroups);
    return;
  }

  for (const auto &AccGroupListOp : AccGroups->operands()) {
    auto *Item = cast<MDNode>(AccGroupListOp.get());
    assert(isValidAsAccessGroup(Item) && "List item must be an access group");
    List.insert(Item);
  }
}

MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) {
  if (!AccGroups1)
    return AccGroups2;
  if (!AccGroups2)
    return AccGroups1;
  if (AccGroups1 == AccGroups2)
    return AccGroups1;

  SmallSetVector<Metadata *, 4> Union;
  addToAccessGroupList(Union, AccGroups1);
  addToAccessGroupList(Union, AccGroups2);

  if (Union.size() == 0)
    return nullptr;
  if (Union.size() == 1)
    return cast<MDNode>(Union.front());

  LLVMContext &Ctx = AccGroups1->getContext();
  return MDNode::get(Ctx, Union.getArrayRef());
}

MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
                                    const Instruction *Inst2) {
  bool MayAccessMem1 = Inst1->mayReadOrWriteMemory();
  bool MayAccessMem2 = Inst2->mayReadOrWriteMemory();

  if (!MayAccessMem1 && !MayAccessMem2)
    return nullptr;
  if (!MayAccessMem1)
    return Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MayAccessMem2)
    return Inst1->getMetadata(LLVMContext::MD_access_group);

  MDNode *MD1 = Inst1->getMetadata(LLVMContext::MD_access_group);
  MDNode *MD2 = Inst2->getMetadata(LLVMContext::MD_access_group);
  if (!MD1 || !MD2)
    return nullptr;
  if (MD1 == MD2)
    return MD1;

  // Use set for scalable 'contains' check.
  SmallPtrSet<Metadata *, 4> AccGroupSet2;
  addToAccessGroupList(AccGroupSet2, MD2);

  SmallVector<Metadata *, 4> Intersection;
  if (MD1->getNumOperands() == 0) {
    assert(isValidAsAccessGroup(MD1) && "Node must be an access group");
    if (AccGroupSet2.count(MD1))
      Intersection.push_back(MD1);
  } else {
    for (const MDOperand &Node : MD1->operands()) {
      auto *Item = cast<MDNode>(Node.get());
      assert(isValidAsAccessGroup(Item) && "List item must be an access group");
      if (AccGroupSet2.count(Item))
        Intersection.push_back(Item);
    }
  }

  if (Intersection.size() == 0)
    return nullptr;
  if (Intersection.size() == 1)
    return cast<MDNode>(Intersection.front());

  LLVMContext &Ctx = Inst1->getContext();
  return MDNode::get(Ctx, Intersection);
}

/// \returns \p I after propagating metadata from \p VL.
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (auto Kind : {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                    LLVMContext::MD_noalias, LLVMContext::MD_fpmath,
                    LLVMContext::MD_nontemporal, LLVMContext::MD_invariant_load,
                    LLVMContext::MD_access_group}) {
    MDNode *MD = I0->getMetadata(Kind);

    for (int J = 1, E = VL.size(); MD && J != E; ++J) {
      const Instruction *IJ = cast<Instruction>(VL[J]);
      MDNode *IMD = IJ->getMetadata(Kind);
      switch (Kind) {
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
      case LLVMContext::MD_nontemporal:
      case LLVMContext::MD_invariant_load:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_access_group:
        MD = intersectAccessGroups(Inst, IJ);
        break;
      default:
        llvm_unreachable("unhandled metadata");
      }
    }

    Inst->setMetadata(Kind, MD);
  }

  return Inst;
}

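// Illustrative example: for a group with Factor = 2 whose only member is at
// index 0 and VF = 4, the mask built below is <1, 0, 1, 0, 1, 0, 1, 0>.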
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                           const InterleaveGroup<Instruction> &Group) {
  // All 1's means mask is not needed.
  if (Group.getNumMembers() == Group.getFactor())
    return nullptr;

  // TODO: support reversed access.
  assert(!Group.isReverse() && "Reversed group not supported.");

  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < Group.getFactor(); ++j) {
      unsigned HasMember = Group.getMember(j) ? 1 : 0;
      Mask.push_back(Builder.getInt1(HasMember));
    }

  return ConstantVector::get(Mask);
}

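// Illustrative example: createReplicatedMask(3, 2) == {0, 0, 0, 1, 1, 1}.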
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) {
  SmallVector<int, 16> MaskVec;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < ReplicationFactor; j++)
      MaskVec.push_back(i);

  return MaskVec;
}

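// Illustrative example: createInterleaveMask(4, 2) == {0, 4, 1, 5, 2, 6, 3, 7},
// interleaving the lanes of two 4-element vectors.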
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
                                                      unsigned NumVecs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    for (unsigned j = 0; j < NumVecs; j++)
      Mask.push_back(j * VF + i);

  return Mask;
}

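// Illustrative example: createStrideMask(0, 2, 4) == {0, 2, 4, 6}, selecting
// every second element starting at element 0.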
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < VF; i++)
    Mask.push_back(Start + i * Stride);

  return Mask;
}

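// Illustrative example: createSequentialMask(0, 4, 2) == {0, 1, 2, 3, -1, -1},
// i.e. four sequential indices padded with two undef sentinels.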
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
                                                      unsigned NumInts,
                                                      unsigned NumUndefs) {
  SmallVector<int, 16> Mask;
  for (unsigned i = 0; i < NumInts; i++)
    Mask.push_back(Start + i);

  for (unsigned i = 0; i < NumUndefs; i++)
    Mask.push_back(-1);

  return Mask;
}

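// Illustrative example: createUnaryMask({0, 5, 2, 7}, 4) == {0, 1, 2, 3},
// remapping operand-1 indices (>= 4) onto the corresponding operand-0 lanes.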
llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
                                                 unsigned NumElts) {
  // Avoid casts in the loop and make sure we have a reasonable number.
  int NumEltsSigned = NumElts;
  assert(NumEltsSigned > 0 && "Expected smaller or non-zero element count");

  // If the mask chooses an element from operand 1, reduce it to choose from
  // the corresponding element of operand 0. Undef mask elements are unchanged.
  SmallVector<int, 16> UnaryMask;
  for (int MaskElt : Mask) {
    assert((MaskElt < NumEltsSigned * 2) && "Expected valid shuffle mask");
    int UnaryElt = MaskElt >= NumEltsSigned ? MaskElt - NumEltsSigned : MaskElt;
    UnaryMask.push_back(UnaryElt);
  }
  return UnaryMask;
}

/// A helper function for concatenating vectors. This function concatenates two
/// vectors having the same element type. If the second vector has fewer
/// elements than the first, it is padded with undefs.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
                                    Value *V2) {
  VectorType *VecTy1 = dyn_cast<VectorType>(V1->getType());
  VectorType *VecTy2 = dyn_cast<VectorType>(V2->getType());
  assert(VecTy1 && VecTy2 &&
         VecTy1->getScalarType() == VecTy2->getScalarType() &&
         "Expect two vectors with the same element type");

  unsigned NumElts1 = cast<FixedVectorType>(VecTy1)->getNumElements();
  unsigned NumElts2 = cast<FixedVectorType>(VecTy2)->getNumElements();
  assert(NumElts1 >= NumElts2 &&
         "Unexpected: the first vector has fewer elements");

  if (NumElts1 > NumElts2) {
    // Extend with UNDEFs.
    V2 = Builder.CreateShuffleVector(
        V2, createSequentialMask(0, NumElts2, NumElts1 - NumElts2));
  }

  return Builder.CreateShuffleVector(
      V1, V2, createSequentialMask(0, NumElts1 + NumElts2, 0));
}

Value *llvm::concatenateVectors(IRBuilderBase &Builder,
                                ArrayRef<Value *> Vecs) {
  unsigned NumVecs = Vecs.size();
  assert(NumVecs > 1 && "Should be at least two vectors");

  SmallVector<Value *, 8> ResList;
  ResList.append(Vecs.begin(), Vecs.end());
  do {
    SmallVector<Value *, 8> TmpList;
    for (unsigned i = 0; i < NumVecs - 1; i += 2) {
      Value *V0 = ResList[i], *V1 = ResList[i + 1];
      assert((V0->getType() == V1->getType() || i == NumVecs - 2) &&
             "Only the last vector may have a different type");

      TmpList.push_back(concatenateTwoVectors(Builder, V0, V1));
    }

    // Push the last vector if the total number of vectors is odd.
    if (NumVecs % 2 != 0)
      TmpList.push_back(ResList[NumVecs - 1]);

    ResList = TmpList;
    NumVecs = ResList.size();
  } while (NumVecs > 1);

  return ResList[0];
}

bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskIsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;
    return false;
  }
  return true;
}

bool llvm::maskContainsAllOneOrUndef(Value *Mask) {
  assert(isa<VectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a vector of i1");

  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return false;
  if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
    return true;
  if (isa<ScalableVectorType>(ConstMask->getType()))
    return false;
  for (unsigned
           I = 0,
           E = cast<FixedVectorType>(ConstMask->getType())->getNumElements();
       I != E; ++I) {
    if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        return true;
  }
  return false;
}

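// Illustrative example: for the constant mask <i1 1, i1 0, i1 1, i1 0>,
// possiblyDemandedEltsInMask returns an APInt with bits 0 and 2 set.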
/// TODO: This is a lot like known bits, but for
/// vectors. Is there something we can common this with?
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) {
  assert(isa<FixedVectorType>(Mask->getType()) &&
         isa<IntegerType>(Mask->getType()->getScalarType()) &&
         cast<IntegerType>(Mask->getType()->getScalarType())->getBitWidth() ==
             1 &&
         "Mask must be a fixed width vector of i1");

  const unsigned VWidth =
      cast<FixedVectorType>(Mask->getType())->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VWidth);
  if (auto *CV = dyn_cast<ConstantVector>(Mask))
    for (unsigned i = 0; i < VWidth; i++)
      if (CV->getAggregateElement(i)->isNullValue())
        DemandedElts.clearBit(i);
  return DemandedElts;
}

bool InterleavedAccessInfo::isStrided(int Stride) {
  unsigned Factor = std::abs(Stride);
  return Factor >= 2 && Factor <= MaxInterleaveGroupFactor;
}

void InterleavedAccessInfo::collectConstStrideAccesses(
    MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
    const DenseMap<Value *, const SCEV *> &Strides) {
  auto &DL = TheLoop->getHeader()->getModule()->getDataLayout();

  // Since it's desired that the load/store instructions be maintained in
  // "program order" for the interleaved access analysis, we have to visit the
  // blocks in the loop in reverse postorder (i.e., in a topological order).
  // Such an ordering will ensure that any load/store that may be executed
  // before a second load/store will precede the second load/store in
  // AccessStrideInfo.
  LoopBlocksDFS DFS(TheLoop);
  DFS.perform(LI);
  for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
    for (auto &I : *BB) {
      Value *Ptr = getLoadStorePointerOperand(&I);
      if (!Ptr)
        continue;
      Type *ElementTy = getLoadStoreType(&I);

      // Currently, codegen doesn't support cases where the type size doesn't
      // match the alloc size. Skip them for now.
      uint64_t Size = DL.getTypeAllocSize(ElementTy);
      if (Size * 8 != DL.getTypeSizeInBits(ElementTy))
        continue;

      // We don't check wrapping here because we don't know yet if Ptr will be
      // part of a full group or a group with gaps. Checking wrapping for all
      // pointers (even those that end up in groups with no gaps) will be
      // overly conservative. For full groups, wrapping should be ok since if
      // we would wrap around the address space we would do a memory access at
      // nullptr even without the transformation. The wrapping checks are
      // therefore deferred until after we've formed the interleaved groups.
      int64_t Stride =
          getPtrStride(PSE, ElementTy, Ptr, TheLoop, Strides,
                       /*Assume=*/true, /*ShouldCheckWrap=*/false).value_or(0);

      const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
      AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                              getLoadStoreAlignment(&I));
    }
}

// Analyze interleaved accesses and collect them into interleaved load and
// store groups.
//
// When generating code for an interleaved load group, we effectively hoist all
// loads in the group to the location of the first load in program order. When
// generating code for an interleaved store group, we sink all stores to the
// location of the last store. This code motion can change the order of load
// and store instructions and may break dependences.
//
// The code generation strategy mentioned above ensures that we won't violate
// any write-after-read (WAR) dependences.
//
// E.g., for the WAR dependence:  a = A[i];  // (1)
//                                A[i] = b;  // (2)
//
// The store group of (2) is always inserted at or below (2), and the load
// group of (1) is always inserted at or above (1). Thus, the instructions will
// never be reordered. All other dependences are checked to ensure the
// correctness of the instruction reordering.
//
// The algorithm visits all memory accesses in the loop in bottom-up program
// order. Program order is established by traversing the blocks in the loop in
// reverse postorder when collecting the accesses.
//
// We visit the memory accesses in bottom-up order because it can simplify the
// construction of store groups in the presence of write-after-write (WAW)
// dependences.
//
// E.g., for the WAW dependence:  A[i] = a;      // (1)
//                                A[i] = b;      // (2)
//                                A[i + 1] = c;  // (3)
//
// We will first create a store group with (3) and (2). (1) can't be added to
// this group because it and (2) are dependent. However, (1) can be grouped
// with other accesses that may precede it in program order. Note that a
// bottom-up order does not imply that WAW dependences should not be checked.
void InterleavedAccessInfo::analyzeInterleaving(
    bool EnablePredicatedInterleavedMemAccesses) {
  LLVM_DEBUG(dbgs() << "LV: Analyzing interleaved accesses...\n");
  const auto &Strides = LAI->getSymbolicStrides();

  // Holds all accesses with a constant stride.
  MapVector<Instruction *, StrideDescriptor> AccessStrideInfo;
  collectConstStrideAccesses(AccessStrideInfo, Strides);

  if (AccessStrideInfo.empty())
    return;

  // Collect the dependences in the loop.
  collectDependences();

  // Holds all interleaved store groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> StoreGroups;
  // Holds all interleaved load groups temporarily.
  SmallSetVector<InterleaveGroup<Instruction> *, 4> LoadGroups;
  // Groups added to this set cannot have new members added.
  SmallPtrSet<InterleaveGroup<Instruction> *, 4> CompletedLoadGroups;

  // Search in bottom-up program order for pairs of accesses (A and B) that can
  // form interleaved load or store groups. In the algorithm below, access A
  // precedes access B in program order. We initialize a group for B in the
  // outer loop of the algorithm, and then in the inner loop, we attempt to
  // insert each A into B's group if:
  //
  //  1. A and B have the same stride,
  //  2. A and B have the same memory object size, and
  //  3. A belongs in B's group according to its distance from B.
  //
  // Special care is taken to ensure group formation will not break any
  // dependences.
  for (auto BI = AccessStrideInfo.rbegin(), E = AccessStrideInfo.rend();
       BI != E; ++BI) {
    Instruction *B = BI->first;
    StrideDescriptor DesB = BI->second;

    // Initialize a group for B if it has an allowable stride. Even if we don't
    // create a group for B, we continue with the bottom-up algorithm to ensure
    // we don't break any of B's dependences.
    InterleaveGroup<Instruction> *GroupB = nullptr;
    if (isStrided(DesB.Stride) &&
        (!isPredicated(B->getParent()) ||
         EnablePredicatedInterleavedMemAccesses)) {
      GroupB = getInterleaveGroup(B);
      if (!GroupB) {
        LLVM_DEBUG(dbgs() << "LV: Creating an interleave group with:" << *B
                          << '\n');
        GroupB = createInterleaveGroup(B, DesB.Stride, DesB.Alignment);
        if (B->mayWriteToMemory())
          StoreGroups.insert(GroupB);
        else
          LoadGroups.insert(GroupB);
      }
    }

    for (auto AI = std::next(BI); AI != E; ++AI) {
      Instruction *A = AI->first;
      StrideDescriptor DesA = AI->second;

      // Our code motion strategy implies that we can't have dependences
      // between accesses in an interleaved group and other accesses located
      // between the first and last member of the group. Note that this also
      // means that a group can't have more than one member at a given offset.
      // The accesses in a group can have dependences with other accesses, but
      // we must ensure we don't extend the boundaries of the group such that
      // we encompass those dependent accesses.
      //
      // For example, assume we have the sequence of accesses shown below in a
      // stride-2 loop:
      //
      //  (1, 2) is a group | A[i]   = a;  // (1)
      //                    | A[i-1] = b;  // (2) |
      //                      A[i-3] = c;  // (3)
      //                      A[i]   = d;  // (4) | (2, 4) is not a group
      //
      // Because accesses (2) and (3) are dependent, we can group (2) with (1)
      // but not with (4). If we did, the dependent access (3) would be within
      // the boundaries of the (2, 4) group.
      auto DependentMember = [&](InterleaveGroup<Instruction> *Group,
                                 StrideEntry *A) -> Instruction * {
        for (uint32_t Index = 0; Index < Group->getFactor(); ++Index) {
          Instruction *MemberOfGroupB = Group->getMember(Index);
          if (MemberOfGroupB &&
              !canReorderMemAccessesForInterleavedGroups(
                  A, &*AccessStrideInfo.find(MemberOfGroupB)))
            return MemberOfGroupB;
        }
        return nullptr;
      };

      auto GroupA = getInterleaveGroup(A);
      // If A is a load, dependencies are tolerable, there's nothing to do
      // here. If both A and B belong to the same (store) group, they are
      // independent, even if dependencies have not been recorded.
      // If both GroupA and GroupB are null, there's nothing to do here.
      if (A->mayWriteToMemory() && GroupA != GroupB) {
        Instruction *DependentInst = nullptr;
        // If GroupB is a load group, we have to compare AI against all
        // members of GroupB because if any load within GroupB has a dependency
        // on AI, we need to mark GroupB as complete and also release the
        // store GroupA (if A belongs to one). The former prevents incorrect
        // hoisting of load B above store A while the latter prevents incorrect
        // sinking of store A below load B.
        if (GroupB && LoadGroups.contains(GroupB))
          DependentInst = DependentMember(GroupB, &*AI);
        else if (!canReorderMemAccessesForInterleavedGroups(&*AI, &*BI))
          DependentInst = B;

        if (DependentInst) {
          // A has a store dependence on B (or on some load within GroupB) and
          // is part of a store group. Release A's group to prevent illegal
          // sinking of A below B. A will then be free to form another group
          // with instructions that precede it.
          if (GroupA && StoreGroups.contains(GroupA)) {
            LLVM_DEBUG(dbgs() << "LV: Invalidated store group due to "
                                 "dependence between "
                              << *A << " and " << *DependentInst << '\n');
            StoreGroups.remove(GroupA);
            releaseGroup(GroupA);
          }
          // If B is a load and part of an interleave group, no earlier loads
          // can be added to B's interleave group, because this would mean the
          // DependentInst would move across store A. Mark the interleave group
          // as complete.
          if (GroupB && LoadGroups.contains(GroupB)) {
            LLVM_DEBUG(dbgs() << "LV: Marking interleave group for " << *B
                              << " as complete.\n");
            CompletedLoadGroups.insert(GroupB);
          }
        }
      }
      if (CompletedLoadGroups.contains(GroupB)) {
        // Skip trying to add A to B, continue to look for other conflicting
        // A's in groups to be released.
        continue;
      }

      // At this point, we've checked for illegal code motion. If either A or B
      // isn't strided, there's nothing left to do.
      if (!isStrided(DesA.Stride) || !isStrided(DesB.Stride))
        continue;

      // Ignore A if it's already in a group or isn't the same kind of memory
      // operation as B.
      // Note that mayReadFromMemory() isn't mutually exclusive to
      // mayWriteToMemory in the case of atomic loads. We shouldn't see those
      // here, canVectorizeMemory() should have returned false - except for the
      // case we asked for optimization remarks.
      if (isInterleaved(A) ||
          (A->mayReadFromMemory() != B->mayReadFromMemory()) ||
          (A->mayWriteToMemory() != B->mayWriteToMemory()))
        continue;

      // Check rules 1 and 2. Ignore A if its stride or size is different from
      // that of B.
      if (DesA.Stride != DesB.Stride || DesA.Size != DesB.Size)
        continue;

      // Ignore A if the memory object of A and B don't belong to the same
      // address space.
      if (getLoadStoreAddressSpace(A) != getLoadStoreAddressSpace(B))
        continue;

      // Calculate the distance from A to B.
      const SCEVConstant *DistToB = dyn_cast<SCEVConstant>(
          PSE.getSE()->getMinusSCEV(DesA.Scev, DesB.Scev));
      if (!DistToB)
        continue;
      int64_t DistanceToB = DistToB->getAPInt().getSExtValue();

      // Check rule 3. Ignore A if its distance to B is not a multiple of the
      // size.
      if (DistanceToB % static_cast<int64_t>(DesB.Size))
        continue;

      // All members of a predicated interleave-group must have the same
      // predicate, and currently must reside in the same BB.
      BasicBlock *BlockA = A->getParent();
      BasicBlock *BlockB = B->getParent();
      if ((isPredicated(BlockA) || isPredicated(BlockB)) &&
          (!EnablePredicatedInterleavedMemAccesses || BlockA != BlockB))
        continue;

      // The index of A is the index of B plus A's distance to B in multiples
      // of the size.
      int IndexA =
          GroupB->getIndex(B) + DistanceToB / static_cast<int64_t>(DesB.Size);

      // Try to insert A into B's group.
      if (GroupB->insertMember(A, IndexA, DesA.Alignment)) {
        LLVM_DEBUG(dbgs() << "LV: Inserted:" << *A << '\n'
                          << "    into the interleave group with" << *B
                          << '\n');
        InterleaveGroupMap[A] = GroupB;

        // Set the first load in program order as the insert position.
        if (A->mayReadFromMemory())
          GroupB->setInsertPos(A);
      }
    } // Iteration over A accesses.
  } // Iteration over B accesses.

  auto InvalidateGroupIfMemberMayWrap = [&](InterleaveGroup<Instruction> *Group,
                                            int Index,
                                            std::string FirstOrLast) -> bool {
    Instruction *Member = Group->getMember(Index);
    assert(Member && "Group member does not exist");
    Value *MemberPtr = getLoadStorePointerOperand(Member);
    Type *AccessTy = getLoadStoreType(Member);
    if (getPtrStride(PSE, AccessTy, MemberPtr, TheLoop, Strides,
                     /*Assume=*/false, /*ShouldCheckWrap=*/true).value_or(0))
      return false;
    LLVM_DEBUG(dbgs() << "LV: Invalidate candidate interleaved group due to "
                      << FirstOrLast
                      << " group member potentially pointer-wrapping.\n");
    releaseGroup(Group);
    return true;
  };

  // Remove interleaved groups with gaps whose memory
  // accesses may wrap around. We have to revisit the getPtrStride analysis,
  // this time with ShouldCheckWrap=true, since collectConstStrideAccesses does
  // not check wrapping (see documentation there).
  // FORNOW we use Assume=false;
  // TODO: Change to Assume=true but making sure we don't exceed the threshold
  // of runtime SCEV assumptions checks (thereby potentially failing to
  // vectorize altogether).
  // Additional optional optimizations:
  // TODO: If we are peeling the loop and we know that the first pointer
  // doesn't wrap then we can deduce that all pointers in the group don't wrap.
  // This means that we can forcefully peel the loop in order to only have to
  // check the first pointer for no-wrap. When we'll change to use Assume=true
  // we'll only need at most one runtime check per interleaved group.
  for (auto *Group : LoadGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide load would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and group member Factor - 1; If the latter doesn't exist we rely on
    // peeling (if it is a non-reversed access -- see Case 3).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    if (Group->getMember(Group->getFactor() - 1))
      InvalidateGroupIfMemberMayWrap(Group, Group->getFactor() - 1,
                                     std::string("last"));
    else {
      // Case 3: A non-reversed interleaved load group with gaps: We need
      // to execute at least one scalar epilogue iteration. This will ensure
      // we don't speculatively access memory out-of-bounds. We only need
      // to look for a member at index factor - 1, since every group must have
      // a member at index zero.
      if (Group->isReverse()) {
        LLVM_DEBUG(
            dbgs() << "LV: Invalidate candidate interleaved group due to "
                      "a reverse access with gaps.\n");
        releaseGroup(Group);
        continue;
      }
      LLVM_DEBUG(
          dbgs() << "LV: Interleaved group requires epilogue iteration.\n");
      RequiresScalarEpilogue = true;
    }
  }

  for (auto *Group : StoreGroups) {
    // Case 1: A full group. We can skip the checks; for full groups, if the
    // wide store would wrap around the address space we would do a memory
    // access at nullptr even without the transformation.
    if (Group->getNumMembers() == Group->getFactor())
      continue;

    // Interleave-store-group with gaps is implemented using masked wide store.
    // Remove interleaved store groups with gaps if
    // masked-interleaved-accesses are not enabled by the target.
    if (!EnablePredicatedInterleavedMemAccesses) {
      LLVM_DEBUG(
          dbgs() << "LV: Invalidate candidate interleaved store group due "
                    "to gaps.\n");
      releaseGroup(Group);
      continue;
    }

    // Case 2: If first and last members of the group don't wrap this implies
    // that all the pointers in the group don't wrap.
    // So we check only group member 0 (which is always guaranteed to exist),
    // and the last group member. Case 3 (scalar epilog) is not relevant for
    // stores with gaps, which are implemented with masked-store (rather than
    // speculative access, as in loads).
    if (InvalidateGroupIfMemberMayWrap(Group, 0, std::string("first")))
      continue;
    for (int Index = Group->getFactor() - 1; Index > 0; Index--)
      if (Group->getMember(Index)) {
        InvalidateGroupIfMemberMayWrap(Group, Index, std::string("last"));
        break;
      }
  }
}

void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
  // If no group had triggered the requirement to create an epilogue loop,
  // there is nothing to do.
  if (!requiresScalarEpilogue())
    return;

  bool ReleasedGroup = false;
  // Release groups requiring scalar epilogues. Note that this also removes
  // them from InterleaveGroups.
  for (auto *Group : make_early_inc_range(InterleaveGroups)) {
    if (!Group->requiresScalarEpilogue())
      continue;
    LLVM_DEBUG(
        dbgs()
        << "LV: Invalidate candidate interleaved group due to gaps that "
           "require a scalar epilogue (not allowed under optsize) and cannot "
           "be masked (not enabled). \n");
    releaseGroup(Group);
    ReleasedGroup = true;
  }
  assert(ReleasedGroup && "At least one group must be invalidated, as a "
                          "scalar epilogue was required");
  (void)ReleasedGroup;
  RequiresScalarEpilogue = false;
}

template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
  llvm_unreachable("addMetadata can only be used for Instruction");
}

namespace llvm {
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const {
  SmallVector<Value *, 4> VL;
  std::transform(Members.begin(), Members.end(), std::back_inserter(VL),
                 [](std::pair<int, Instruction *> p) { return p.second; });
  propagateMetadata(NewInst, VL);
}
} // namespace llvm