//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum number of iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///        A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

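// For example, for an access A[i * Stride] whose pointer SCEV is
// {%A,+,(4 * %Stride)}<%loop>, a PtrToStride entry for %Stride rewrites the
// expression to {%A,+,4}<%loop> and records the predicate "%Stride == 1" in
// PSE, so later analysis can assume a unit stride. (Illustrative SCEVs,
// assuming a 4-byte element type.)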
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI == PtrToStride.end())
    // For a non-symbolic stride, just return the original expression.
    return OrigSCEV;

  Value *StrideVal = stripIntegerCast(SI->second);

  ScalarEvolution *SE = PSE.getSE();
  const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
  const auto *CT =
      static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

  PSE.addPredicate(*SE->getEqualPredicate(U, CT));
  auto *Expr = PSE.getSCEV(Ptr);

  LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                    << " by: " << *Expr << "\n");
  return Expr;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
      AddressSpace(RtCheck.Pointers[Index]
                       .PointerValue->getType()
                       ->getPointerAddressSpace()) {
  Members.push_back(Index);
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
/// iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
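///
/// For example, with 4-byte elements (SizeOfElt = 4), A = %base, Step = 4 and
/// N = 7, the last access is B = %base + 28, giving the interval
/// [Start, End) = [%base, %base + 32). Two such intervals conflict only if
/// they overlap, e.g. [%base, %base + 32) and [%base + 32, %base + 64) are
/// disjoint and need no runtime check.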
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride replaced scev.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp)) {
    ScStart = ScEnd = Sc;
  } else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
  }
  // Add the size of the pointed element to ScEnd.
  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  Type *IdxTy = DL.getIndexType(Ptr->getType());
  const SCEV *EltSizeSCEV =
      SE->getStoreSizeOfExpr(IdxTy, Ptr->getType()->getPointerElementType());
  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
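///
/// For example, with I = (%a + 4) and J = (%a + 12), the difference J - I
/// folds to the constant 8, which is non-negative, so I is the minimum. If
/// the difference does not fold to a constant (say I = %a and J = %b), no
/// answer can be given and nullptr is returned.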
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
                                         RuntimePointerChecking &RtCheck) {
  return addPointer(
      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
      *RtCheck.SE);
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
                                         const SCEV *End, unsigned AS,
                                         ScalarEvolution &SE) {
  assert(AddressSpace == AS &&
         "all pointers in a checking group must be in the same address space");

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
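  //
  // For example, if one equivalence class holds %p, %p + 4 and %p + 8, the
  // first pointer seeds a group whose bounds are its own [Start, End)
  // interval, and the two remaining pointers differ from those bounds by the
  // constants 4 and 8, so both are merged into the same group and only the
  // group's widened [Low, High) bounds are checked against other groups.
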
  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      unsigned Pointer = PointerI->second;
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (RuntimeCheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer, *this)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
                 MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value *>(Loc.Ptr);
    AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory access and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value *, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that, this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to access from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive id to access from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (const auto &A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set, if there are no writes
    // or a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                          << *Access.getPointer() << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are allowed to overlap.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (const auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value *, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of access to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (const auto &AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (const auto &AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts. If
          // this is a read only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          getUnderlyingObjects(Ptr, TempObjects, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases, don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
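///
/// For example, for "%gep = getelementptr inbounds i32, i32* %A, i64 %idx"
/// where "%idx = add nsw i64 %iv, 1" and %iv is an NSW AddRec of \p L, the
/// GEP has a single non-constant index whose SCEV is an NSW AddRec, which is
/// enough to prove the pointer monotonic. (Illustrative IR names.)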
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : GEP->indices())
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }

  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily proven to not wrap.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
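/// For example, an i32 access whose pointer SCEV is {%A,+,4}<%L> yields a
/// stride of 1 (StepVal 4 / element size 4), {%A,+,-4}<%L> yields -1, and a
/// symbolic or non-affine step yields 0, meaning the stride is unknown.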
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
                                    Value *PtrB, const DataLayout &DL,
                                    ScalarEvolution &SE, bool StrictCheck,
                                    bool CheckType) {
  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
  assert(cast<PointerType>(PtrA->getType())
      ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
  assert(cast<PointerType>(PtrB->getType())
      ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return 0;

  // Make sure that the element types are the same if required.
  if (CheckType && ElemTyA != ElemTyB)
    return None;

  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();

  // Check that the address spaces match.
  if (ASA != ASB)
    return None;
  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  int Val;
  if (PtrA1 == PtrB1) {
    // Retrieve the address space again as pointer stripping now tracks through
    // `addrspacecast`.
    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
    // Check that the address spaces match and that the pointers are valid.
    if (ASA != ASB)
      return None;

    IdxWidth = DL.getIndexSizeInBits(ASA);
    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
    OffsetB = OffsetB.sextOrTrunc(IdxWidth);

    OffsetB -= OffsetA;
    Val = OffsetB.getSExtValue();
  } else {
    // Otherwise compute the distance with SCEV between the base pointers.
    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
    const auto *Diff =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
    if (!Diff)
      return None;
    Val = Diff->getAPInt().getSExtValue();
  }
  int Size = DL.getTypeStoreSize(ElemTyA);
  int Dist = Val / Size;

  // Ensure that the calculated distance matches the type-based one after all
  // the bitcasts removal in the provided pointers.
  if (!StrictCheck || Dist * Size == Val)
    return Dist;
  return None;
}

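// sortPtrAccesses computes, for each pointer in VL, its distance from VL[0]
// and reports whether the pointers are already in consecutive order. For
// example, pointers at element offsets {0, 2, 1} relative to VL[0] are not
// consecutive, and SortedIndices is filled with {0, 2, 1} so that visiting
// VL in that index order walks the offsets 0, 1, 2.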
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
                           const DataLayout &DL, ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  // Walk over the pointers, and map each of them to an offset relative to
  // first pointer in the array.
  Value *Ptr0 = VL[0];

  using DistOrdPair = std::pair<int64_t, int>;
  auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
    return L.first < R.first;
  };
  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
  Offsets.emplace(0, 0);
  int Cnt = 1;
  bool IsConsecutive = true;
  for (auto *Ptr : VL.drop_front()) {
    Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
                                         /*StrictCheck=*/true);
    if (!Diff)
      return false;

    // Check if the pointer with the same offset is found.
    int64_t Offset = *Diff;
    auto Res = Offsets.emplace(Offset, Cnt);
    if (!Res.second)
      return false;
    // Consecutive order if the inserted element is the last one.
    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
    ++Cnt;
  }
  SortedIndices.clear();
  if (!IsConsecutive) {
    // Fill SortedIndices array only if it is non-consecutive.
    SortedIndices.resize(VL.size());
    Cnt = 0;
    for (const std::pair<int64_t, int> &Pair : Offsets) {
      SortedIndices[Cnt] = Pair.second;
      ++Cnt;
    }
  }
  return true;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
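/// For example, two i32 loads of A[i] and A[i+1] are consecutive: their
/// pointers differ by exactly one element, so getPointersDiff returns 1.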
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  if (!PtrA || !PtrB)
    return false;
  Type *ElemTyA = getLoadStoreType(A);
  Type *ElemTyB = getLoadStoreType(B);
  Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
                                       /*StrictCheck=*/true, CheckType);
  return Diff && *Diff == 1;
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.
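  //
  // For example, with i32 elements (TypeByteSize = 4) and Distance = 12 bytes,
  // the slowdown threshold below is 8 * 4 = 32 iterations. The first candidate
  // VF is 8 bytes; 12 is not a multiple of 8 and 12 / 8 = 1 < 32, so the
  // maximum safe VF drops to 4 bytes, which is below 2 * TypeByteSize, and a
  // forwarding conflict is reported.
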
  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load are
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >> 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
/// possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
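///
/// Here, with 4-byte elements and stride 1, the dependence distance is 4*D
/// bytes while BackedgeTakenCount * Step is (D-1)*4 bytes, so their
/// difference is the constant 4 > 0 and the accesses are provably
/// independent for any D.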
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
  //
  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}

1471 MemoryDepChecker::Dependence::DepType
1472 MemoryDepChecker::isDependent(const MemAccessInfo
&A
, unsigned AIdx
,
1473 const MemAccessInfo
&B
, unsigned BIdx
,
1474 const ValueToValueMap
&Strides
) {
1475 assert (AIdx
< BIdx
&& "Must pass arguments in program order");
1477 Value
*APtr
= A
.getPointer();
1478 Value
*BPtr
= B
.getPointer();
1479 bool AIsWrite
= A
.getInt();
1480 bool BIsWrite
= B
.getInt();
1482 // Two reads are independent.
1483 if (!AIsWrite
&& !BIsWrite
)
1484 return Dependence::NoDep
;
1486 // We cannot check pointers in different address spaces.
1487 if (APtr
->getType()->getPointerAddressSpace() !=
1488 BPtr
->getType()->getPointerAddressSpace())
1489 return Dependence::Unknown
;
1491 int64_t StrideAPtr
= getPtrStride(PSE
, APtr
, InnermostLoop
, Strides
, true);
1492 int64_t StrideBPtr
= getPtrStride(PSE
, BPtr
, InnermostLoop
, Strides
, true);
1494 const SCEV
*Src
= PSE
.getSCEV(APtr
);
1495 const SCEV
*Sink
= PSE
.getSCEV(BPtr
);
1497 // If the induction step is negative we have to invert source and sink of the
1499 if (StrideAPtr
< 0) {
1500 std::swap(APtr
, BPtr
);
1501 std::swap(Src
, Sink
);
1502 std::swap(AIsWrite
, BIsWrite
);
1503 std::swap(AIdx
, BIdx
);
1504 std::swap(StrideAPtr
, StrideBPtr
);
1507 const SCEV
*Dist
= PSE
.getSE()->getMinusSCEV(Sink
, Src
);
1509 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src
<< "Sink Scev: " << *Sink
1510 << "(Induction step: " << StrideAPtr
<< ")\n");
1511 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap
[AIdx
] << " to "
1512 << *InstMap
[BIdx
] << ": " << *Dist
<< "\n");
1514 // Need accesses with constant stride. We don't want to vectorize
1515 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1516 // the address space.
1517 if (!StrideAPtr
|| !StrideBPtr
|| StrideAPtr
!= StrideBPtr
){
1518 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1519 return Dependence::Unknown
;
1522 Type
*ATy
= APtr
->getType()->getPointerElementType();
1523 Type
*BTy
= BPtr
->getType()->getPointerElementType();
1524 auto &DL
= InnermostLoop
->getHeader()->getModule()->getDataLayout();
1525 uint64_t TypeByteSize
= DL
.getTypeAllocSize(ATy
);
1526 uint64_t Stride
= std::abs(StrideAPtr
);
1527 const SCEVConstant
*C
= dyn_cast
<SCEVConstant
>(Dist
);
1529 if (!isa
<SCEVCouldNotCompute
>(Dist
) &&
1530 TypeByteSize
== DL
.getTypeAllocSize(BTy
) &&
1531 isSafeDependenceDistance(DL
, *(PSE
.getSE()),
1532 *(PSE
.getBackedgeTakenCount()), *Dist
, Stride
,
1534 return Dependence::NoDep
;
1536 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1537 FoundNonConstantDistanceDependence
= true;
1538 return Dependence::Unknown
;
1541 const APInt
&Val
= C
->getAPInt();
1542 int64_t Distance
= Val
.getSExtValue();
1544 // Attempt to prove strided accesses independent.
1545 if (std::abs(Distance
) > 0 && Stride
> 1 && ATy
== BTy
&&
1546 areStridedAccessesIndependent(std::abs(Distance
), Stride
, TypeByteSize
)) {
1547 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1548 return Dependence::NoDep
;
1551 // Negative distances are not plausible dependencies.
1552 if (Val
.isNegative()) {
1553 bool IsTrueDataDependence
= (AIsWrite
&& !BIsWrite
);
1554 if (IsTrueDataDependence
&& EnableForwardingConflictDetection
&&
1555 (couldPreventStoreLoadForward(Val
.abs().getZExtValue(), TypeByteSize
) ||
1557 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1558 return Dependence::ForwardButPreventsForwarding
;
1561 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1562 return Dependence::Forward
;
1565 // Write to the same location with the same size.
1566 // Could be improved to assert type sizes are the same (i32 == float, etc).
1569 return Dependence::Forward
;
1571 dbgs() << "LAA: Zero dependence difference but different types\n");
1572 return Dependence::Unknown
;
1575 assert(Val
.isStrictlyPositive() && "Expect a positive value");
1580 << "LAA: ReadWrite-Write positive dependency with different types\n");
1581 return Dependence::Unknown
;
1584 // Bail out early if passed-in parameters make vectorization not feasible.
1585 unsigned ForcedFactor
= (VectorizerParams::VectorizationFactor
?
1586 VectorizerParams::VectorizationFactor
: 1);
1587 unsigned ForcedUnroll
= (VectorizerParams::VectorizationInterleave
?
1588 VectorizerParams::VectorizationInterleave
: 1);
1589 // The minimum number of iterations for a vectorized/unrolled version.
1590 unsigned MinNumIter
= std::max(ForcedFactor
* ForcedUnroll
, 2U);

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the trailing gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed to vectorize all iterations except the last one is
  // 4 * 2 * (MinNumIter - 1); the last iteration needs 4. So the minimum
  // distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
  // 12, which is less than the distance of 14.
  //
  // If MinNumIter is 4 (say a user forces the vectorization factor to be 4),
  // the minimum distance needed is 28, which is greater than the distance.
  // It is not safe to do vectorization.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then we analyze the accesses on array A, for which the minimum
  // distance needed is 8, which exceeds the max safe distance of 2, so
  // vectorization is forbidden. But actually both A and B could be vectorized
  // by 2 iterations.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;
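
  // Illustrative arithmetic (assumed numbers): with MaxSafeDepDistBytes = 24,
  // TypeByteSize = 4 and Stride = 2, each vectorized iteration covers
  // 4 * 2 = 8 bytes, so MaxVF = 24 / 8 = 3 lanes and
  // MaxVFInBits = 3 * 4 * 8 = 96 bits.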
  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      bool AIIsWrite = AI->getInt();
      // Check loads only against next equivalent class, but stores also
      // against other stores in the same equivalence class - to the same
      // address.
      EquivalenceClasses<MemAccessInfo>::member_iterator OI =
          (AIIsWrite ? AI : std::next(AI));
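
      // Illustration (an assumed scenario, not from the original comments):
      // if the class holds the members (P, write) and (Q, read), the write
      // member starts pairing at itself, so its own stores are checked
      // against each other (store-store to the same address) as well as
      // against Q's loads, while the read member starts at the next member
      // and is never paired with its own loads.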
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          // Scan all accesses of another equivalence class, but only the next
          // accesses of the same equivalent class.
          for (std::vector<unsigned>::iterator
                   I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
                   I2E = (OI == AI ? I1E : Accesses[*OI].end());
               I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->isInnermost()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (isa<SCEVCouldNotCompute>(ExitCount)) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value *, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  bool HasComplexMemInst = false;

  // A runtime check is only legal to insert if there are no convergent calls.
  HasConvergentOp = false;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  const bool EnableMemAccessVersioningOfLoop =
      EnableMemAccessVersioning &&
      !TheLoop->getHeader()->getParent()->hasOptSize();

  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores. Also detect any
    // convergent instructions.
    for (Instruction &I : *BB) {
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isConvergent())
          HasConvergentOp = true;
      }

      // If we have found both a non-vectorizable memory instruction and a
      // convergent operation in this loop, there is no reason to continue
      // the search.
      if (HasComplexMemInst && HasConvergentOp) {
        CanVecMem = false;
        return;
      }

      // Avoid hitting recordAnalysis multiple times.
      if (HasComplexMemInst)
        continue;

      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            !VFDatabase::getMappings(*Call).empty())
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld) {
          recordAnalysis("CantVectorizeInstruction", Ld)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!Ld->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          HasComplexMemInst = true;
          continue;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          HasComplexMemInst = true;
          continue;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioningOfLoop)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  if (HasComplexMemInst) {
    CanVecMem = false;
    return;
  }

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }

    // See if there is an unsafe dependency between a load to a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (HasConvergentOp) {
    recordAnalysis("CantInsertRuntimeCheckWithConvergent")
        << "cannot add control dependency to convergent operation";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
                         "would be needed with a convergent operation\n");
    CanVecMem = false;
    return;
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back
    // to using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
                                                        DL, CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = getLoadStorePointerOperand(MemAccess);
  if (!Ptr)
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; For example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AAResults *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasConvergentOp(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (HasConvergentOp)
    OS.indent(Depth) << "Has convergent operation in loop\n";

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
  initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI;
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

  Pass *createLAAPass() {
    return new LoopAccessLegacyAnalysis();
  }

} // end namespace llvm