lib/Analysis/LoopAccessAnalysis.cpp

//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation for the loop memory dependence that was originally
// developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
    VectorizationFactor("force-vector-width", cl::Hidden,
                        cl::desc("Sets the SIMD width. Zero is autoselect."),
                        cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
    VectorizationInterleave("force-vector-interleave", cl::Hidden,
                            cl::desc("Sets the vectorization interleave count. "
                                     "Zero is autoselect."),
                            cl::location(
                                VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;

/// The maximum number of iterations used to merge memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}
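
/// If \p V is a cast of an integer-typed value, look through the cast and
/// return the underlying integer operand; otherwise return \p V unchanged.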
Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}
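
/// Return the SCEV for \p Ptr, rewriting any symbolic stride recorded in
/// \p PtrToStride to the constant 1 and registering the corresponding
/// "stride == 1" equality as a SCEV predicate on \p PSE. If no stride is
/// mapped for the pointer, the original SCEV is returned unchanged.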
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                      << " by: " << *Expr << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
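///
/// For example (illustrative, with byte-sized elements, SizeOfElt = 1): if
/// A = 100, Step = -8 and N = 3, then B = 100 - 8*3 = 76, so
/// Start = UMIN(100, 76) = 76 and End = UMAX(100, 76) + 1 = 101.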
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride-replaced SCEV.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with negative step, the upper bound is ScStart and the
    // lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
    // Add the size of the pointed element to ScEnd.
    unsigned EltSize =
        Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
    const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerChecking::PointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<PointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimePointerChecking::CheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimePointerChecking::CheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(const CheckingPtrGroup &M,
                                           const CheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimePointerChecking::CheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.
  //
  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
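  //
  // For example (illustrative): pointers covering the byte ranges [A, A+4),
  // [A+4, A+8) and [A+8, A+12) differ from the group bounds by constants, so
  // the greedy pass can merge them into one group with Low = A, High = A+12.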

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(CheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<CheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      unsigned Pointer = PositionMap[MI->getPointer()];
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (CheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain amount of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable. If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(CheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AliasAnalysis *AA,
                 LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no-alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used. Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object and
  /// intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks. Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded. When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool NeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive id to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 1;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;

    // We assign consecutive ids to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
      MemAccessInfo Access(Ptr, IsWrite);

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;

      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // If we have at least two writes or one write and a read then we need to
    // check them. But there is no need for checks if there is only one
    // dependence set for this alias set.
    //
    // Note that this function computes CanDoRT and NeedRTCheck independently.
    // For example CanDoRT=false, NeedRTCheck=false means that we have a pointer
    // for which we couldn't find the bounds but we don't actually need to emit
    // any checks so it does not matter.
    bool NeedsAliasSetRTCheck = false;
    if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2))
      NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
                             (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoSetRt flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    NeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (NeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  RtCheck.Need = NeedRTCheck;

  bool CanDoRTIfNeeded = !NeedRTCheck || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " ("
             << (A.getInt() ? "write"
                            : (ReadOnlyPtr.count(A.getPointer()) ? "read-only"
                                                                 : "read"))
             << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write - check other reads and writes for conflicts.
          // If this is a read only check other writes for conflicts (but only
          // if there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.
  //
  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume constant for the other operand so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type"
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and unit stride would have
  // to access the pointer value "0" which is undefined behavior in address
  // space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA: Pointer: " << *Ptr << "\n"
                        << "LAA: SCEV: " << *AR << "\n"
                        << "LAA: Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}
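
/// Sort the pointers in \p VL by their offset from the first pointer in the
/// list, storing the resulting permutation in \p SortedIndices (cleared when
/// the order turns out to be already consecutive). Returns false if the
/// pointers live in different address spaces or underlying objects, or if
/// their pairwise distances are not distinct compile-time constants.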
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                           ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
  OffValPairs.reserve(VL.size());

  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
  Value *Ptr0 = VL[0];
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);

  llvm::SmallSet<int64_t, 4> Offsets;
  for (auto *Ptr : VL) {
    // TODO: Outline this code as a special, more time consuming, version of
    // computeConstantDifference() function.
    if (Ptr->getType()->getPointerAddressSpace() !=
        Ptr0->getType()->getPointerAddressSpace())
      return false;
    // If a pointer refers to a different underlying object, bail - the
    // pointers are by definition incomparable.
    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
    if (CurrObj != Obj0)
      return false;

    const SCEV *Scev = SE.getSCEV(Ptr);
    const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0));
    // The pointers may not have a constant offset from each other, or SCEV
    // may just not be smart enough to figure out they do. Regardless,
    // there's nothing we can do.
    if (!Diff)
      return false;

    // Bail if a pointer with the same offset has already been seen.
    int64_t Offset = Diff->getAPInt().getSExtValue();
    if (!Offsets.insert(Offset).second)
      return false;
    OffValPairs.emplace_back(Offset, Ptr);
  }
  SortedIndices.clear();
  SortedIndices.resize(VL.size());
  std::iota(SortedIndices.begin(), SortedIndices.end(), 0);

  // Sort the memory accesses by offset.
  std::stable_sort(SortedIndices.begin(), SortedIndices.end(),
                   [&OffValPairs](unsigned Left, unsigned Right) {
                     return OffValPairs[Left].first < OffValPairs[Right].first;
                   });

  // Check if the order is already consecutive.
  if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
        return I == SortedIndices[I];
      }))
    SortedIndices.clear();

  return true;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
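/// For example (illustrative), an i32 load from %p followed by an i32 load
/// from getelementptr inbounds (i32, i32* %p, i64 1) is consecutive: after
/// stripping the constant offsets both sides reduce to %p, and the offset
/// delta (4 bytes) equals the store size of i32.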
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
  APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
  const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}
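
/// Returns true if a dependence at byte distance \p Distance between a store
/// and a later load could defeat store-to-load forwarding once the loop is
/// vectorized; as a side effect it may shrink MaxSafeDepDistBytes. For
/// example (illustrative): with TypeByteSize = 4 and Distance = 12, a vector
/// width of 8 bytes is misaligned with the distance (12 % 8 != 0) and covers
/// it within 12/8 = 1 iteration, so the largest forwarding-safe width drops
/// below the 8-byte minimum and the function reports a conflict.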
bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
                                                    uint64_t TypeByteSize) {
  // If loads occur at a distance that is not a multiple of a feasible vector
  // factor, store-load forwarding does not take place.
  // Positive dependences might cause troubles because vectorizing them might
  // prevent store-load forwarding making vectorized code run a lot slower.
  //   a[i] = a[i-3] ^ a[i-8];
  //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
  //   hence on your typical architecture store-load forwarding does not take
  //   place. Vectorizing in such cases does not make sense.
  // Store-load forwarding distance.

  // After this many iterations store-to-load forwarding conflicts should not
  // cause any slowdowns.
  const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
  // Maximum vector factor.
  uint64_t MaxVFWithoutSLForwardIssues = std::min(
      VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);

  // Compute the smallest VF at which the store and load would be misaligned.
  for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
       VF *= 2) {
    // If the number of vector iterations between the store and the load is
    // small we could incur conflicts.
    if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
      MaxVFWithoutSLForwardIssues = (VF >>= 1);
      break;
    }
  }

  if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
    LLVM_DEBUG(
        dbgs() << "LAA: Distance " << Distance
               << " that could cause a store-load forwarding conflict\n");
    return true;
  }

  if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
      MaxVFWithoutSLForwardIssues !=
          VectorizerParams::MaxVectorWidth * TypeByteSize)
    MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
  return false;
}

void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
  if (Status < S)
    Status = S;
}

/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it
/// is possible to prove statically that the dependence distance is larger
/// than the range that the accesses will travel through the execution of
/// the loop. If so, return true; false otherwise. This is useful for
/// example in loops such as the following (PR31098):
///     for (i = 0; i < D; ++i) {
///                = out[i];
///       out[i+D] =
///     }
static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
                                     const SCEV &BackedgeTakenCount,
                                     const SCEV &Dist, uint64_t Stride,
                                     uint64_t TypeByteSize) {

  // If we can prove that
  //      (**) |Dist| > BackedgeTakenCount * Step
  // where Step is the absolute stride of the memory accesses in bytes,
  // then there is no dependence.
  //
  // Rationale:
  // We basically want to check if the absolute distance (|Dist/Step|)
  // is >= the loop iteration count (or > BackedgeTakenCount).
  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
  // Section 4.2.1); Note, that for vectorization it is sufficient to prove
  // that the dependence distance is >= VF; This is checked elsewhere.
  // But in some cases we can prune unknown dependence distances early, and
  // even before selecting the VF, and without a runtime test, by comparing
  // the distance against the loop iteration count. Since the vectorized code
  // will be executed only if LoopCount >= VF, proving distance >= LoopCount
  // also guarantees that distance >= VF.
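  //
  // For example (illustrative): for out[i] vs out[i+D] with 4-byte elements,
  // |Dist| = 4*D. If the backedge-taken count is known to be at most D-1,
  // then BackedgeTakenCount * Step <= (D-1)*4 < 4*D = |Dist|, so the accesses
  // are independent regardless of the VF chosen later.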

  const uint64_t ByteStride = Stride * TypeByteSize;
  const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
  const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);

  const SCEV *CastedDist = &Dist;
  const SCEV *CastedProduct = Product;
  uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
  uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());

  // The dependence distance can be positive/negative, so we sign extend Dist;
  // The multiplication of the absolute stride in bytes and the
  // backedgeTakenCount is non-negative, so we zero extend Product.
  if (DistTypeSize > ProductTypeSize)
    CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
  else
    CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());

  // Is Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= Dist)
  const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
  // (If so, then we have proven (**) because |Dist| >= -1*Dist)
  const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
  Minus = SE.getMinusSCEV(NegDist, CastedProduct);
  if (SE.isKnownPositive(Minus))
    return true;

  return false;
}

/// Check the dependence for two accesses with the same stride \p Stride.
/// \p Distance is the positive distance and \p TypeByteSize is type size in
/// bytes.
///
/// \returns true if they are independent.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
                                          uint64_t TypeByteSize) {
  assert(Stride > 1 && "The stride must be greater than 1");
  assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
  assert(Distance > 0 && "The distance must be non-zero");

  // Skip if the distance is not a multiple of the type byte size.
  if (Distance % TypeByteSize)
    return false;

  uint64_t ScaledDist = Distance / TypeByteSize;

  // No dependence if the scaled distance is not a multiple of the stride.
  // E.g.
  //      for (i = 0; i < 1024 ; i += 4)
  //        A[i+2] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 2, stride is 4):
  //     | A[0] |      |      |      | A[4] |      |      |      |
  //     |      |      | A[2] |      |      |      | A[6] |      |
  //
  // E.g.
  //      for (i = 0; i < 1024 ; i += 3)
  //        A[i+4] = A[i] + 1;
  //
  // Two accesses in memory (scaled distance is 4, stride is 3):
  //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
  //     |      |      |      |      | A[4] |      |      | A[7] |      |
  return ScaledDist % Stride;
}
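
/// Classify the dependence between access \p A (at program-order index
/// \p AIdx) and access \p B (at index \p BIdx), with \p AIdx < \p BIdx,
/// using \p Strides to rewrite symbolic strides.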
1429 MemoryDepChecker::Dependence::DepType
1430 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1431 const MemAccessInfo &B, unsigned BIdx,
1432 const ValueToValueMap &Strides) {
1433 assert (AIdx < BIdx && "Must pass arguments in program order");
1435 Value *APtr = A.getPointer();
1436 Value *BPtr = B.getPointer();
1437 bool AIsWrite = A.getInt();
1438 bool BIsWrite = B.getInt();
1440 // Two reads are independent.
1441 if (!AIsWrite && !BIsWrite)
1442 return Dependence::NoDep;
1444 // We cannot check pointers in different address spaces.
1445 if (APtr->getType()->getPointerAddressSpace() !=
1446 BPtr->getType()->getPointerAddressSpace())
1447 return Dependence::Unknown;
1449 int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
1450 int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);
1452 const SCEV *Src = PSE.getSCEV(APtr);
1453 const SCEV *Sink = PSE.getSCEV(BPtr);
1455 // If the induction step is negative we have to invert source and sink of the
1456 // dependence.
1457 if (StrideAPtr < 0) {
1458 std::swap(APtr, BPtr);
1459 std::swap(Src, Sink);
1460 std::swap(AIsWrite, BIsWrite);
1461 std::swap(AIdx, BIdx);
1462 std::swap(StrideAPtr, StrideBPtr);
1465 const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1467 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1468 << "(Induction step: " << StrideAPtr << ")\n");
1469 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1470 << *InstMap[BIdx] << ": " << *Dist << "\n");
1472 // Need accesses with constant stride. We don't want to vectorize
1473 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1474 // the address space.
1475 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr){
1476 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1477 return Dependence::Unknown;
1480 Type *ATy = APtr->getType()->getPointerElementType();
1481 Type *BTy = BPtr->getType()->getPointerElementType();
1482 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1483 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1484 uint64_t Stride = std::abs(StrideAPtr);
1485 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1486 if (!C) {
1487 if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
1488 isSafeDependenceDistance(DL, *(PSE.getSE()),
1489 *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1490 TypeByteSize))
1491 return Dependence::NoDep;
1493 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1494 FoundNonConstantDistanceDependence = true;
1495 return Dependence::Unknown;
1498 const APInt &Val = C->getAPInt();
1499 int64_t Distance = Val.getSExtValue();
1501 // Attempt to prove strided accesses independent.
1502 if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
1503 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1504 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1505 return Dependence::NoDep;
1508 // Negative distances are not plausible dependencies.
1509 if (Val.isNegative()) {
1510 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1511 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1512 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1513 ATy != BTy)) {
1514 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1515 return Dependence::ForwardButPreventsForwarding;
1518 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1519 return Dependence::Forward;

  // Write to the same location with the same size.
  // Could be improved to assert type sizes are the same (i32 == float, etc).
  if (Val == 0) {
    if (ATy == BTy)
      return Dependence::Forward;
    LLVM_DEBUG(
        dbgs() << "LAA: Zero dependence difference but different types\n");
    return Dependence::Unknown;
  }

  assert(Val.isStrictlyPositive() && "Expect a positive value");

  if (ATy != BTy) {
    LLVM_DEBUG(
        dbgs()
        << "LAA: ReadWrite-Write positive dependency with different types\n");
    return Dependence::Unknown;
  }

  // Bail out early if passed-in parameters make vectorization not feasible.
  unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
                           VectorizerParams::VectorizationFactor : 1);
  unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
                           VectorizerParams::VectorizationInterleave : 1);
  // The minimum number of iterations for a vectorized/unrolled version.
  unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
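
  // A hypothetical example: running with -force-vector-width=4 and
  // -force-vector-interleave=2 gives ForcedFactor == 4 and ForcedUnroll == 2,
  // so MinNumIter == 8; without either flag both default to 1 and MinNumIter
  // falls back to the lower bound of 2.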

  // It's not vectorizable if the distance is smaller than the minimum distance
  // needed for a vectorized/unrolled version. Vectorizing one iteration in
  // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
  // TypeByteSize (no need to add the last gap distance).
  //
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      foo(int *A) {
  //        int *B = (int *)((char *)A + 14);
  //        for (i = 0 ; i < 1024 ; i += 2)
  //          B[i] = A[i] + 1;
  //      }
  //
  // Two accesses in memory (stride is 2):
  //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
  //                              | B[0] |      | B[2] |      | B[4] |
  //
  // The distance needed to vectorize all iterations except the last one is
  // 4 * 2 * (MinNumIter - 1); the distance needed for the last iteration is 4.
  // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
  //
  // If MinNumIter is 2, the loop is vectorizable as the minimum distance
  // needed is 12, which is less than the distance of 14 bytes.
  //
  // If MinNumIter is 4 (say a user forces the vectorization factor to be 4),
  // the minimum distance needed is 28, which is greater than the distance of
  // 14 bytes. It is not safe to vectorize.
  uint64_t MinDistanceNeeded =
      TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
  if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
                      << Distance << '\n');
    return Dependence::Backward;
  }

  // Unsafe if the minimum distance needed is greater than max safe distance.
  if (MinDistanceNeeded > MaxSafeDepDistBytes) {
    LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
                      << MinDistanceNeeded << " size in bytes");
    return Dependence::Backward;
  }

  // Positive distance bigger than max vectorization factor.
  // FIXME: Should use max factor instead of max distance in bytes, which could
  // not handle different types.
  // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
  //      void foo (int *A, char *B) {
  //        for (unsigned i = 0; i < 1024; i++) {
  //          A[i+2] = A[i] + 1;
  //          B[i+2] = B[i] + 1;
  //        }
  //      }
  //
  // This case is currently unsafe according to the max safe distance. If we
  // analyze the two accesses on array B, the max safe dependence distance
  // is 2. Then when we analyze the accesses on array A, the minimum distance
  // needed is 8, which is greater than 2, so vectorization would be forbidden.
  // But in fact both A and B could be vectorized two iterations at a time.
  MaxSafeDepDistBytes =
      std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);

  bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
  if (IsTrueDataDependence && EnableForwardingConflictDetection &&
      couldPreventStoreLoadForward(Distance, TypeByteSize))
    return Dependence::BackwardVectorizableButPreventsForwarding;

  uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
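
  // A worked example (illustrative): for A[i + 4] = A[i] + 1 with 4-byte ints,
  // the constant distance is 16 bytes and Stride == 1, so MaxSafeDepDistBytes
  // becomes 16 and MaxVF == 16 / (4 * 1) == 4; at most four iterations can be
  // executed together without violating the dependence.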
  LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
                    << " with max VF = " << MaxVF << '\n');
  uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
  MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
  return Dependence::BackwardVectorizable;
}

bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
                                   MemAccessInfoList &CheckDeps,
                                   const ValueToValueMap &Strides) {

  MaxSafeDepDistBytes = -1;
  SmallPtrSet<MemAccessInfo, 8> Visited;
  for (MemAccessInfo CurAccess : CheckDeps) {
    if (Visited.count(CurAccess))
      continue;

    // Get the relevant memory access set.
    EquivalenceClasses<MemAccessInfo>::iterator I =
      AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));

    // Check accesses within this set.
    EquivalenceClasses<MemAccessInfo>::member_iterator AI =
        AccessSets.member_begin(I);
    EquivalenceClasses<MemAccessInfo>::member_iterator AE =
        AccessSets.member_end();

    // Check every access pair.
    while (AI != AE) {
      Visited.insert(*AI);
      EquivalenceClasses<MemAccessInfo>::member_iterator OI = std::next(AI);
      while (OI != AE) {
        // Check every accessing instruction pair in program order.
        for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
             I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
          for (std::vector<unsigned>::iterator I2 = Accesses[*OI].begin(),
               I2E = Accesses[*OI].end(); I2 != I2E; ++I2) {
            auto A = std::make_pair(&*AI, *I1);
            auto B = std::make_pair(&*OI, *I2);

            assert(*I1 != *I2);
            if (*I1 > *I2)
              std::swap(A, B);

            Dependence::DepType Type =
                isDependent(*A.first, A.second, *B.first, B.second, Strides);
            mergeInStatus(Dependence::isSafeForVectorization(Type));

            // Gather dependences unless we accumulated MaxDependences
            // dependences. In that case return as soon as we find the first
            // unsafe dependence. This puts a limit on this quadratic
            // algorithm.
            if (RecordDependences) {
              if (Type != Dependence::NoDep)
                Dependences.push_back(Dependence(A.second, B.second, Type));

              if (Dependences.size() >= MaxDependences) {
                RecordDependences = false;
                Dependences.clear();
                LLVM_DEBUG(dbgs()
                           << "Too many dependences, stopped recording\n");
              }
            }
            if (!RecordDependences && !isSafeForVectorization())
              return false;
          }
        ++OI;
      }
      AI++;
    }
  }

  LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
  return isSafeForVectorization();
}

SmallVector<Instruction *, 4>
MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
  MemAccessInfo Access(Ptr, isWrite);
  auto &IndexVector = Accesses.find(Access)->second;

  SmallVector<Instruction *, 4> Insts;
  transform(IndexVector,
            std::back_inserter(Insts),
            [&](unsigned Idx) { return this->InstMap[Idx]; });
  return Insts;
}

const char *MemoryDepChecker::Dependence::DepName[] = {
    "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
    "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};

void MemoryDepChecker::Dependence::print(
    raw_ostream &OS, unsigned Depth,
    const SmallVectorImpl<Instruction *> &Instrs) const {
  OS.indent(Depth) << DepName[Type] << ":\n";
  OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
  OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
}
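
// Illustrative output (hypothetical instruction names) at Depth == 2:
//   Backward:
//       %0 = load i32, i32* %arrayidx, align 4 ->
//       store i32 %add, i32* %arrayidx3, align 4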

bool LoopAccessInfo::canAnalyzeLoop() {
  // We need to have a loop header.
  LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
                    << TheLoop->getHeader()->getParent()->getName() << ": "
                    << TheLoop->getHeader()->getName() << '\n');

  // We can only analyze innermost loops.
  if (!TheLoop->empty()) {
    LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
    recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
    return false;
  }

  // We must have a single backedge.
  if (TheLoop->getNumBackEdges() != 1) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We must have a single exiting block.
  if (!TheLoop->getExitingBlock()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }

  // We only handle bottom-tested loops, i.e. loops in which the condition is
  // checked at the end of each iteration. With that we can assume that all
  // instructions in the loop are executed the same number of times.
  if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
    LLVM_DEBUG(
        dbgs() << "LAA: loop control flow is not understood by analyzer\n");
    recordAnalysis("CFGNotUnderstood")
        << "loop control flow is not understood by analyzer";
    return false;
  }
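
  // For illustration (hypothetical): a rotated loop such as
  //   do { body(i); i++; } while (i < n);
  // is bottom-tested because its latch is also the exiting block, whereas a
  // loop whose exit test sits in the middle of the body is rejected here.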

  // ScalarEvolution needs to be able to find the exit count.
  const SCEV *ExitCount = PSE->getBackedgeTakenCount();
  if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
    recordAnalysis("CantComputeNumberOfIterations")
        << "could not determine number of loop iterations";
    LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
    return false;
  }

  return true;
}

void LoopAccessInfo::analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                                 const TargetLibraryInfo *TLI,
                                 DominatorTree *DT) {
  typedef SmallPtrSet<Value*, 16> ValueSet;

  // Holds the Load and Store instructions.
  SmallVector<LoadInst *, 16> Loads;
  SmallVector<StoreInst *, 16> Stores;

  // Holds all the different accesses in the loop.
  unsigned NumReads = 0;
  unsigned NumReadWrites = 0;

  PtrRtChecking->Pointers.clear();
  PtrRtChecking->Need = false;

  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();

  // For each block.
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the BB and collect legal loads and stores.
    for (Instruction &I : *BB) {
      // If this is a load, save it. If this instruction can read from memory
      // but is not a load, then we quit. Notice that we don't handle function
      // calls that read or write.
      if (I.mayReadFromMemory()) {
        // Many math library functions read the rounding mode. We will only
        // vectorize a loop if it contains known function calls that don't set
        // the flag. Therefore, it is safe to ignore this read from memory.
        auto *Call = dyn_cast<CallInst>(&I);
        if (Call && getVectorIntrinsicIDForCall(Call, TLI))
          continue;

        // If the function has an explicit vectorized counterpart, we can
        // safely assume that it can be vectorized.
        if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
            TLI->isFunctionVectorizable(Call->getCalledFunction()->getName()))
          continue;

        auto *Ld = dyn_cast<LoadInst>(&I);
        if (!Ld || (!Ld->isSimple() && !IsAnnotatedParallel)) {
          recordAnalysis("NonSimpleLoad", Ld)
              << "read with atomic ordering or volatile read";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
          CanVecMem = false;
          return;
        }
        NumLoads++;
        Loads.push_back(Ld);
        DepChecker->addAccess(Ld);
        if (EnableMemAccessVersioning)
          collectStridedAccess(Ld);
        continue;
      }

      // Save 'store' instructions. Abort if other instructions write to
      // memory.
      if (I.mayWriteToMemory()) {
        auto *St = dyn_cast<StoreInst>(&I);
        if (!St) {
          recordAnalysis("CantVectorizeInstruction", St)
              << "instruction cannot be vectorized";
          CanVecMem = false;
          return;
        }
        if (!St->isSimple() && !IsAnnotatedParallel) {
          recordAnalysis("NonSimpleStore", St)
              << "write with atomic ordering or volatile write";
          LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
          CanVecMem = false;
          return;
        }
        NumStores++;
        Stores.push_back(St);
        DepChecker->addAccess(St);
        if (EnableMemAccessVersioning)
          collectStridedAccess(St);
      }
    } // Next instr.
  } // Next block.

  // Now we have two lists that hold the loads and the stores.
  // Next, we find the pointers that they use.

  // Check if we see any stores. If there are no stores, then we don't
  // care if the pointers are *restrict*.
  if (!Stores.size()) {
    LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
    CanVecMem = true;
    return;
  }

  MemoryDepChecker::DepCandidates DependentAccesses;
  AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
                          TheLoop, AA, LI, DependentAccesses, *PSE);

  // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
  // multiple times on the same object. If the ptr is accessed twice, once
  // for read and once for write, it will only appear once (on the write
  // list). This is okay, since we are going to check for conflicts between
  // writes and between reads and writes, but not between reads and reads.
  ValueSet Seen;

  // Record uniform store addresses to identify if we have multiple stores
  // to the same address.
  ValueSet UniformStores;

  for (StoreInst *ST : Stores) {
    Value *Ptr = ST->getPointerOperand();

    if (isUniform(Ptr))
      HasDependenceInvolvingLoopInvariantAddress |=
          !UniformStores.insert(Ptr).second;

    // If we did *not* see this pointer before, insert it to the read-write
    // list. At this phase it is only a 'write' list.
    if (Seen.insert(Ptr).second) {
      ++NumReadWrites;

      MemoryLocation Loc = MemoryLocation::get(ST);
      // The TBAA metadata could have a control dependency on the predication
      // condition, so we cannot rely on it when determining whether or not we
      // need runtime pointer checks.
      if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
        Loc.AATags.TBAA = nullptr;

      Accesses.addStore(Loc);
    }
  }

  if (IsAnnotatedParallel) {
    LLVM_DEBUG(
        dbgs() << "LAA: A loop annotated as parallel, ignore memory dependency "
               << "checks.\n");
    CanVecMem = true;
    return;
  }

  for (LoadInst *LD : Loads) {
    Value *Ptr = LD->getPointerOperand();
    // If we did *not* see this pointer before, insert it to the
    // read list. If we *did* see it before, then it is already in
    // the read-write list. This allows us to vectorize expressions
    // such as A[i] += x; because the address of A[i] is a read-write
    // pointer. This only works if the index of A[i] is consecutive.
    // If the address of i is unknown (for example A[B[i]]) then we may
    // read a few words, modify, and write a few words, and some of the
    // words may be written to the same address.
    bool IsReadOnlyPtr = false;
    if (Seen.insert(Ptr).second ||
        !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
      ++NumReads;
      IsReadOnlyPtr = true;
    }
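
    // A hypothetical example: for
    //   for (i = 0; i < N; i++)
    //     A[i] += x;
    // the load of A[i] reuses the store's pointer, so Seen already contains
    // Ptr and the access stays on the read-write list instead of being
    // classified as read-only.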

    // See if there is an unsafe dependency between a load from a uniform
    // address and a store to the same uniform address.
    if (UniformStores.count(Ptr)) {
      LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
                           "load and uniform store to the same address!\n");
      HasDependenceInvolvingLoopInvariantAddress = true;
    }

    MemoryLocation Loc = MemoryLocation::get(LD);
    // The TBAA metadata could have a control dependency on the predication
    // condition, so we cannot rely on it when determining whether or not we
    // need runtime pointer checks.
    if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
      Loc.AATags.TBAA = nullptr;

    Accesses.addLoad(Loc, IsReadOnlyPtr);
  }

  // If we write (or read-write) to a single destination and there are no
  // other reads in this loop then it is safe to vectorize.
  if (NumReadWrites == 1 && NumReads == 0) {
    LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
    CanVecMem = true;
    return;
  }

  // Build dependence sets and check whether we need a runtime pointer bounds
  // check.
  Accesses.buildDependenceSets();

  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
                                                  TheLoop, SymbolicStrides);
  if (!CanDoRTIfNeeded) {
    recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
    LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
                      << "the array bounds.\n");
    CanVecMem = false;
    return;
  }

  LLVM_DEBUG(
      dbgs() << "LAA: We can perform a memory runtime check if needed.\n");

  CanVecMem = true;
  if (Accesses.isDependencyCheckNeeded()) {
    LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
    CanVecMem = DepChecker->areDepsSafe(
        DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
    MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();

    if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
      LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");

      // Clear the dependency checks. We assume they are not needed.
      Accesses.resetDepChecks(*DepChecker);

      PtrRtChecking->reset();
      PtrRtChecking->Need = true;

      auto *SE = PSE->getSE();
      CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
                                                 SymbolicStrides, true);

      // Check that we found the bounds for the pointer.
      if (!CanDoRTIfNeeded) {
        recordAnalysis("CantCheckMemDepsAtRunTime")
            << "cannot check memory dependencies at runtime";
        LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
        CanVecMem = false;
        return;
      }

      CanVecMem = true;
    }
  }

  if (CanVecMem)
    LLVM_DEBUG(
        dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
               << (PtrRtChecking->Need ? "" : " don't")
               << " need runtime memory checks.\n");
  else {
    recordAnalysis("UnsafeMemDep")
        << "unsafe dependent memory operations in loop. Use "
           "#pragma loop distribute(enable) to allow loop distribution "
           "to attempt to isolate the offending operations into a separate "
           "loop";
    LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
  }
}

bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                           DominatorTree *DT) {
  assert(TheLoop->contains(BB) && "Unknown block used");

  // Blocks that do not dominate the latch need predication.
  BasicBlock *Latch = TheLoop->getLoopLatch();
  return !DT->dominates(BB, Latch);
}
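
// For illustration (hypothetical): in
//   for (i = 0; i < n; i++)
//     if (c[i]) A[i] = 0;
// the block containing the store does not dominate the latch, so it needs
// predication, and analyzeLoop() above drops its TBAA metadata when building
// the access's MemoryLocation.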

OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
                                                           Instruction *I) {
  assert(!Report && "Multiple reports generated");

  Value *CodeRegion = TheLoop->getHeader();
  DebugLoc DL = TheLoop->getStartLoc();

  if (I) {
    CodeRegion = I->getParent();
    // If there is no debug location attached to the instruction, fall back to
    // using the loop's.
    if (I->getDebugLoc())
      DL = I->getDebugLoc();
  }

  Report = make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
                                                   CodeRegion);
  return *Report;
}

bool LoopAccessInfo::isUniform(Value *V) const {
  auto *SE = PSE->getSE();
  // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
  // never considered uniform.
  // TODO: Is this really what we want? Even without FP SCEV, we may want some
  // trivially loop-invariant FP values to be considered uniform.
  if (!SE->isSCEVable(V->getType()))
    return false;
  return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}
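
// A hypothetical example: for a loop-invariant pointer P, a store such as
//   *P += x;   // executed inside the loop
// has a SCEV that does not vary with the loop, so isUniform(P) holds and
// repeated stores through P are tracked via UniformStores in analyzeLoop().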

// FIXME: this function is currently a duplicate of the one in
// LoopVectorize.cpp.
static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
                                 Instruction *Loc) {
  if (FirstInst)
    return FirstInst;
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == Loc->getParent() ? I : nullptr;
  return nullptr;
}

namespace {

/// IR Values for the lower and upper bounds of a pointer evolution. We
/// need to use value-handles because SCEV expansion can invalidate previously
/// expanded values. Thus expansion of a pointer can invalidate the bounds for
/// a previous one.
struct PointerBounds {
  TrackingVH<Value> Start;
  TrackingVH<Value> End;
};

} // end anonymous namespace

/// Expand code for the lower and upper bound of the pointer group \p CG
/// in \p TheLoop. \return the values for the bounds.
static PointerBounds
expandBounds(const RuntimePointerChecking::CheckingPtrGroup *CG, Loop *TheLoop,
             Instruction *Loc, SCEVExpander &Exp, ScalarEvolution *SE,
             const RuntimePointerChecking &PtrRtChecking) {
  Value *Ptr = PtrRtChecking.Pointers[CG->Members[0]].PointerValue;
  const SCEV *Sc = SE->getSCEV(Ptr);

  unsigned AS = Ptr->getType()->getPointerAddressSpace();
  LLVMContext &Ctx = Loc->getContext();

  // Use this type for pointer arithmetic.
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, AS);

  if (SE->isLoopInvariant(Sc, TheLoop)) {
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for a loop invariant ptr:"
                      << *Ptr << "\n");
    // Ptr could be in the loop body. If so, expand a new one at the correct
    // location.
    Instruction *Inst = dyn_cast<Instruction>(Ptr);
    Value *NewPtr = (Inst && TheLoop->contains(Inst))
                        ? Exp.expandCodeFor(Sc, PtrArithTy, Loc)
                        : Ptr;
    // We must return a half-open range, which means incrementing Sc.
    const SCEV *ScPlusOne = SE->getAddExpr(Sc, SE->getOne(PtrArithTy));
    Value *NewPtrPlusOne = Exp.expandCodeFor(ScPlusOne, PtrArithTy, Loc);
    return {NewPtr, NewPtrPlusOne};
  } else {
    Value *Start = nullptr, *End = nullptr;
    LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
    Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
    End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
    LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High
                      << "\n");
    return {Start, End};
  }
}
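
// For illustration (a hypothetical reading of the two cases above): a
// loop-invariant pointer P yields the one-byte half-open range [P, P + 1),
// while a strided access yields the range from the group's Low bound to its
// High bound, i.e. from the first accessed byte to one past the last.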

/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4> expandBounds(
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks,
    Loop *L, Instruction *Loc, ScalarEvolution *SE, SCEVExpander &Exp,
    const RuntimePointerChecking &PtrRtChecking) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(
      PointerChecks, std::back_inserter(ChecksWithBounds),
      [&](const RuntimePointerChecking::PointerCheck &Check) {
        PointerBounds
          First = expandBounds(Check.first, L, Loc, Exp, SE, PtrRtChecking),
          Second = expandBounds(Check.second, L, Loc, Exp, SE, PtrRtChecking);
        return std::make_pair(First, Second);
      });

  return ChecksWithBounds;
}

std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
    Instruction *Loc,
    const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
    const {
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  auto *SE = PSE->getSE();
  SCEVExpander Exp(*SE, DL, "induction");
  auto ExpandedChecks =
      expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, *PtrRtChecking);

  LLVMContext &Ctx = Loc->getContext();
  Instruction *FirstInst = nullptr;
  IRBuilder<> ChkBuilder(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) < end(B) && start(B) < end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (A.Start < B.End)
    // bound1 = (B.Start < A.End)
    // IsConflict = bound0 & bound1
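    //
    // A worked example (hypothetical addresses): if A = [0x100, 0x110) and
    // B = [0x108, 0x118), then bound0 = (0x100 < 0x118) and
    // bound1 = (0x108 < 0x110) both hold and IsConflict is true; if instead
    // B = [0x110, 0x118), bound1 = (0x110 < 0x110) fails and the regions are
    // proven disjoint.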
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    FirstInst = getFirstInst(FirstInst, Cmp0, Loc);
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    FirstInst = getFirstInst(FirstInst, Cmp1, Loc);
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
      FirstInst = getFirstInst(FirstInst, IsConflict, Loc);
    }
    MemoryRuntimeCheck = IsConflict;
  }

  if (!MemoryRuntimeCheck)
    return std::make_pair(nullptr, nullptr);

  // We have to do this trickery because the IRBuilder might fold the check to
  // a constant expression in which case there is no Instruction anchored in
  // the block.
  Instruction *Check = BinaryOperator::CreateAnd(MemoryRuntimeCheck,
                                                 ConstantInt::getTrue(Ctx));
  ChkBuilder.Insert(Check, "memcheck.conflict");
  FirstInst = getFirstInst(FirstInst, Check, Loc);
  return std::make_pair(FirstInst, Check);
}

std::pair<Instruction *, Instruction *>
LoopAccessInfo::addRuntimeChecks(Instruction *Loc) const {
  if (!PtrRtChecking->Need)
    return std::make_pair(nullptr, nullptr);

  return addRuntimeChecks(Loc, PtrRtChecking->getChecks());
}

void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
  Value *Ptr = nullptr;
  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
    Ptr = LI->getPointerOperand();
  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
    Ptr = SI->getPointerOperand();
  else
    return;

  Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
  if (!Stride)
    return;
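
  // A hypothetical candidate: in
  //   void f(int *A, int S, int N) {
  //     for (int i = 0; i < N; i++)
  //       A[i * S] = 0;
  //   }
  // S is the symbolic stride found here; versioning the loop under the
  // predicate S == 1 turns A[i * S] into a consecutive access.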

  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
                       "versioning:");
  LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");

  // Avoid adding the "Stride == 1" predicate when we know that
  // Stride >= Trip-Count. Such a predicate will effectively optimize a single
  // or zero iteration loop, as Trip-Count <= Stride == 1.
  //
  // TODO: We are currently not making a very informed decision on when it is
  // beneficial to apply stride versioning. It might make more sense that the
  // users of this analysis (such as the vectorizer) will trigger it, based on
  // their specific cost considerations; for example, in cases where stride
  // versioning does not help resolving memory accesses/dependences, the
  // vectorizer should evaluate the cost of the runtime test, and the benefit
  // of various possible stride specializations, considering the alternatives
  // of using gather/scatters (if available).

  const SCEV *StrideExpr = PSE->getSCEV(Stride);
  const SCEV *BETakenCount = PSE->getBackedgeTakenCount();

  // Match the types so we can compare the stride and the BETakenCount.
  // The Stride can be positive/negative, so we sign extend Stride;
  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
  const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
  uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
  uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  ScalarEvolution *SE = PSE->getSE();
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
  // Since TripCount == BackEdgeTakenCount + 1, checking:
  // "Stride >= TripCount" is equivalent to checking:
  // Stride - BETakenCount > 0
  if (SE->isKnownPositive(StrideMinusBETaken)) {
    LLVM_DEBUG(
        dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
                  "Stride==1 predicate will imply that the loop executes "
                  "at most once.\n");
    return;
  }
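
  // A hypothetical example: if SCEV can prove that the stride is at least 8
  // while the loop runs at most 8 iterations (BETakenCount <= 7), then
  // Stride - BETakenCount is known positive, and the Stride == 1
  // specialization could only ever cover a loop executing at most one
  // iteration, so versioning is skipped.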
  LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.");

  SymbolicStrides[Ptr] = Stride;
  StrideSet.insert(Stride);
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
                               const TargetLibraryInfo *TLI, AliasAnalysis *AA,
                               DominatorTree *DT, LoopInfo *LI)
    : PSE(llvm::make_unique<PredicatedScalarEvolution>(*SE, *L)),
      PtrRtChecking(llvm::make_unique<RuntimePointerChecking>(SE)),
      DepChecker(llvm::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
      NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
      HasDependenceInvolvingLoopInvariantAddress(false) {
  if (canAnalyzeLoop())
    analyzeLoop(AA, LI, TLI, DT);
}

void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
  if (CanVecMem) {
    OS.indent(Depth) << "Memory dependences are safe";
    if (MaxSafeDepDistBytes != -1ULL)
      OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
         << " bytes";
    if (PtrRtChecking->Need)
      OS << " with run-time checks";
    OS << "\n";
  }

  if (Report)
    OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";

  if (auto *Dependences = DepChecker->getDependences()) {
    OS.indent(Depth) << "Dependences:\n";
    for (auto &Dep : *Dependences) {
      Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
      OS << "\n";
    }
  } else
    OS.indent(Depth) << "Too many dependences, not recorded\n";

  // List the pairs of accesses that need run-time checks to prove
  // independence.
  PtrRtChecking->print(OS, Depth);
  OS << "\n";

  OS.indent(Depth) << "Non vectorizable stores to invariant address were "
                   << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
                   << "found in loop.\n";

  OS.indent(Depth) << "SCEV assumptions:\n";
  PSE->getUnionPredicate().print(OS, Depth);

  OS << "\n";

  OS.indent(Depth) << "Expressions re-written:\n";
  PSE->print(OS, Depth);
}

const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
  auto &LAI = LoopAccessInfoMap[L];

  if (!LAI)
    LAI = llvm::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);

  return *LAI.get();
}

void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
  LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);

  for (Loop *TopLevelLoop : *LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\n";
      auto &LAI = LAA.getInfo(L);
      LAI.print(OS, 4);
    }
}

bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

  return false;
}

void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();

  AU.setPreservesAll();
}

char LoopAccessLegacyAnalysis::ID = 0;
static const char laa_name[] = "Loop Access Analysis";
#define LAA_NAME "loop-accesses"

INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)

AnalysisKey LoopAccessAnalysis::Key;

LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
                                       LoopStandardAnalysisResults &AR) {
  return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}

namespace llvm {

Pass *createLAAPass() {
  return new LoopAccessLegacyAnalysis();
}

} // end namespace llvm