1 //===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The implementation of the loop memory dependence analysis that was
10 // originally developed for the loop vectorizer.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/LoopAccessAnalysis.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/EquivalenceClasses.h"
18 #include "llvm/ADT/PointerIntPair.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AliasSetTracker.h"
26 #include "llvm/Analysis/LoopAnalysisManager.h"
27 #include "llvm/Analysis/LoopInfo.h"
28 #include "llvm/Analysis/LoopIterator.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
31 #include "llvm/Analysis/ScalarEvolution.h"
32 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
33 #include "llvm/Analysis/TargetLibraryInfo.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/Analysis/VectorUtils.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/Constants.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/DebugLoc.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/DiagnosticInfo.h"
42 #include "llvm/IR/Dominators.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GetElementPtrTypeIterator.h"
45 #include "llvm/IR/InstrTypes.h"
46 #include "llvm/IR/Instruction.h"
47 #include "llvm/IR/Instructions.h"
48 #include "llvm/IR/Operator.h"
49 #include "llvm/IR/PassManager.h"
50 #include "llvm/IR/PatternMatch.h"
51 #include "llvm/IR/Type.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/IR/ValueHandle.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/ErrorHandling.h"
58 #include "llvm/Support/raw_ostream.h"
59 #include <algorithm>
60 #include <cassert>
61 #include <cstdint>
62 #include <iterator>
63 #include <utility>
64 #include <vector>
66 using namespace llvm;
67 using namespace llvm::PatternMatch;
69 #define DEBUG_TYPE "loop-accesses"
71 static cl::opt<unsigned, true>
72 VectorizationFactor("force-vector-width", cl::Hidden,
73 cl::desc("Sets the SIMD width. Zero is autoselect."),
74 cl::location(VectorizerParams::VectorizationFactor));
75 unsigned VectorizerParams::VectorizationFactor;
77 static cl::opt<unsigned, true>
78 VectorizationInterleave("force-vector-interleave", cl::Hidden,
79 cl::desc("Sets the vectorization interleave count. "
80 "Zero is autoselect."),
81 cl::location(
82 VectorizerParams::VectorizationInterleave));
83 unsigned VectorizerParams::VectorizationInterleave;
85 static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
86 "runtime-memory-check-threshold", cl::Hidden,
87 cl::desc("When performing memory disambiguation checks at runtime do not "
88 "generate more than this number of comparisons (default = 8)."),
89 cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
90 unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
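// Note: these parameters are plain cl::opt flags, so they can be exercised
// directly from the command line when inspecting the analysis. A sketch
// (assuming the usual LAA printer pass name; adjust for your opt version):
//
//   opt -passes='print<access-info>' -runtime-memory-check-threshold=16 \
//       -disable-output input.ll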
92 /// The maximum number of iterations used to merge memory checks.
93 static cl::opt<unsigned> MemoryCheckMergeThreshold(
94 "memory-check-merge-threshold", cl::Hidden,
95 cl::desc("Maximum number of comparisons done when trying to merge "
96 "runtime memory checks. (default = 100)"),
97 cl::init(100));
99 /// Maximum SIMD width.
100 const unsigned VectorizerParams::MaxVectorWidth = 64;
102 /// We collect dependences up to this threshold.
103 static cl::opt<unsigned>
104 MaxDependences("max-dependences", cl::Hidden,
105 cl::desc("Maximum number of dependences collected by "
106 "loop-access analysis (default = 100)"),
107 cl::init(100));
109 /// This enables versioning on the strides of symbolically striding memory
110 /// accesses in code like the following.
111 /// for (i = 0; i < N; ++i)
112 /// A[i * Stride1] += B[i * Stride2] ...
114 /// Will be roughly translated to
115 /// if (Stride1 == 1 && Stride2 == 1) {
116 /// for (i = 0; i < N; i+=4)
117 /// A[i:i+3] += ...
118 /// } else
119 /// ...
120 static cl::opt<bool> EnableMemAccessVersioning(
121 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
122 cl::desc("Enable symbolic stride memory access versioning"));
124 /// Enable store-to-load forwarding conflict detection. This option can
125 /// be disabled for correctness testing.
126 static cl::opt<bool> EnableForwardingConflictDetection(
127 "store-to-load-forwarding-conflict-detection", cl::Hidden,
128 cl::desc("Enable conflict detection in loop-access analysis"),
129 cl::init(true));
131 static cl::opt<unsigned> MaxForkedSCEVDepth(
132 "max-forked-scev-depth", cl::Hidden,
133 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
134 cl::init(5));
136 static cl::opt<bool> SpeculateUnitStride(
137 "laa-speculate-unit-stride", cl::Hidden,
138 cl::desc("Speculate that non-constant strides are unit in LAA"),
139 cl::init(true));
141 static cl::opt<bool, true> HoistRuntimeChecks(
142 "hoist-runtime-checks", cl::Hidden,
143 cl::desc(
144 "Hoist inner loop runtime memory checks to outer loop if possible"),
145 cl::location(VectorizerParams::HoistRuntimeChecks), cl::init(false));
146 bool VectorizerParams::HoistRuntimeChecks;
148 bool VectorizerParams::isInterleaveForced() {
149 return ::VectorizationInterleave.getNumOccurrences() > 0;
152 const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
153 const DenseMap<Value *, const SCEV *> &PtrToStride,
154 Value *Ptr) {
155 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
157 // If there is an entry in the map, return the SCEV of the pointer with the
158 // symbolic stride replaced by one.
159 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
160 if (SI == PtrToStride.end())
161 // For a non-symbolic stride, just return the original expression.
162 return OrigSCEV;
164 const SCEV *StrideSCEV = SI->second;
165 // Note: This assert is both overly strong and overly weak. The actual
166 // invariant here is that StrideSCEV should be loop invariant. The only
167 // such invariant strides we happen to speculate right now are unknowns
168 // and thus this is a reasonable proxy of the actual invariant.
169 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
171 ScalarEvolution *SE = PSE.getSE();
172 const auto *CT = SE->getOne(StrideSCEV->getType());
173 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
174 auto *Expr = PSE.getSCEV(Ptr);
176 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
177 << " by: " << *Expr << "\n");
178 return Expr;
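// For illustration (a sketch, not taken from a real test): if Ptr addresses
// A[i * S] with a 4-byte element type and S is the speculated symbolic
// stride, the original pointer SCEV {%A,+,(4 * %S)}<%loop> is re-derived
// under the added predicate "%S == 1" as the unit-stride form
// {%A,+,4}<%loop>, which downstream analysis can handle.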
181 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
182 unsigned Index, RuntimePointerChecking &RtCheck)
183 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
184 AddressSpace(RtCheck.Pointers[Index]
185 .PointerValue->getType()
186 ->getPointerAddressSpace()),
187 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
188 Members.push_back(Index);
191 /// Calculate Start and End points of memory access.
192 /// Let's assume A is the first access and B is a memory access on the N-th
193 /// loop iteration. Then B is calculated as:
194 ///   B = A + Step*N.
195 /// The Step value may be positive or negative.
196 /// N is the calculated back-edge taken count:
197 ///   N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
198 /// Start and End points are calculated in the following way:
199 ///   Start = UMIN(A, B); End = UMAX(A, B) + SizeOfElt,
200 /// where SizeOfElt is the size of a single memory access in bytes.
202 /// There is no conflict when the intervals are disjoint:
203 /// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
204 void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
205 Type *AccessTy, bool WritePtr,
206 unsigned DepSetId, unsigned ASId,
207 PredicatedScalarEvolution &PSE,
208 bool NeedsFreeze) {
209 ScalarEvolution *SE = PSE.getSE();
211 const SCEV *ScStart;
212 const SCEV *ScEnd;
214 if (SE->isLoopInvariant(PtrExpr, Lp)) {
215 ScStart = ScEnd = PtrExpr;
216 } else {
217 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
218 assert(AR && "Invalid addrec expression");
219 const SCEV *Ex = PSE.getBackedgeTakenCount();
221 ScStart = AR->getStart();
222 ScEnd = AR->evaluateAtIteration(Ex, *SE);
223 const SCEV *Step = AR->getStepRecurrence(*SE);
225 // For expressions with negative step, the upper bound is ScStart and the
226 // lower bound is ScEnd.
227 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
228 if (CStep->getValue()->isNegative())
229 std::swap(ScStart, ScEnd);
230 } else {
231 // Fallback case: the step is not constant, but we can still
232 // get the upper and lower bounds of the interval by using min/max
233 // expressions.
234 ScStart = SE->getUMinExpr(ScStart, ScEnd);
235 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
238 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
239 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
241 // Add the size of the pointed element to ScEnd.
242 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
243 Type *IdxTy = DL.getIndexType(Ptr->getType());
244 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
245 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
247 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
248 NeedsFreeze);
251 void RuntimePointerChecking::tryToCreateDiffCheck(
252 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
253 if (!CanUseDiffCheck)
254 return;
256 // If either group contains multiple different pointers, bail out.
257 // TODO: Support multiple pointers by using the minimum or maximum pointer,
258 // depending on src & sink.
259 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
260 CanUseDiffCheck = false;
261 return;
264 PointerInfo *Src = &Pointers[CGI.Members[0]];
265 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
267 // If either pointer is read and written, multiple checks may be needed. Bail
268 // out.
269 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
270 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
271 CanUseDiffCheck = false;
272 return;
275 ArrayRef<unsigned> AccSrc =
276 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
277 ArrayRef<unsigned> AccSink =
278 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
279 // If either pointer is accessed multiple times, there may not be a clear
280 // src/sink relation. Bail out for now.
281 if (AccSrc.size() != 1 || AccSink.size() != 1) {
282 CanUseDiffCheck = false;
283 return;
285 // If the sink is accessed before src, swap src/sink.
286 if (AccSink[0] < AccSrc[0])
287 std::swap(Src, Sink);
289 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
290 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
291 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
292 SinkAR->getLoop() != DC.getInnermostLoop()) {
293 CanUseDiffCheck = false;
294 return;
297 SmallVector<Instruction *, 4> SrcInsts =
298 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
299 SmallVector<Instruction *, 4> SinkInsts =
300 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
301 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
302 Type *DstTy = getLoadStoreType(SinkInsts[0]);
303 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
304 CanUseDiffCheck = false;
305 return;
307 const DataLayout &DL =
308 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
309 unsigned AllocSize =
310 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
312 // Only constant steps matching the AllocSize are supported at the
313 // moment. This simplifies the difference computation. Can be extended
314 // in the future.
315 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
316 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
317 Step->getAPInt().abs() != AllocSize) {
318 CanUseDiffCheck = false;
319 return;
322 IntegerType *IntTy =
323 IntegerType::get(Src->PointerValue->getContext(),
324 DL.getPointerSizeInBits(CGI.AddressSpace));
326 // When counting down, the dependence distance needs to be swapped.
327 if (Step->getValue()->isNegative())
328 std::swap(SinkAR, SrcAR);
330 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
331 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
332 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
333 isa<SCEVCouldNotCompute>(SrcStartInt)) {
334 CanUseDiffCheck = false;
335 return;
338 const Loop *InnerLoop = SrcAR->getLoop();
339 // If the start values for both Src and Sink also vary according to an outer
340 // loop, then it's probably better to avoid creating diff checks because
341 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
342 // do the expanded full range overlap checks, which can be hoisted.
343 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
344 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
345 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
346 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
347 const Loop *StartARLoop = SrcStartAR->getLoop();
348 if (StartARLoop == SinkStartAR->getLoop() &&
349 StartARLoop == InnerLoop->getParentLoop()) {
350 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
351 "cannot be hoisted out of the outer loop\n");
352 CanUseDiffCheck = false;
353 return;
357 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
358 << "SrcStart: " << *SrcStartInt << '\n'
359 << "SinkStartInt: " << *SinkStartInt << '\n');
360 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
361 Src->NeedsFreeze || Sink->NeedsFreeze);
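// Sketch of the intent (hypothetical example): for a loop doing A[i] = B[i]
// where both pointers are AddRecs with the same constant step equal to the
// access size, the two-sided overlap check can be replaced by a cheaper
// check built from the recorded integer start addresses; the actual
// expansion of DiffChecks happens later, when runtime checks are emitted.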
364 SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
365 SmallVector<RuntimePointerCheck, 4> Checks;
367 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
368 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
369 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
370 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
372 if (needsChecking(CGI, CGJ)) {
373 tryToCreateDiffCheck(CGI, CGJ);
374 Checks.push_back(std::make_pair(&CGI, &CGJ));
378 return Checks;
381 void RuntimePointerChecking::generateChecks(
382 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
383 assert(Checks.empty() && "Checks is not empty");
384 groupChecks(DepCands, UseDependencies);
385 Checks = generateChecks();
388 bool RuntimePointerChecking::needsChecking(
389 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
390 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
391 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
392 if (needsChecking(M.Members[I], N.Members[J]))
393 return true;
394 return false;
397 /// Compare \p I and \p J and return the minimum.
398 /// Return nullptr in case we couldn't find an answer.
399 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
400 ScalarEvolution *SE) {
401 const SCEV *Diff = SE->getMinusSCEV(J, I);
402 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
404 if (!C)
405 return nullptr;
406 if (C->getValue()->isNegative())
407 return J;
408 return I;
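// For example (hypothetical values): with I = %a and J = (%a + 8), the
// difference J - I folds to the constant 8, which is non-negative, so I is
// returned as the minimum. If the difference is not a SCEVConstant, the
// caller gets nullptr and cannot merge the pointer into the group.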
411 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
412 RuntimePointerChecking &RtCheck) {
413 return addPointer(
414 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
415 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
416 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
419 bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
420 const SCEV *End, unsigned AS,
421 bool NeedsFreeze,
422 ScalarEvolution &SE) {
423 assert(AddressSpace == AS &&
424 "all pointers in a checking group must be in the same address space");
426 // Compare the starts and ends with the known minimum and maximum
427 // of this set. We need to know how we compare against the min/max
428 // of the set in order to be able to emit memchecks.
429 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
430 if (!Min0)
431 return false;
433 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
434 if (!Min1)
435 return false;
437 // Update the low bound expression if we've found a new min value.
438 if (Min0 == Start)
439 Low = Start;
441 // Update the high bound expression if we've found a new max value.
442 if (Min1 != End)
443 High = End;
445 Members.push_back(Index);
446 this->NeedsFreeze |= NeedsFreeze;
447 return true;
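// Example of the bound update (hypothetical SCEVs): a group with
// [Low, High] = [%a, %a + 64] absorbing a pointer with
// [Start, End] = [%a + 32, %a + 96] finds both differences constant,
// keeps Low = %a (still the smaller start) and raises High to %a + 96.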
450 void RuntimePointerChecking::groupChecks(
451 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
452 // We build the groups from dependency candidates equivalence classes
453 // because:
454 // - We know that pointers in the same equivalence class share
455 // the same underlying object and therefore there is a chance
456 // that we can compare pointers
457 // - We wouldn't be able to merge two pointers for which we need
458 // to emit a memcheck. The classes in DepCands are already
459 // conveniently built such that no two pointers in the same
460 // class need checking against each other.
462 // We use the following (greedy) algorithm to construct the groups
463 // For every pointer in the equivalence class:
464 // For each existing group:
465 // - if the difference between this pointer and the min/max bounds
466 // of the group is a constant, then make the pointer part of the
467 // group and update the min/max bounds of that group as required.
469 CheckingGroups.clear();
471 // If we need to check two pointers to the same underlying object
472 // with a non-constant difference, we shouldn't perform any pointer
473 // grouping with those pointers. This is because we can easily get
474 // into cases where the resulting check would return false, even when
475 // the accesses are safe.
477 // The following example shows this:
478 // for (i = 0; i < 1000; ++i)
479 // a[5000 + i * m] = a[i] + a[i + 9000]
481 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
482 // (0, 10000) which is always false. However, if m is 1, there is no
483 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
484 // us to perform an accurate check in this case.
486 // The above case requires that we have an UnknownDependence between
487 // accesses to the same underlying object. This cannot happen unless
488 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
489 // is also false. In this case we will use the fallback path and create
490 // separate checking groups for all pointers.
492 // If we don't have the dependency partitions, construct a new
493 // checking pointer group for each pointer. This is also required
494 // for correctness, because in this case we can have checking between
495 // pointers to the same underlying object.
496 if (!UseDependencies) {
497 for (unsigned I = 0; I < Pointers.size(); ++I)
498 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
499 return;
502 unsigned TotalComparisons = 0;
504 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
505 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
506 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
507 Iter.first->second.push_back(Index);
510 // We need to keep track of what pointers we've already seen so we
511 // don't process them twice.
512 SmallSet<unsigned, 2> Seen;
514 // Go through all equivalence classes, get the "pointer check groups"
515 // and add them to the overall solution. We use the order in which accesses
516 // appear in 'Pointers' to enforce determinism.
517 for (unsigned I = 0; I < Pointers.size(); ++I) {
518 // We've seen this pointer before, and therefore already processed
519 // its equivalence class.
520 if (Seen.count(I))
521 continue;
523 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
524 Pointers[I].IsWritePtr);
526 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
527 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
529 // Because DepCands is constructed by visiting accesses in the order in
530 // which they appear in alias sets (which is deterministic) and the
531 // iteration order within an equivalence class member is only dependent on
532 // the order in which unions and insertions are performed on the
533 // equivalence class, the iteration order is deterministic.
534 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
535 MI != ME; ++MI) {
536 auto PointerI = PositionMap.find(MI->getPointer());
537 assert(PointerI != PositionMap.end() &&
538 "pointer in equivalence class not found in PositionMap");
539 for (unsigned Pointer : PointerI->second) {
540 bool Merged = false;
541 // Mark this pointer as seen.
542 Seen.insert(Pointer);
544 // Go through all the existing sets and see if we can find one
545 // which can include this pointer.
546 for (RuntimeCheckingPtrGroup &Group : Groups) {
547 // Don't perform more than a certain amount of comparisons.
548 // This should limit the cost of grouping the pointers to something
549 // reasonable. If we do end up hitting this threshold, the algorithm
550 // will create separate groups for all remaining pointers.
551 if (TotalComparisons > MemoryCheckMergeThreshold)
552 break;
554 TotalComparisons++;
556 if (Group.addPointer(Pointer, *this)) {
557 Merged = true;
558 break;
562 if (!Merged)
563 // We couldn't add this pointer to any existing set or the threshold
564 // for the number of comparisons has been reached. Create a new group
565 // to hold the current pointer.
566 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
570 // We've computed the grouped checks for this partition.
571 // Save the results and continue with the next one.
572 llvm::copy(Groups, std::back_inserter(CheckingGroups));
576 bool RuntimePointerChecking::arePointersInSamePartition(
577 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
578 unsigned PtrIdx2) {
579 return (PtrToPartition[PtrIdx1] != -1 &&
580 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
583 bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
584 const PointerInfo &PointerI = Pointers[I];
585 const PointerInfo &PointerJ = Pointers[J];
587 // No need to check if two readonly pointers intersect.
588 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
589 return false;
591 // Only need to check pointers between two different dependency sets.
592 if (PointerI.DependencySetId == PointerJ.DependencySetId)
593 return false;
595 // Only need to check pointers in the same alias set.
596 if (PointerI.AliasSetId != PointerJ.AliasSetId)
597 return false;
599 return true;
602 void RuntimePointerChecking::printChecks(
603 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
604 unsigned Depth) const {
605 unsigned N = 0;
606 for (const auto &Check : Checks) {
607 const auto &First = Check.first->Members, &Second = Check.second->Members;
609 OS.indent(Depth) << "Check " << N++ << ":\n";
611 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
612 for (unsigned K = 0; K < First.size(); ++K)
613 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
615 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
616 for (unsigned K = 0; K < Second.size(); ++K)
617 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
621 void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
623 OS.indent(Depth) << "Run-time memory checks:\n";
624 printChecks(OS, Checks, Depth);
626 OS.indent(Depth) << "Grouped accesses:\n";
627 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
628 const auto &CG = CheckingGroups[I];
630 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
631 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
632 << ")\n";
633 for (unsigned J = 0; J < CG.Members.size(); ++J) {
634 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
635 << "\n";
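// The output produced above looks roughly as follows (illustrative only;
// group addresses, pointers and expressions depend on the input IR):
//
//   Run-time memory checks:
//   Check 0:
//     Comparing group (0x...):
//       %gep.a = getelementptr ...
//     Against group (0x...):
//       %gep.b = getelementptr ...
//   Grouped accesses:
//     Group 0x...:
//       (Low: %a High: (400 + %a))
//         Member: {%a,+,4}<%loop>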
640 namespace {
642 /// Analyses memory accesses in a loop.
644 /// Checks whether run time pointer checks are needed and builds sets for data
645 /// dependence checking.
646 class AccessAnalysis {
647 public:
648 /// Read or write access location.
649 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
650 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
652 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
653 MemoryDepChecker::DepCandidates &DA,
654 PredicatedScalarEvolution &PSE)
655 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE) {
656 // We're analyzing dependences across loop iterations.
657 BAA.enableCrossIterationMode();
660 /// Register a load and whether it is only read from.
661 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
662 Value *Ptr = const_cast<Value*>(Loc.Ptr);
663 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
664 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
665 if (IsReadOnly)
666 ReadOnlyPtr.insert(Ptr);
669 /// Register a store.
670 void addStore(MemoryLocation &Loc, Type *AccessTy) {
671 Value *Ptr = const_cast<Value*>(Loc.Ptr);
672 AST.add(Ptr, LocationSize::beforeOrAfterPointer(), Loc.AATags);
673 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
676 /// Check if we can emit a run-time no-alias check for \p Access.
678 /// Returns true if we can emit a run-time no alias check for \p Access.
679 /// If we can check this access, this also adds it to a dependence set and
680 /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
681 /// we will attempt to use additional run-time checks in order to get
682 /// the bounds of the pointer.
683 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
684 MemAccessInfo Access, Type *AccessTy,
685 const DenseMap<Value *, const SCEV *> &Strides,
686 DenseMap<Value *, unsigned> &DepSetId,
687 Loop *TheLoop, unsigned &RunningDepId,
688 unsigned ASId, bool ShouldCheckStride, bool Assume);
690 /// Check whether we can check the pointers at runtime for
691 /// non-intersection.
693 /// Returns true if we need no check or if we do and we can generate them
694 /// (i.e. the pointers have computable bounds).
695 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
696 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
697 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
699 /// Goes over all memory accesses, checks whether an RT check is needed
700 /// and builds sets of dependent accesses.
701 void buildDependenceSets() {
702 processMemAccesses();
705 /// Initial processing of memory accesses determined that we need to
706 /// perform dependency checking.
708 /// Note that this can later be cleared if we retry memcheck analysis without
709 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
710 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
712 /// We decided that no dependence analysis would be used. Reset the state.
713 void resetDepChecks(MemoryDepChecker &DepChecker) {
714 CheckDeps.clear();
715 DepChecker.clearDependences();
718 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
720 private:
721 typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
723 /// Go over all memory accesses and check whether runtime pointer checks
724 /// are needed and build sets of dependency check candidates.
725 void processMemAccesses();
727 /// Map of all accesses. Values are the types used to access memory pointed to
728 /// by the pointer.
729 PtrAccessMap Accesses;
731 /// The loop being checked.
732 const Loop *TheLoop;
734 /// List of accesses that need a further dependence check.
735 MemAccessInfoList CheckDeps;
737 /// Set of pointers that are read only.
738 SmallPtrSet<Value*, 16> ReadOnlyPtr;
740 /// Batched alias analysis results.
741 BatchAAResults BAA;
743 /// An alias set tracker to partition the access set by underlying object and
744 /// intrinsic property (such as TBAA metadata).
745 AliasSetTracker AST;
747 LoopInfo *LI;
749 /// Sets of potentially dependent accesses - members of one set share an
750 /// underlying pointer. The set "CheckDeps" identifies which sets really need a
751 /// dependence check.
752 MemoryDepChecker::DepCandidates &DepCands;
754 /// Initial processing of memory accesses determined that we may need
755 /// to add memchecks. Perform the analysis to determine the necessary checks.
757 /// Note that this is different from isDependencyCheckNeeded. When we retry
758 /// memcheck analysis without dependency checking
759 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
760 /// cleared while this remains set if we have potentially dependent accesses.
761 bool IsRTCheckAnalysisNeeded = false;
763 /// The SCEV predicate containing all the SCEV-related assumptions.
764 PredicatedScalarEvolution &PSE;
767 } // end anonymous namespace
769 /// Check whether a pointer can participate in a runtime bounds check.
770 /// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
771 /// by adding run-time checks (overflow checks) if necessary.
772 static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
773 const SCEV *PtrScev, Loop *L, bool Assume) {
774 // The bounds for a loop-invariant pointer are trivial.
775 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
776 return true;
778 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
780 if (!AR && Assume)
781 AR = PSE.getAsAddRec(Ptr);
783 if (!AR)
784 return false;
786 return AR->isAffine();
789 /// Check whether a pointer address cannot wrap.
790 static bool isNoWrap(PredicatedScalarEvolution &PSE,
791 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
792 Loop *L) {
793 const SCEV *PtrScev = PSE.getSCEV(Ptr);
794 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
795 return true;
797 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
798 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
799 return true;
801 return false;
804 static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
805 function_ref<void(Value *)> AddPointer) {
806 SmallPtrSet<Value *, 8> Visited;
807 SmallVector<Value *> WorkList;
808 WorkList.push_back(StartPtr);
810 while (!WorkList.empty()) {
811 Value *Ptr = WorkList.pop_back_val();
812 if (!Visited.insert(Ptr).second)
813 continue;
814 auto *PN = dyn_cast<PHINode>(Ptr);
815 // SCEV does not look through non-header PHIs inside the loop. Such phis
816 // can be analyzed by adding separate accesses for each incoming pointer
817 // value.
818 if (PN && InnermostLoop.contains(PN->getParent()) &&
819 PN->getParent() != InnermostLoop.getHeader()) {
820 for (const Use &Inc : PN->incoming_values())
821 WorkList.push_back(Inc);
822 } else
823 AddPointer(Ptr);
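// For instance (assumed IR shape): a pointer defined by a non-header phi
//
//   %p = phi ptr [ %gep.a, %then ], [ %gep.b, %else ]
//
// inside the loop is not analyzable as a single SCEV, so visitPointers
// registers %gep.a and %gep.b as two separate accesses instead of giving
// up on %p entirely.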
827 // Walk back through the IR for a pointer, looking for a select like the
828 // following:
830 // %offset = select i1 %cmp, i64 %a, i64 %b
831 // %addr = getelementptr double, double* %base, i64 %offset
832 // %ld = load double, double* %addr, align 8
834 // We won't be able to form a single SCEVAddRecExpr from this since the
835 // address for each loop iteration depends on %cmp. We could potentially
836 // produce multiple valid SCEVAddRecExprs, though, and check all of them for
837 // memory safety/aliasing if needed.
839 // If we encounter some IR we don't yet handle, or something obviously fine
840 // like a constant, then we just add the SCEV for that term to the list passed
841 // in by the caller. If we have a node that may potentially yield a valid
842 // SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
843 // ourselves before adding to the list.
844 static void findForkedSCEVs(
845 ScalarEvolution *SE, const Loop *L, Value *Ptr,
846 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
847 unsigned Depth) {
848 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
849 // we've exceeded our limit on recursion, just return whatever we have
850 // regardless of whether it can be used for a forked pointer or not, along
851 // with an indication of whether it might be a poison or undef value.
852 const SCEV *Scev = SE->getSCEV(Ptr);
853 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
854 !isa<Instruction>(Ptr) || Depth == 0) {
855 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
856 return;
859 Depth--;
861 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
862 return get<1>(S);
865 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
866 switch (Opcode) {
867 case Instruction::Add:
868 return SE->getAddExpr(L, R);
869 case Instruction::Sub:
870 return SE->getMinusSCEV(L, R);
871 default:
872 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
876 Instruction *I = cast<Instruction>(Ptr);
877 unsigned Opcode = I->getOpcode();
878 switch (Opcode) {
879 case Instruction::GetElementPtr: {
880 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
881 Type *SourceTy = GEP->getSourceElementType();
882 // We only handle base + single offset GEPs here for now.
883 // Not dealing with preexisting gathers yet, so no vectors.
884 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
885 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
886 break;
888 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
889 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
890 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
891 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
893 // See if we need to freeze our fork...
894 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
895 any_of(OffsetScevs, UndefPoisonCheck);
897 // Check that we only have a single fork, on either the base or the offset.
898 // Copy the SCEV across for the one without a fork in order to generate
899 // the full SCEV for both sides of the GEP.
900 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
901 BaseScevs.push_back(BaseScevs[0]);
902 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
903 OffsetScevs.push_back(OffsetScevs[0]);
904 else {
905 ScevList.emplace_back(Scev, NeedsFreeze);
906 break;
909 // Find the pointer type we need to extend to.
910 Type *IntPtrTy = SE->getEffectiveSCEVType(
911 SE->getSCEV(GEP->getPointerOperand())->getType());
913 // Find the size of the type being pointed to. We only have a single
914 // index term (guarded above) so we don't need to index into arrays or
915 // structures, just get the size of the scalar value.
916 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
918 // Scale up the offsets by the size of the type, then add to the bases.
919 const SCEV *Scaled1 = SE->getMulExpr(
920 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
921 const SCEV *Scaled2 = SE->getMulExpr(
922 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
923 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
924 NeedsFreeze);
925 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
926 NeedsFreeze);
927 break;
929 case Instruction::Select: {
930 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
931 // A select means we've found a forked pointer, but we currently only
932 // support a single select per pointer so if there's another behind this
933 // then we just bail out and return the generic SCEV.
934 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
935 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
936 if (ChildScevs.size() == 2) {
937 ScevList.push_back(ChildScevs[0]);
938 ScevList.push_back(ChildScevs[1]);
939 } else
940 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
941 break;
943 case Instruction::PHI: {
944 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
945 // A phi means we've found a forked pointer, but we currently only
946 // support a single phi per pointer so if there's another behind this
947 // then we just bail out and return the generic SCEV.
948 if (I->getNumOperands() == 2) {
949 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
950 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
952 if (ChildScevs.size() == 2) {
953 ScevList.push_back(ChildScevs[0]);
954 ScevList.push_back(ChildScevs[1]);
955 } else
956 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
957 break;
959 case Instruction::Add:
960 case Instruction::Sub: {
961 SmallVector<PointerIntPair<const SCEV *, 1, bool>> LScevs;
962 SmallVector<PointerIntPair<const SCEV *, 1, bool>> RScevs;
963 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
964 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
966 // See if we need to freeze our fork...
967 bool NeedsFreeze =
968 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
970 // Check that we only have a single fork, on either the left or right side.
971 // Copy the SCEV across for the one without a fork in order to generate
972 // the full SCEV for both sides of the BinOp.
973 if (LScevs.size() == 2 && RScevs.size() == 1)
974 RScevs.push_back(RScevs[0]);
975 else if (RScevs.size() == 2 && LScevs.size() == 1)
976 LScevs.push_back(LScevs[0]);
977 else {
978 ScevList.emplace_back(Scev, NeedsFreeze);
979 break;
982 ScevList.emplace_back(
983 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
984 NeedsFreeze);
985 ScevList.emplace_back(
986 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
987 NeedsFreeze);
988 break;
990 default:
991 // Just return the current SCEV if we haven't handled the instruction yet.
992 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
993 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
994 break;
998 static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
999 findForkedPointer(PredicatedScalarEvolution &PSE,
1000 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1001 const Loop *L) {
1002 ScalarEvolution *SE = PSE.getSE();
1003 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1004 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1005 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1007 // For now, we will only accept a forked pointer with two possible SCEVs
1008 // that are either SCEVAddRecExprs or loop invariant.
1009 if (Scevs.size() == 2 &&
1010 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1011 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1012 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1013 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1014 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1015 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1016 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1017 return Scevs;
1020 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1023 bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1024 MemAccessInfo Access, Type *AccessTy,
1025 const DenseMap<Value *, const SCEV *> &StridesMap,
1026 DenseMap<Value *, unsigned> &DepSetId,
1027 Loop *TheLoop, unsigned &RunningDepId,
1028 unsigned ASId, bool ShouldCheckWrap,
1029 bool Assume) {
1030 Value *Ptr = Access.getPointer();
1032 SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1033 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1035 for (auto &P : TranslatedPtrs) {
1036 const SCEV *PtrExpr = get<0>(P);
1037 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1038 return false;
1040 // When we run after a failing dependency check we have to make sure
1041 // we don't have wrapping pointers.
1042 if (ShouldCheckWrap) {
1043 // Skip wrap checking when translating pointers.
1044 if (TranslatedPtrs.size() > 1)
1045 return false;
1047 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1048 auto *Expr = PSE.getSCEV(Ptr);
1049 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1050 return false;
1051 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1054 // If there's only one option for Ptr, look it up after bounds and wrap
1055 // checking, because assumptions might have been added to PSE.
1056 if (TranslatedPtrs.size() == 1)
1057 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1058 false};
1061 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1062 // The id of the dependence set.
1063 unsigned DepId;
1065 if (isDependencyCheckNeeded()) {
1066 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1067 unsigned &LeaderId = DepSetId[Leader];
1068 if (!LeaderId)
1069 LeaderId = RunningDepId++;
1070 DepId = LeaderId;
1071 } else
1072 // Each access has its own dependence set.
1073 DepId = RunningDepId++;
1075 bool IsWrite = Access.getInt();
1076 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1077 NeedsFreeze);
1078 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1081 return true;
1084 bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1085 ScalarEvolution *SE, Loop *TheLoop,
1086 const DenseMap<Value *, const SCEV *> &StridesMap,
1087 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1088 // Find pointers with computable bounds. We are going to use this information
1089 // to place a runtime bound check.
1090 bool CanDoRT = true;
1092 bool MayNeedRTCheck = false;
1093 if (!IsRTCheckAnalysisNeeded) return true;
1095 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1097 // We assign a consecutive id to accesses from different alias sets.
1098 // Accesses between different groups don't need to be checked.
1099 unsigned ASId = 0;
1100 for (auto &AS : AST) {
1101 int NumReadPtrChecks = 0;
1102 int NumWritePtrChecks = 0;
1103 bool CanDoAliasSetRT = true;
1104 ++ASId;
1106 // We assign consecutive ids to accesses from different dependence sets.
1107 // Accesses within the same set don't need a runtime check.
1108 unsigned RunningDepId = 1;
1109 DenseMap<Value *, unsigned> DepSetId;
1111 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1113 // First, count how many write and read accesses are in the alias set. Also
1114 // collect MemAccessInfos for later.
1115 SmallVector<MemAccessInfo, 4> AccessInfos;
1116 for (const auto &A : AS) {
1117 Value *Ptr = A.getValue();
1118 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1120 if (IsWrite)
1121 ++NumWritePtrChecks;
1122 else
1123 ++NumReadPtrChecks;
1124 AccessInfos.emplace_back(Ptr, IsWrite);
1127 // We do not need runtime checks for this alias set if there are no writes,
1128 // or only a single write and no reads.
1129 if (NumWritePtrChecks == 0 ||
1130 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1131 assert((AS.size() <= 1 ||
1132 all_of(AS,
1133 [this](auto AC) {
1134 MemAccessInfo AccessWrite(AC.getValue(), true);
1135 return DepCands.findValue(AccessWrite) == DepCands.end();
1136 })) &&
1137 "Can only skip updating CanDoRT below, if all entries in AS "
1138 "are reads or there is at most 1 entry");
1139 continue;
1142 for (auto &Access : AccessInfos) {
1143 for (const auto &AccessTy : Accesses[Access]) {
1144 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1145 DepSetId, TheLoop, RunningDepId, ASId,
1146 ShouldCheckWrap, false)) {
1147 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1148 << *Access.getPointer() << '\n');
1149 Retries.push_back({Access, AccessTy});
1150 CanDoAliasSetRT = false;
1155 // Note that this function computes CanDoRT and MayNeedRTCheck
1156 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1157 // we have a pointer for which we couldn't find the bounds but we don't
1158 // actually need to emit any checks so it does not matter.
1160 // We need runtime checks for this alias set, if there are at least 2
1161 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1162 // any bound checks (because in that case the number of dependence sets is
1163 // incomplete).
1164 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1166 // We need to perform run-time alias checks, but some pointers had bounds
1167 // that couldn't be checked.
1168 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1169 // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
1170 // We know that we need these checks, so we can now be more aggressive
1171 // and add further checks if required (overflow checks).
1172 CanDoAliasSetRT = true;
1173 for (auto Retry : Retries) {
1174 MemAccessInfo Access = Retry.first;
1175 Type *AccessTy = Retry.second;
1176 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1177 DepSetId, TheLoop, RunningDepId, ASId,
1178 ShouldCheckWrap, /*Assume=*/true)) {
1179 CanDoAliasSetRT = false;
1180 UncomputablePtr = Access.getPointer();
1181 break;
1186 CanDoRT &= CanDoAliasSetRT;
1187 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1188 ++ASId;
1191 // If the pointers that we would use for the bounds comparison have different
1192 // address spaces, assume the values aren't directly comparable, so we can't
1193 // use them for the runtime check. We also have to assume they could
1194 // overlap. In the future there should be metadata for whether address spaces
1195 // are disjoint.
1196 unsigned NumPointers = RtCheck.Pointers.size();
1197 for (unsigned i = 0; i < NumPointers; ++i) {
1198 for (unsigned j = i + 1; j < NumPointers; ++j) {
1199 // Only need to check pointers between two different dependency sets.
1200 if (RtCheck.Pointers[i].DependencySetId ==
1201 RtCheck.Pointers[j].DependencySetId)
1202 continue;
1203 // Only need to check pointers in the same alias set.
1204 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1205 continue;
1207 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1208 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1210 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1211 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1212 if (ASi != ASj) {
1213 LLVM_DEBUG(
1214 dbgs() << "LAA: Runtime check would require comparison between"
1215 " different address spaces\n");
1216 return false;
1221 if (MayNeedRTCheck && CanDoRT)
1222 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1224 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1225 << " pointer comparisons.\n");
1227 // If we can do run-time checks, but there are no checks, no runtime checks
1228 // are needed. This can happen when all pointers point to the same underlying
1229 // object for example.
1230 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1232 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1233 if (!CanDoRTIfNeeded)
1234 RtCheck.reset();
1235 return CanDoRTIfNeeded;
1238 void AccessAnalysis::processMemAccesses() {
1239 // We process the set twice: first we process read-write pointers, last we
1240 // process read-only pointers. This allows us to skip dependence tests for
1241 // read-only pointers.
1243 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1244 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1245 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1246 LLVM_DEBUG({
1247 for (auto A : Accesses)
1248 dbgs() << "\t" << *A.first.getPointer() << " ("
1249 << (A.first.getInt()
1250 ? "write"
1251 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1252 : "read"))
1253 << ")\n";
1256 // The AliasSetTracker has nicely partitioned our pointers by metadata
1257 // compatibility and potential for underlying-object overlap. As a result, we
1258 // only need to check for potential pointer dependencies within each alias
1259 // set.
1260 for (const auto &AS : AST) {
1261 // Note that both the alias-set tracker and the alias sets themselves use
1262 // linked lists internally and so the iteration order here is deterministic
1263 // (matching the original instruction order within each set).
1265 bool SetHasWrite = false;
1267 // Map of pointers to last access encountered.
1268 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1269 UnderlyingObjToAccessMap ObjToLastAccess;
1271 // Set of accesses to check after all writes have been processed.
1272 PtrAccessMap DeferredAccesses;
1274 // Iterate over each alias set twice, once to process read/write pointers,
1275 // and then to process read-only pointers.
1276 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1277 bool UseDeferred = SetIteration > 0;
1278 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1280 for (const auto &AV : AS) {
1281 Value *Ptr = AV.getValue();
1283 // For a single memory access in AliasSetTracker, Accesses may contain
1284 // both read and write, and they both need to be handled for CheckDeps.
1285 for (const auto &AC : S) {
1286 if (AC.first.getPointer() != Ptr)
1287 continue;
1289 bool IsWrite = AC.first.getInt();
1291 // If we're using the deferred access set, then it contains only
1292 // reads.
1293 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1294 if (UseDeferred && !IsReadOnlyPtr)
1295 continue;
1296 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1297 // read or a write.
1298 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1299 S.count(MemAccessInfo(Ptr, false))) &&
1300 "Alias-set pointer not in the access set?");
1302 MemAccessInfo Access(Ptr, IsWrite);
1303 DepCands.insert(Access);
1305 // Memorize read-only pointers for later processing and skip them in
1306 // the first round (they need to be checked after we have seen all
1307 // write pointers). Note: we also mark pointers that are not
1308 // consecutive as "read-only" pointers (so that we check
1309 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1310 if (!UseDeferred && IsReadOnlyPtr) {
1311 // We only use the pointer keys, the types vector values don't
1312 // matter.
1313 DeferredAccesses.insert({Access, {}});
1314 continue;
1317 // If this is a write, check other reads and writes for conflicts. If
1318 // this is a read, only check other writes for conflicts (but only if
1319 // there is no other write to the ptr - this is an optimization to
1320 // catch "a[i] = a[i] + " without having to do a dependence check).
1321 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1322 CheckDeps.push_back(Access);
1323 IsRTCheckAnalysisNeeded = true;
1326 if (IsWrite)
1327 SetHasWrite = true;
1329 // Create sets of pointers connected by a shared alias set and
1330 // underlying object.
1331 typedef SmallVector<const Value *, 16> ValueVector;
1332 ValueVector TempObjects;
1334 getUnderlyingObjects(Ptr, TempObjects, LI);
1335 LLVM_DEBUG(dbgs()
1336 << "Underlying objects for pointer " << *Ptr << "\n");
1337 for (const Value *UnderlyingObj : TempObjects) {
1338 // Null pointers never alias; don't join sets for pointers that have "null"
1339 // in their UnderlyingObjects list.
1340 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1341 !NullPointerIsDefined(
1342 TheLoop->getHeader()->getParent(),
1343 UnderlyingObj->getType()->getPointerAddressSpace()))
1344 continue;
1346 UnderlyingObjToAccessMap::iterator Prev =
1347 ObjToLastAccess.find(UnderlyingObj);
1348 if (Prev != ObjToLastAccess.end())
1349 DepCands.unionSets(Access, Prev->second);
1351 ObjToLastAccess[UnderlyingObj] = Access;
1352 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1360 /// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1361 /// i.e. monotonically increasing/decreasing.
1362 static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1363 PredicatedScalarEvolution &PSE, const Loop *L) {
1365 // FIXME: This should probably only return true for NUW.
1366 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1367 return true;
1369 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1370 return true;
1372 // Scalar evolution does not propagate the non-wrapping flags to values that
1373 // are derived from a non-wrapping induction variable because non-wrapping
1374 // could be flow-sensitive.
1376 // Look through the potentially overflowing instruction to try to prove
1377 // non-wrapping for the *specific* value of Ptr.
1379 // The arithmetic implied by an inbounds GEP can't overflow.
1380 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1381 if (!GEP || !GEP->isInBounds())
1382 return false;
1384 // Make sure there is only one non-const index and analyze that.
1385 Value *NonConstIndex = nullptr;
1386 for (Value *Index : GEP->indices())
1387 if (!isa<ConstantInt>(Index)) {
1388 if (NonConstIndex)
1389 return false;
1390 NonConstIndex = Index;
1392 if (!NonConstIndex)
1393 // The recurrence is on the pointer, ignore for now.
1394 return false;
1396 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1397 // AddRec using a NSW operation.
1398 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1399 if (OBO->hasNoSignedWrap() &&
1400 // Assume the other operand is constant so that the AddRec can be
1401 // easily found.
1402 isa<ConstantInt>(OBO->getOperand(1))) {
1403 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1405 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1406 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1409 return false;
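// A shape the GEP-based reasoning above is meant to recognize
// (hypothetical IR):
//
//   %idx = add nsw i64 %iv, 7            ; NSW op with a constant operand
//   %gep = getelementptr inbounds double, ptr %A, i64 %idx
//
// where %iv is an NSW AddRec of the queried loop; the single non-constant
// index then cannot wrap, so the pointer is treated as non-wrapping.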
1412 /// Check whether the access through \p Ptr has a constant stride.
1413 std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1414 Type *AccessTy, Value *Ptr,
1415 const Loop *Lp,
1416 const DenseMap<Value *, const SCEV *> &StridesMap,
1417 bool Assume, bool ShouldCheckWrap) {
1418 Type *Ty = Ptr->getType();
1419 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1421 if (isa<ScalableVectorType>(AccessTy)) {
1422 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1423 << "\n");
1424 return std::nullopt;
1427 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1429 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1430 if (Assume && !AR)
1431 AR = PSE.getAsAddRec(Ptr);
1433 if (!AR) {
1434 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1435 << " SCEV: " << *PtrScev << "\n");
1436 return std::nullopt;
1439 // The access function must stride over the innermost loop.
1440 if (Lp != AR->getLoop()) {
1441 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1442 << *Ptr << " SCEV: " << *AR << "\n");
1443 return std::nullopt;
1446 // Check the step is constant.
1447 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1449 // Calculate the pointer stride and check if it is constant.
1450 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1451 if (!C) {
1452 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1453 << " SCEV: " << *AR << "\n");
1454 return std::nullopt;
1457 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1458 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1459 int64_t Size = AllocSize.getFixedValue();
1460 const APInt &APStepVal = C->getAPInt();
1462 // Huge step value - give up.
1463 if (APStepVal.getBitWidth() > 64)
1464 return std::nullopt;
1466 int64_t StepVal = APStepVal.getSExtValue();
1468 // Strided access.
1469 int64_t Stride = StepVal / Size;
1470 int64_t Rem = StepVal % Size;
1471 if (Rem)
1472 return std::nullopt;
1474 if (!ShouldCheckWrap)
1475 return Stride;
1477 // The address calculation must not wrap. Otherwise, a dependence could be
1478 // inverted.
1479 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1480 return Stride;
1482 // An inbounds getelementptr that is an AddRec with a unit stride
1483 // cannot wrap per definition. If it did, the result would be poison
1484 // and any memory access dependent on it would be immediate UB
1485 // when executed.
1486 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1487 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1488 return Stride;
1490 // If the null pointer is undefined, then an access sequence which would
1491 // otherwise access it can be assumed not to unsigned-wrap. Note that this
1492 // assumes the object in memory is aligned to the natural alignment.
1493 unsigned AddrSpace = Ty->getPointerAddressSpace();
1494 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1495 (Stride == 1 || Stride == -1))
1496 return Stride;
1498 if (Assume) {
1499 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1500 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1501 << "LAA: Pointer: " << *Ptr << "\n"
1502 << "LAA: SCEV: " << *AR << "\n"
1503 << "LAA: Added an overflow assumption\n");
1504 return Stride;
1506 LLVM_DEBUG(
1507 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1508 << *Ptr << " SCEV: " << *AR << "\n");
1509 return std::nullopt;
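// For illustration (assuming the usual 4-byte i32), a loop such as
//   void foo(int *A, int n) {
//     for (int i = 0; i < n; ++i)
//       A[2 * i] = 0;
//   }
// gives the store a pointer SCEV of the form {A,+,8}, so Step is the
// constant 8, AllocSize is 4 and the returned stride is 8 / 4 = 2. A step
// that is not a multiple of the access size (e.g. 6) leaves a remainder and
// the access is rejected as non-strided.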
1512 std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1513 Type *ElemTyB, Value *PtrB,
1514 const DataLayout &DL,
1515 ScalarEvolution &SE, bool StrictCheck,
1516 bool CheckType) {
1517 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1519 // Make sure that A and B are different pointers.
1520 if (PtrA == PtrB)
1521 return 0;
1523 // Make sure that the element types are the same if required.
1524 if (CheckType && ElemTyA != ElemTyB)
1525 return std::nullopt;
1527 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1528 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1530 // Check that the address spaces match.
1531 if (ASA != ASB)
1532 return std::nullopt;
1533 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1535 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1536 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1537 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1539 int Val;
1540 if (PtrA1 == PtrB1) {
1541 // Retrieve the address space again as pointer stripping now tracks through
1542 // `addrspacecast`.
1543 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1544 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1545 // Check that the address spaces match and that the pointers are valid.
1546 if (ASA != ASB)
1547 return std::nullopt;
1549 IdxWidth = DL.getIndexSizeInBits(ASA);
1550 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1551 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1553 OffsetB -= OffsetA;
1554 Val = OffsetB.getSExtValue();
1555 } else {
1556 // Otherwise compute the distance with SCEV between the base pointers.
1557 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1558 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1559 const auto *Diff =
1560 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1561 if (!Diff)
1562 return std::nullopt;
1563 Val = Diff->getAPInt().getSExtValue();
1565 int Size = DL.getTypeStoreSize(ElemTyA);
1566 int Dist = Val / Size;
1568 // Ensure that the calculated distance matches the type-based one after
1569 // stripping all bitcasts from the provided pointers.
1570 if (!StrictCheck || Dist * Size == Val)
1571 return Dist;
1572 return std::nullopt;
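// For illustration (assuming 4-byte i32 elements), if PtrA is &A[1] and PtrB
// is &A[4], both strip back to the same base A with byte offsets 4 and 16,
// so Val is 12 and the returned element distance is 12 / 4 = 3. Under
// StrictCheck, a byte distance that is not a multiple of the element size
// (e.g. 14) yields std::nullopt instead.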
1575 bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
1576 const DataLayout &DL, ScalarEvolution &SE,
1577 SmallVectorImpl<unsigned> &SortedIndices) {
1578 assert(llvm::all_of(
1579 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1580 "Expected list of pointer operands.");
1581 // Walk over the pointers, and map each of them to an offset relative to
1582 // the first pointer in the array.
1583 Value *Ptr0 = VL[0];
1585 using DistOrdPair = std::pair<int64_t, int>;
1586 auto Compare = llvm::less_first();
1587 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1588 Offsets.emplace(0, 0);
1589 int Cnt = 1;
1590 bool IsConsecutive = true;
1591 for (auto *Ptr : VL.drop_front()) {
1592 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1593 /*StrictCheck=*/true);
1594 if (!Diff)
1595 return false;
1597 // Check whether a pointer with the same offset has already been seen.
1598 int64_t Offset = *Diff;
1599 auto Res = Offsets.emplace(Offset, Cnt);
1600 if (!Res.second)
1601 return false;
1602 // Consecutive order if the inserted element is the last one.
1603 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1604 ++Cnt;
1606 SortedIndices.clear();
1607 if (!IsConsecutive) {
1608 // Fill SortedIndices array only if it is non-consecutive.
1609 SortedIndices.resize(VL.size());
1610 Cnt = 0;
1611 for (const std::pair<int64_t, int> &Pair : Offsets) {
1612 SortedIndices[Cnt] = Pair.second;
1613 ++Cnt;
1616 return true;
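// For illustration, given VL = {&A[2], &A[0], &A[1]}, the element offsets
// relative to VL[0] are {0, -2, -1}, so the ordered (offset, index) set
// becomes {(-2,1), (-1,2), (0,0)}. Because the later insertions do not land
// at the end of the set, the pointers are not already consecutive and
// SortedIndices is filled with {1, 2, 0}.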
1619 /// Returns true if the memory operations \p A and \p B are consecutive.
1620 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
1621 ScalarEvolution &SE, bool CheckType) {
1622 Value *PtrA = getLoadStorePointerOperand(A);
1623 Value *PtrB = getLoadStorePointerOperand(B);
1624 if (!PtrA || !PtrB)
1625 return false;
1626 Type *ElemTyA = getLoadStoreType(A);
1627 Type *ElemTyB = getLoadStoreType(B);
1628 std::optional<int> Diff =
1629 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1630 /*StrictCheck=*/true, CheckType);
1631 return Diff && *Diff == 1;
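// For illustration, two i32 loads from A[i] and A[i+1] have an element
// distance of exactly 1 and are reported as consecutive, whereas A[i] and
// A[i+2] (distance 2), or accesses with mismatched element types when
// CheckType is set, are not.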
1634 void MemoryDepChecker::addAccess(StoreInst *SI) {
1635 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1636 [this, SI](Value *Ptr) {
1637 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1638 InstMap.push_back(SI);
1639 ++AccessIdx;
1643 void MemoryDepChecker::addAccess(LoadInst *LI) {
1644 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1645 [this, LI](Value *Ptr) {
1646 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1647 InstMap.push_back(LI);
1648 ++AccessIdx;
1652 MemoryDepChecker::VectorizationSafetyStatus
1653 MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
1654 switch (Type) {
1655 case NoDep:
1656 case Forward:
1657 case BackwardVectorizable:
1658 return VectorizationSafetyStatus::Safe;
1660 case Unknown:
1661 return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
1662 case ForwardButPreventsForwarding:
1663 case Backward:
1664 case BackwardVectorizableButPreventsForwarding:
1665 return VectorizationSafetyStatus::Unsafe;
1667 llvm_unreachable("unexpected DepType!");
1670 bool MemoryDepChecker::Dependence::isBackward() const {
1671 switch (Type) {
1672 case NoDep:
1673 case Forward:
1674 case ForwardButPreventsForwarding:
1675 case Unknown:
1676 return false;
1678 case BackwardVectorizable:
1679 case Backward:
1680 case BackwardVectorizableButPreventsForwarding:
1681 return true;
1683 llvm_unreachable("unexpected DepType!");
1686 bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
1687 return isBackward() || Type == Unknown;
1690 bool MemoryDepChecker::Dependence::isForward() const {
1691 switch (Type) {
1692 case Forward:
1693 case ForwardButPreventsForwarding:
1694 return true;
1696 case NoDep:
1697 case Unknown:
1698 case BackwardVectorizable:
1699 case Backward:
1700 case BackwardVectorizableButPreventsForwarding:
1701 return false;
1703 llvm_unreachable("unexpected DepType!");
1706 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1707 uint64_t TypeByteSize) {
1708 // If loads occur at a distance that is not a multiple of a feasible vector
1709 // factor, store-load forwarding does not take place.
1710 // Positive dependences might cause trouble because vectorizing them might
1711 // prevent store-load forwarding, making vectorized code run a lot slower.
1712 // a[i] = a[i-3] ^ a[i-8];
1713 // The stores to a[i:i+1] don't align with the loads from a[i-3:i-2], and
1714 // hence on a typical architecture store-load forwarding does not take
1715 // place. Vectorizing such cases does not make sense.
1716 // Store-load forwarding distance.
1718 // After this many iterations store-to-load forwarding conflicts should not
1719 // cause any slowdowns.
1720 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1721 // Maximum vector factor.
1722 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1723 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1725 // Compute the smallest VF at which the store and load would be misaligned.
1726 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1727 VF *= 2) {
1728 // If the number of vector iterations between the store and the load is
1729 // small, we could incur conflicts.
1730 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1731 MaxVFWithoutSLForwardIssues = (VF >> 1);
1732 break;
1736 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1737 LLVM_DEBUG(
1738 dbgs() << "LAA: Distance " << Distance
1739 << " that could cause a store-load forwarding conflict\n");
1740 return true;
1743 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1744 MaxVFWithoutSLForwardIssues !=
1745 VectorizerParams::MaxVectorWidth * TypeByteSize)
1746 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1747 return false;
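// Worked example (assuming 4-byte elements and the default maximum vector
// width): for Distance = 24 and TypeByteSize = 4, the threshold
// NumItersForStoreLoadThroughMemory is 32. VF = 8 divides 24 evenly, but
// VF = 16 does not (24 % 16 == 8) and 24 / 16 == 1 < 32, so
// MaxVFWithoutSLForwardIssues is clamped to 8 bytes, i.e. two elements.
// That is still >= 2 * TypeByteSize, so the dependence is not rejected, but
// MinDepDistBytes is lowered to 8 to keep store-to-load forwarding working.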
1750 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1751 if (Status < S)
1752 Status = S;
1755 /// Given a dependence-distance \p Dist between two
1756 /// memory accesses that have the same stride, whose absolute value is given
1757 /// in \p Stride, and that have the same type size \p TypeByteSize,
1758 /// in a loop whose backedge-taken count is \p BackedgeTakenCount, check if it is
1759 /// possible to prove statically that the dependence distance is larger
1760 /// than the range that the accesses will travel through the execution of
1761 /// the loop. If so, return true; false otherwise. This is useful for
1762 /// example in loops such as the following (PR31098):
1763 /// for (i = 0; i < D; ++i) {
1764 /// = out[i];
1765 /// out[i+D] =
1766 /// }
1767 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1768 const SCEV &BackedgeTakenCount,
1769 const SCEV &Dist, uint64_t Stride,
1770 uint64_t TypeByteSize) {
1772 // If we can prove that
1773 // (**) |Dist| > BackedgeTakenCount * Step
1774 // where Step is the absolute stride of the memory accesses in bytes,
1775 // then there is no dependence.
1777 // Rationale:
1778 // We basically want to check if the absolute distance (|Dist/Step|)
1779 // is >= the loop iteration count (or > BackedgeTakenCount).
1780 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1781 // Section 4.2.1); Note that for vectorization it is sufficient to prove
1782 // that the dependence distance is >= VF; This is checked elsewhere.
1783 // But in some cases we can prune dependence distances early, and
1784 // even before selecting the VF, and without a runtime test, by comparing
1785 // the distance against the loop iteration count. Since the vectorized code
1786 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1787 // also guarantees that distance >= VF.
1789 const uint64_t ByteStride = Stride * TypeByteSize;
1790 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1791 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1793 const SCEV *CastedDist = &Dist;
1794 const SCEV *CastedProduct = Product;
1795 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1796 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1798 // The dependence distance can be positive/negative, so we sign extend Dist;
1799 // The multiplication of the absolute stride in bytes and the
1800 // backedgeTakenCount is non-negative, so we zero extend Product.
1801 if (DistTypeSizeBits > ProductTypeSizeBits)
1802 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1803 else
1804 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1806 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1807 // (If so, then we have proven (**) because |Dist| >= Dist)
1808 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1809 if (SE.isKnownPositive(Minus))
1810 return true;
1812 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1813 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1814 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1815 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1816 if (SE.isKnownPositive(Minus))
1817 return true;
1819 return false;
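// Worked example for the loop in the comment above (i32 accesses, so
// TypeByteSize = 4 and Stride = 1): the distance between out[i] and
// out[i+D] is 4*D bytes and the backedge-taken count is D-1, so
// Dist - BackedgeTakenCount * ByteStride = 4*D - 4*(D-1) = 4, which SCEV can
// prove positive. The dependence distance therefore exceeds the range the
// accesses travel during the loop, and the two accesses are independent.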
1822 /// Check the dependence for two accesses with the same stride \p Stride.
1823 /// \p Distance is the positive distance and \p TypeByteSize is type size in
1824 /// bytes.
1826 /// \returns true if they are independent.
1827 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1828 uint64_t TypeByteSize) {
1829 assert(Stride > 1 && "The stride must be greater than 1");
1830 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1831 assert(Distance > 0 && "The distance must be non-zero");
1833 // Skip if the distance is not a multiple of the type byte size.
1834 if (Distance % TypeByteSize)
1835 return false;
1837 uint64_t ScaledDist = Distance / TypeByteSize;
1839 // No dependence if the scaled distance is not a multiple of the stride.
1840 // E.g.
1841 // for (i = 0; i < 1024 ; i += 4)
1842 // A[i+2] = A[i] + 1;
1844 // Two accesses in memory (scaled distance is 2, stride is 4):
1845 // | A[0] | | | | A[4] | | | |
1846 // | | | A[2] | | | | A[6] | |
1848 // E.g.
1849 // for (i = 0; i < 1024 ; i += 3)
1850 // A[i+4] = A[i] + 1;
1852 // Two accesses in memory (scaled distance is 4, stride is 3):
1853 // | A[0] | | | A[3] | | | A[6] | | |
1854 // | | | | | A[4] | | | A[7] | |
1855 return ScaledDist % Stride;
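// Worked example: in the first loop above, Distance is 8 bytes,
// TypeByteSize is 4 and Stride is 4, so ScaledDist is 2 and 2 % 4 != 0;
// the two accesses can never touch the same element, and the function
// returns true (independent). In the second loop, ScaledDist is 4 and
// 4 % 3 != 0, so it is independent as well.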
1858 MemoryDepChecker::Dependence::DepType
1859 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1860 const MemAccessInfo &B, unsigned BIdx,
1861 const DenseMap<Value *, const SCEV *> &Strides) {
1862 assert (AIdx < BIdx && "Must pass arguments in program order");
1864 auto [APtr, AIsWrite] = A;
1865 auto [BPtr, BIsWrite] = B;
1866 Type *ATy = getLoadStoreType(InstMap[AIdx]);
1867 Type *BTy = getLoadStoreType(InstMap[BIdx]);
1869 // Two reads are independent.
1870 if (!AIsWrite && !BIsWrite)
1871 return Dependence::NoDep;
1873 // We cannot check pointers in different address spaces.
1874 if (APtr->getType()->getPointerAddressSpace() !=
1875 BPtr->getType()->getPointerAddressSpace())
1876 return Dependence::Unknown;
1878 int64_t StrideAPtr =
1879 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1880 int64_t StrideBPtr =
1881 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1883 const SCEV *Src = PSE.getSCEV(APtr);
1884 const SCEV *Sink = PSE.getSCEV(BPtr);
1886 // If the induction step is negative we have to invert source and sink of the
1887 // dependence.
1888 if (StrideAPtr < 0) {
1889 std::swap(APtr, BPtr);
1890 std::swap(ATy, BTy);
1891 std::swap(Src, Sink);
1892 std::swap(AIsWrite, BIsWrite);
1893 std::swap(AIdx, BIdx);
1894 std::swap(StrideAPtr, StrideBPtr);
1897 ScalarEvolution &SE = *PSE.getSE();
1898 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1900 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1901 << "(Induction step: " << StrideAPtr << ")\n");
1902 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1903 << *InstMap[BIdx] << ": " << *Dist << "\n");
1905 // Need accesses with constant stride. We don't want to vectorize
1906 // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1907 // the address space.
1908 if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
1909 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1910 return Dependence::Unknown;
1913 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1914 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1915 bool HasSameSize =
1916 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
1917 uint64_t Stride = std::abs(StrideAPtr);
1919 if (!isa<SCEVCouldNotCompute>(Dist) && HasSameSize &&
1920 isSafeDependenceDistance(DL, SE, *(PSE.getBackedgeTakenCount()), *Dist,
1921 Stride, TypeByteSize))
1922 return Dependence::NoDep;
1924 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1925 if (!C) {
1926 LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1927 FoundNonConstantDistanceDependence = true;
1928 return Dependence::Unknown;
1931 const APInt &Val = C->getAPInt();
1932 int64_t Distance = Val.getSExtValue();
1934 // Attempt to prove strided accesses independent.
1935 if (std::abs(Distance) > 0 && Stride > 1 && HasSameSize &&
1936 areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1937 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1938 return Dependence::NoDep;
1941 // Negative distances are not plausible dependencies.
1942 if (Val.isNegative()) {
1943 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1944 // There is no need to update MaxSafeVectorWidthInBits after the call to
1945 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes,
1946 // since a forward dependency will allow vectorization using any width.
1947 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1948 (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1949 !HasSameSize)) {
1950 LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1951 return Dependence::ForwardButPreventsForwarding;
1954 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1955 return Dependence::Forward;
1958 // Write to the same location with the same size.
1959 if (Val == 0) {
1960 if (HasSameSize)
1961 return Dependence::Forward;
1962 LLVM_DEBUG(
1963 dbgs() << "LAA: Zero dependence difference but different type sizes\n");
1964 return Dependence::Unknown;
1967 assert(Val.isStrictlyPositive() && "Expect a positive value");
1969 if (!HasSameSize) {
1970 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
1971 "different type sizes\n");
1972 return Dependence::Unknown;
1975 // Bail out early if passed-in parameters make vectorization not feasible.
1976 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1977 VectorizerParams::VectorizationFactor : 1);
1978 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1979 VectorizerParams::VectorizationInterleave : 1);
1980 // The minimum number of iterations for a vectorized/unrolled version.
1981 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1983 // It's not vectorizable if the distance is smaller than the minimum distance
1984 // needed for a vectorized/unrolled version. Vectorizing one iteration in
1985 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1986 // TypeByteSize (no need to add the last gap distance).
1988 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1989 // foo(int *A) {
1990 // int *B = (int *)((char *)A + 14);
1991 // for (i = 0 ; i < 1024 ; i += 2)
1992 // B[i] = A[i] + 1;
1993 // }
1995 // Two accesses in memory (stride is 2):
1996 // | A[0] | | A[2] | | A[4] | | A[6] | |
1997 // | B[0] | | B[2] | | B[4] |
1999 // Distance needed for vectorizing all iterations except the last one:
2000 // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
2001 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2003 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2004 // 12, which is less than the distance of 14.
2006 // If MinNumIter is 4 (say a user forces the vectorization factor to be 4),
2007 // the minimum distance needed is 28, which is greater than the distance of
2008 // 14, so it is not safe to vectorize.
2009 uint64_t MinDistanceNeeded =
2010 TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
2011 if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
2012 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
2013 << Distance << '\n');
2014 return Dependence::Backward;
2017 // Unsafe if the minimum distance needed is greater than the smallest
2018 // dependence distance recorded so far.
2019 if (MinDistanceNeeded > MinDepDistBytes) {
2020 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2021 << MinDistanceNeeded << " size in bytes\n");
2022 return Dependence::Backward;
2025 // Positive distance bigger than max vectorization factor.
2026 // FIXME: Should use the max factor instead of the max distance in bytes,
2027 // which cannot handle different types.
2028 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2029 // void foo (int *A, char *B) {
2030 // for (unsigned i = 0; i < 1024; i++) {
2031 // A[i+2] = A[i] + 1;
2032 // B[i+2] = B[i] + 1;
2033 // }
2034 // }
2036 // This case is currently unsafe according to the max safe distance. If we
2037 // analyze the two accesses on array B, the max safe dependence distance
2038 // is 2. Then when we analyze the accesses on array A, the minimum distance
2039 // needed is 8, which exceeds that limit of 2 and forbids vectorization. But
2040 // actually both A and B could be vectorized with a factor of 2.
2041 MinDepDistBytes =
2042 std::min(static_cast<uint64_t>(Distance), MinDepDistBytes);
2044 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2045 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2046 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2047 couldPreventStoreLoadForward(Distance, TypeByteSize)) {
2048 // Sanity check that we didn't update MinDepDistBytes when calling
2049 // couldPreventStoreLoadForward
2050 assert(MinDepDistBytes == MinDepDistBytesOld &&
2051 "An update to MinDepDistBytes requires an update to "
2052 "MaxSafeVectorWidthInBits");
2053 (void)MinDepDistBytesOld;
2054 return Dependence::BackwardVectorizableButPreventsForwarding;
2057 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2058 // since there is a backwards dependency.
2059 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * Stride);
2060 LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
2061 << " with max VF = " << MaxVF << '\n');
2062 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2063 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2064 return Dependence::BackwardVectorizable;
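// Worked example for the final, backward-vectorizable case: with i32
// accesses (TypeByteSize = 4), Stride = 1 and a constant distance of 16
// bytes, MinDistanceNeeded for MinNumIter = 2 is 4 * 1 * 1 + 4 = 8 <= 16, so
// the dependence is classified as BackwardVectorizable; MinDepDistBytes
// becomes 16, MaxVF is 16 / 4 = 4 elements, and MaxSafeVectorWidthInBits is
// capped at 128 (assuming no store-to-load forwarding hazard applies).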
2067 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
2068 MemAccessInfoList &CheckDeps,
2069 const DenseMap<Value *, const SCEV *> &Strides) {
2071 MinDepDistBytes = -1;
2072 SmallPtrSet<MemAccessInfo, 8> Visited;
2073 for (MemAccessInfo CurAccess : CheckDeps) {
2074 if (Visited.count(CurAccess))
2075 continue;
2077 // Get the relevant memory access set.
2078 EquivalenceClasses<MemAccessInfo>::iterator I =
2079 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2081 // Check accesses within this set.
2082 EquivalenceClasses<MemAccessInfo>::member_iterator AI =
2083 AccessSets.member_begin(I);
2084 EquivalenceClasses<MemAccessInfo>::member_iterator AE =
2085 AccessSets.member_end();
2087 // Check every access pair.
2088 while (AI != AE) {
2089 Visited.insert(*AI);
2090 bool AIIsWrite = AI->getInt();
2091 // Check loads only against the members that follow them in the equivalence
2092 // class, but check stores also against other stores to the same address.
2093 EquivalenceClasses<MemAccessInfo>::member_iterator OI =
2094 (AIIsWrite ? AI : std::next(AI));
2095 while (OI != AE) {
2096 // Check every accessing instruction pair in program order.
2097 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2098 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2099 // Scan all accesses of the other member, but only the accesses that
2100 // follow I1 when scanning the same member.
2101 for (std::vector<unsigned>::iterator
2102 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2103 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2104 I2 != I2E; ++I2) {
2105 auto A = std::make_pair(&*AI, *I1);
2106 auto B = std::make_pair(&*OI, *I2);
2108 assert(*I1 != *I2);
2109 if (*I1 > *I2)
2110 std::swap(A, B);
2112 Dependence::DepType Type =
2113 isDependent(*A.first, A.second, *B.first, B.second, Strides);
2114 mergeInStatus(Dependence::isSafeForVectorization(Type));
2116 // Gather dependences unless we accumulated MaxDependences
2117 // dependences. In that case return as soon as we find the first
2118 // unsafe dependence. This puts a limit on this quadratic
2119 // algorithm.
2120 if (RecordDependences) {
2121 if (Type != Dependence::NoDep)
2122 Dependences.push_back(Dependence(A.second, B.second, Type));
2124 if (Dependences.size() >= MaxDependences) {
2125 RecordDependences = false;
2126 Dependences.clear();
2127 LLVM_DEBUG(dbgs()
2128 << "Too many dependences, stopped recording\n");
2131 if (!RecordDependences && !isSafeForVectorization())
2132 return false;
2134 ++OI;
2136 AI++;
2140 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2141 return isSafeForVectorization();
2144 SmallVector<Instruction *, 4>
2145 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
2146 MemAccessInfo Access(Ptr, isWrite);
2147 auto &IndexVector = Accesses.find(Access)->second;
2149 SmallVector<Instruction *, 4> Insts;
2150 transform(IndexVector,
2151 std::back_inserter(Insts),
2152 [&](unsigned Idx) { return this->InstMap[Idx]; });
2153 return Insts;
2156 const char *MemoryDepChecker::Dependence::DepName[] = {
2157 "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
2158 "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
2160 void MemoryDepChecker::Dependence::print(
2161 raw_ostream &OS, unsigned Depth,
2162 const SmallVectorImpl<Instruction *> &Instrs) const {
2163 OS.indent(Depth) << DepName[Type] << ":\n";
2164 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2165 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2168 bool LoopAccessInfo::canAnalyzeLoop() {
2169 // We need to have a loop header.
2170 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2171 << TheLoop->getHeader()->getParent()->getName() << ": "
2172 << TheLoop->getHeader()->getName() << '\n');
2174 // We can only analyze innermost loops.
2175 if (!TheLoop->isInnermost()) {
2176 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2177 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2178 return false;
2181 // We must have a single backedge.
2182 if (TheLoop->getNumBackEdges() != 1) {
2183 LLVM_DEBUG(
2184 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2185 recordAnalysis("CFGNotUnderstood")
2186 << "loop control flow is not understood by analyzer";
2187 return false;
2190 // ScalarEvolution needs to be able to find the exit count.
2191 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2192 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2193 recordAnalysis("CantComputeNumberOfIterations")
2194 << "could not determine number of loop iterations";
2195 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2196 return false;
2199 return true;
2202 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2203 const TargetLibraryInfo *TLI,
2204 DominatorTree *DT) {
2205 // Holds the Load and Store instructions.
2206 SmallVector<LoadInst *, 16> Loads;
2207 SmallVector<StoreInst *, 16> Stores;
2209 // Holds all the different accesses in the loop.
2210 unsigned NumReads = 0;
2211 unsigned NumReadWrites = 0;
2213 bool HasComplexMemInst = false;
2215 // A runtime check is only legal to insert if there are no convergent calls.
2216 HasConvergentOp = false;
2218 PtrRtChecking->Pointers.clear();
2219 PtrRtChecking->Need = false;
2221 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2223 const bool EnableMemAccessVersioningOfLoop =
2224 EnableMemAccessVersioning &&
2225 !TheLoop->getHeader()->getParent()->hasOptSize();
2227 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2228 // loop info, as it may be arbitrary.
2229 LoopBlocksRPO RPOT(TheLoop);
2230 RPOT.perform(LI);
2231 for (BasicBlock *BB : RPOT) {
2232 // Scan the BB and collect legal loads and stores. Also detect any
2233 // convergent instructions.
2234 for (Instruction &I : *BB) {
2235 if (auto *Call = dyn_cast<CallBase>(&I)) {
2236 if (Call->isConvergent())
2237 HasConvergentOp = true;
2240 // With both a non-vectorizable memory instruction and a convergent
2241 // operation found in this loop, there is no reason to continue the search.
2242 if (HasComplexMemInst && HasConvergentOp) {
2243 CanVecMem = false;
2244 return;
2247 // Avoid hitting recordAnalysis multiple times.
2248 if (HasComplexMemInst)
2249 continue;
2251 // Many math library functions read the rounding mode. We will only
2252 // vectorize a loop if it contains known function calls that don't set
2253 // the flag. Therefore, it is safe to ignore this read from memory.
2254 auto *Call = dyn_cast<CallInst>(&I);
2255 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2256 continue;
2258 // If this is a load, save it. If this instruction can read from memory
2259 // but is not a load, then we quit. Notice that we don't handle function
2260 // calls that read or write.
2261 if (I.mayReadFromMemory()) {
2262 // If the function has an explicit vectorized counterpart, we can safely
2263 // assume that it can be vectorized.
2264 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2265 !VFDatabase::getMappings(*Call).empty())
2266 continue;
2268 auto *Ld = dyn_cast<LoadInst>(&I);
2269 if (!Ld) {
2270 recordAnalysis("CantVectorizeInstruction", Ld)
2271 << "instruction cannot be vectorized";
2272 HasComplexMemInst = true;
2273 continue;
2275 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2276 recordAnalysis("NonSimpleLoad", Ld)
2277 << "read with atomic ordering or volatile read";
2278 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2279 HasComplexMemInst = true;
2280 continue;
2282 NumLoads++;
2283 Loads.push_back(Ld);
2284 DepChecker->addAccess(Ld);
2285 if (EnableMemAccessVersioningOfLoop)
2286 collectStridedAccess(Ld);
2287 continue;
2290 // Save 'store' instructions. Abort if other instructions write to memory.
2291 if (I.mayWriteToMemory()) {
2292 auto *St = dyn_cast<StoreInst>(&I);
2293 if (!St) {
2294 recordAnalysis("CantVectorizeInstruction", St)
2295 << "instruction cannot be vectorized";
2296 HasComplexMemInst = true;
2297 continue;
2299 if (!St->isSimple() && !IsAnnotatedParallel) {
2300 recordAnalysis("NonSimpleStore", St)
2301 << "write with atomic ordering or volatile write";
2302 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2303 HasComplexMemInst = true;
2304 continue;
2306 NumStores++;
2307 Stores.push_back(St);
2308 DepChecker->addAccess(St);
2309 if (EnableMemAccessVersioningOfLoop)
2310 collectStridedAccess(St);
2312 } // Next instr.
2313 } // Next block.
2315 if (HasComplexMemInst) {
2316 CanVecMem = false;
2317 return;
2320 // Now we have two lists that hold the loads and the stores.
2321 // Next, we find the pointers that they use.
2323 // Check if we see any stores. If there are no stores, then we don't
2324 // care if the pointers are *restrict*.
2325 if (!Stores.size()) {
2326 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2327 CanVecMem = true;
2328 return;
2331 MemoryDepChecker::DepCandidates DependentAccesses;
2332 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE);
2334 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2335 // multiple times on the same object. If the ptr is accessed twice, once
2336 // for read and once for write, it will only appear once (on the write
2337 // list). This is okay, since we are going to check for conflicts between
2338 // writes and between reads and writes, but not between reads and reads.
2339 SmallSet<std::pair<Value *, Type *>, 16> Seen;
2341 // Record uniform store addresses to identify if we have multiple stores
2342 // to the same address.
2343 SmallPtrSet<Value *, 16> UniformStores;
2345 for (StoreInst *ST : Stores) {
2346 Value *Ptr = ST->getPointerOperand();
2348 if (isInvariant(Ptr)) {
2349 // Record store instructions to loop invariant addresses
2350 StoresToInvariantAddresses.push_back(ST);
2351 HasDependenceInvolvingLoopInvariantAddress |=
2352 !UniformStores.insert(Ptr).second;
2355 // If we did *not* see this pointer before, insert it into the read-write
2356 // list. At this phase it is only a 'write' list.
2357 Type *AccessTy = getLoadStoreType(ST);
2358 if (Seen.insert({Ptr, AccessTy}).second) {
2359 ++NumReadWrites;
2361 MemoryLocation Loc = MemoryLocation::get(ST);
2362 // The TBAA metadata could have a control dependency on the predication
2363 // condition, so we cannot rely on it when determining whether or not we
2364 // need runtime pointer checks.
2365 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2366 Loc.AATags.TBAA = nullptr;
2368 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2369 [&Accesses, AccessTy, Loc](Value *Ptr) {
2370 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2371 Accesses.addStore(NewLoc, AccessTy);
2376 if (IsAnnotatedParallel) {
2377 LLVM_DEBUG(
2378 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2379 << "checks.\n");
2380 CanVecMem = true;
2381 return;
2384 for (LoadInst *LD : Loads) {
2385 Value *Ptr = LD->getPointerOperand();
2386 // If we did *not* see this pointer before, insert it into the
2387 // read list. If we *did* see it before, then it is already in
2388 // the read-write list. This allows us to vectorize expressions
2389 // such as A[i] += x, because the address of A[i] is a read-write
2390 // pointer. This only works if the index of A[i] is consecutive.
2391 // If the address of i is unknown (for example A[B[i]]) then we may
2392 // read a few words, modify, and write a few words, and some of the
2393 // words may be written to the same address.
2394 bool IsReadOnlyPtr = false;
2395 Type *AccessTy = getLoadStoreType(LD);
2396 if (Seen.insert({Ptr, AccessTy}).second ||
2397 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2398 ++NumReads;
2399 IsReadOnlyPtr = true;
2402 // See if there is an unsafe dependency between a load from a uniform address
2403 // and a store to the same uniform address.
2404 if (UniformStores.count(Ptr)) {
2405 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2406 "load and uniform store to the same address!\n");
2407 HasDependenceInvolvingLoopInvariantAddress = true;
2410 MemoryLocation Loc = MemoryLocation::get(LD);
2411 // The TBAA metadata could have a control dependency on the predication
2412 // condition, so we cannot rely on it when determining whether or not we
2413 // need runtime pointer checks.
2414 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2415 Loc.AATags.TBAA = nullptr;
2417 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2418 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2419 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2420 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2424 // If we write (or read-write) to a single destination and there are no
2425 // other reads in this loop then it is safe to vectorize.
2426 if (NumReadWrites == 1 && NumReads == 0) {
2427 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2428 CanVecMem = true;
2429 return;
2432 // Build dependence sets and check whether we need a runtime pointer bounds
2433 // check.
2434 Accesses.buildDependenceSets();
2436 // Find pointers with computable bounds. We are going to use this information
2437 // to place a runtime bound check.
2438 Value *UncomputablePtr = nullptr;
2439 bool CanDoRTIfNeeded =
2440 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2441 SymbolicStrides, UncomputablePtr, false);
2442 if (!CanDoRTIfNeeded) {
2443 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2444 recordAnalysis("CantIdentifyArrayBounds", I)
2445 << "cannot identify array bounds";
2446 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2447 << "the array bounds.\n");
2448 CanVecMem = false;
2449 return;
2452 LLVM_DEBUG(
2453 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2455 CanVecMem = true;
2456 if (Accesses.isDependencyCheckNeeded()) {
2457 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2458 CanVecMem = DepChecker->areDepsSafe(
2459 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2461 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2462 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2464 // Clear the dependency checks. We assume they are not needed.
2465 Accesses.resetDepChecks(*DepChecker);
2467 PtrRtChecking->reset();
2468 PtrRtChecking->Need = true;
2470 auto *SE = PSE->getSE();
2471 UncomputablePtr = nullptr;
2472 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2473 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2475 // Check that we found the bounds for the pointer.
2476 if (!CanDoRTIfNeeded) {
2477 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2478 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2479 << "cannot check memory dependencies at runtime";
2480 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2481 CanVecMem = false;
2482 return;
2485 CanVecMem = true;
2489 if (HasConvergentOp) {
2490 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2491 << "cannot add control dependency to convergent operation";
2492 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2493 "would be needed with a convergent operation\n");
2494 CanVecMem = false;
2495 return;
2498 if (CanVecMem)
2499 LLVM_DEBUG(
2500 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2501 << (PtrRtChecking->Need ? "" : " don't")
2502 << " need runtime memory checks.\n");
2503 else
2504 emitUnsafeDependenceRemark();
2507 void LoopAccessInfo::emitUnsafeDependenceRemark() {
2508 auto Deps = getDepChecker().getDependences();
2509 if (!Deps)
2510 return;
2511 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2512 return MemoryDepChecker::Dependence::isSafeForVectorization(D.Type) !=
2513 MemoryDepChecker::VectorizationSafetyStatus::Safe;
2515 if (Found == Deps->end())
2516 return;
2517 MemoryDepChecker::Dependence Dep = *Found;
2519 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2521 // Emit remark for first unsafe dependence
2522 bool HasForcedDistribution = false;
2523 std::optional<const MDOperand *> Value =
2524 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2525 if (Value) {
2526 const MDOperand *Op = *Value;
2527 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2528 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2531 const std::string Info =
2532 HasForcedDistribution
2533 ? "unsafe dependent memory operations in loop."
2534 : "unsafe dependent memory operations in loop. Use "
2535 "#pragma clang loop distribute(enable) to allow loop distribution "
2536 "to attempt to isolate the offending operations into a separate "
2537 "loop";
2538 OptimizationRemarkAnalysis &R =
2539 recordAnalysis("UnsafeDep", Dep.getDestination(*this)) << Info;
2541 switch (Dep.Type) {
2542 case MemoryDepChecker::Dependence::NoDep:
2543 case MemoryDepChecker::Dependence::Forward:
2544 case MemoryDepChecker::Dependence::BackwardVectorizable:
2545 llvm_unreachable("Unexpected dependence");
2546 case MemoryDepChecker::Dependence::Backward:
2547 R << "\nBackward loop carried data dependence.";
2548 break;
2549 case MemoryDepChecker::Dependence::ForwardButPreventsForwarding:
2550 R << "\nForward loop carried data dependence that prevents "
2551 "store-to-load forwarding.";
2552 break;
2553 case MemoryDepChecker::Dependence::BackwardVectorizableButPreventsForwarding:
2554 R << "\nBackward loop carried data dependence that prevents "
2555 "store-to-load forwarding.";
2556 break;
2557 case MemoryDepChecker::Dependence::Unknown:
2558 R << "\nUnknown data dependence.";
2559 break;
2562 if (Instruction *I = Dep.getSource(*this)) {
2563 DebugLoc SourceLoc = I->getDebugLoc();
2564 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2565 SourceLoc = DD->getDebugLoc();
2566 if (SourceLoc)
2567 R << " Memory location is the same as accessed at "
2568 << ore::NV("Location", SourceLoc);
2572 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2573 DominatorTree *DT) {
2574 assert(TheLoop->contains(BB) && "Unknown block used");
2576 // Blocks that do not dominate the latch need predication.
2577 BasicBlock* Latch = TheLoop->getLoopLatch();
2578 return !DT->dominates(BB, Latch);
2581 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2582 Instruction *I) {
2583 assert(!Report && "Multiple reports generated");
2585 Value *CodeRegion = TheLoop->getHeader();
2586 DebugLoc DL = TheLoop->getStartLoc();
2588 if (I) {
2589 CodeRegion = I->getParent();
2590 // If there is no debug location attached to the instruction, fall back to
2591 // using the loop's.
2592 if (I->getDebugLoc())
2593 DL = I->getDebugLoc();
2596 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2597 CodeRegion);
2598 return *Report;
2601 bool LoopAccessInfo::isInvariant(Value *V) const {
2602 auto *SE = PSE->getSE();
2603 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2604 // trivially loop-invariant FP values to be considered invariant.
2605 if (!SE->isSCEVable(V->getType()))
2606 return false;
2607 const SCEV *S = SE->getSCEV(V);
2608 return SE->isLoopInvariant(S, TheLoop);
2611 /// Find the operand of the GEP that should be checked for consecutive
2612 /// stores. This ignores trailing indices that have no effect on the final
2613 /// pointer.
2614 static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2615 const DataLayout &DL = Gep->getModule()->getDataLayout();
2616 unsigned LastOperand = Gep->getNumOperands() - 1;
2617 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2619 // Walk backwards and try to peel off zeros.
2620 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2621 // Find the type we're currently indexing into.
2622 gep_type_iterator GEPTI = gep_type_begin(Gep);
2623 std::advance(GEPTI, LastOperand - 2);
2625 // If it's a type with the same allocation size as the result of the GEP we
2626 // can peel off the zero index.
2627 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
2628 break;
2629 --LastOperand;
2632 return LastOperand;
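// For illustration, for a GEP such as
//   %p = getelementptr inbounds i32, ptr %A, i64 %i
// the returned operand is simply the last index (%i). Trailing zero indices
// are peeled off only when the type they index into has the same allocation
// size as the GEP's result element type, since only then do they not change
// the final pointer.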
2635 /// If the argument is a GEP, then returns the operand identified by
2636 /// getGEPInductionOperand. However, if there is some other non-loop-invariant
2637 /// operand, it returns that instead.
2638 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2639 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2640 if (!GEP)
2641 return Ptr;
2643 unsigned InductionOperand = getGEPInductionOperand(GEP);
2645 // Check that all of the gep indices are uniform except for our induction
2646 // operand.
2647 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2648 if (i != InductionOperand &&
2649 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2650 return Ptr;
2651 return GEP->getOperand(InductionOperand);
2654 /// If a value has only one user that is a CastInst, return it.
2655 static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
2656 Value *UniqueCast = nullptr;
2657 for (User *U : Ptr->users()) {
2658 CastInst *CI = dyn_cast<CastInst>(U);
2659 if (CI && CI->getType() == Ty) {
2660 if (!UniqueCast)
2661 UniqueCast = CI;
2662 else
2663 return nullptr;
2666 return UniqueCast;
2669 /// Get the stride of a pointer access in a loop. Looks for symbolic
2670 /// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2671 static const SCEV *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
2672 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2673 if (!PtrTy || PtrTy->isAggregateType())
2674 return nullptr;
2676 // Try to remove a gep instruction to make the pointer (actually the index at
2677 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2678 // the pointer, otherwise we are analyzing the index.
2679 Value *OrigPtr = Ptr;
2681 // The size of the pointer access.
2682 int64_t PtrAccessSize = 1;
2684 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2685 const SCEV *V = SE->getSCEV(Ptr);
2687 if (Ptr != OrigPtr)
2688 // Strip off casts.
2689 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2690 V = C->getOperand();
2692 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2693 if (!S)
2694 return nullptr;
2696 // If the pointer is invariant then there is no stride and it makes no
2697 // sense to add it here.
2698 if (Lp != S->getLoop())
2699 return nullptr;
2701 V = S->getStepRecurrence(*SE);
2702 if (!V)
2703 return nullptr;
2705 // Strip off the multiplication by the access size if we are still analyzing
2706 // the pointer.
2707 if (OrigPtr == Ptr) {
2708 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2709 if (M->getOperand(0)->getSCEVType() != scConstant)
2710 return nullptr;
2712 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2714 // Huge step value - give up.
2715 if (APStepVal.getBitWidth() > 64)
2716 return nullptr;
2718 int64_t StepVal = APStepVal.getSExtValue();
2719 if (PtrAccessSize != StepVal)
2720 return nullptr;
2721 V = M->getOperand(1);
2725 // Note that the restrictions after this loop-invariance check are only
2726 // profitability restrictions.
2727 if (!SE->isLoopInvariant(V, Lp))
2728 return nullptr;
2730 // Look for the loop invariant symbolic value.
2731 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2732 if (!U) {
2733 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2734 if (!C)
2735 return nullptr;
2736 U = dyn_cast<SCEVUnknown>(C->getOperand());
2737 if (!U)
2738 return nullptr;
2740 // Match legacy behavior - this is not needed for correctness
2741 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2742 return nullptr;
2745 return V;
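// For illustration, this matches accesses such as:
//   void foo(int *A, int Stride, int n) {
//     for (int i = 0; i < n; ++i)
//       A[i * Stride] = 0;
//   }
// After the GEP is stripped, the analyzed index has the SCEV {0,+,Stride},
// whose step is the loop-invariant, symbolic Stride; that SCEVUnknown is
// returned so the caller can later version the loop under the predicate
// Stride == 1.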
2748 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2749 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2750 if (!Ptr)
2751 return;
2753 // Note: getStrideFromPointer is a *profitability* heuristic. We
2754 // could broaden the scope of values returned here - to anything
2755 // which happens to be loop invariant and contributes to the
2756 // computation of an interesting IV - but we chose not to as we
2757 // don't have a cost model here, and broadening the scope exposes
2758 // far too many unprofitable cases.
2759 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2760 if (!StrideExpr)
2761 return;
2763 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2764 "versioning:");
2765 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2767 if (!SpeculateUnitStride) {
2768 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2769 return;
2772 // Avoid adding the "Stride == 1" predicate when we know that
2773 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2774 // or zero iteration loop, as Trip-Count <= Stride == 1.
2776 // TODO: We are currently not making a very informed decision on when it is
2777 // beneficial to apply stride versioning. It might make more sense that the
2778 // users of this analysis (such as the vectorizer) will trigger it, based on
2779 // their specific cost considerations; For example, in cases where stride
2780 // versioning does not help resolving memory accesses/dependences, the
2781 // vectorizer should evaluate the cost of the runtime test, and the benefit
2782 // of various possible stride specializations, considering the alternatives
2783 // of using gather/scatters (if available).
2785 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2787 // Match the types so we can compare the stride and the BETakenCount.
2788 // The Stride can be positive/negative, so we sign extend Stride;
2789 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2790 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2791 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
2792 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
2793 const SCEV *CastedStride = StrideExpr;
2794 const SCEV *CastedBECount = BETakenCount;
2795 ScalarEvolution *SE = PSE->getSE();
2796 if (BETypeSizeBits >= StrideTypeSizeBits)
2797 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2798 else
2799 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2800 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2801 // Since TripCount == BackEdgeTakenCount + 1, checking:
2802 // "Stride >= TripCount" is equivalent to checking:
2803 // Stride - BETakenCount > 0
2804 if (SE->isKnownPositive(StrideMinusBETaken)) {
2805 LLVM_DEBUG(
2806 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2807 "Stride==1 predicate will imply that the loop executes "
2808 "at most once.\n");
2809 return;
2811 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
2813 // Strip back off the integer cast, and check that our result is a
2814 // SCEVUnknown as we expect.
2815 const SCEV *StrideBase = StrideExpr;
2816 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
2817 StrideBase = C->getOperand();
2818 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
2821 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2822 const TargetLibraryInfo *TLI, AAResults *AA,
2823 DominatorTree *DT, LoopInfo *LI)
2824 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2825 PtrRtChecking(nullptr),
2826 DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L) {
2827 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
2828 if (canAnalyzeLoop()) {
2829 analyzeLoop(AA, LI, TLI, DT);
2833 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2834 if (CanVecMem) {
2835 OS.indent(Depth) << "Memory dependences are safe";
2836 const MemoryDepChecker &DC = getDepChecker();
2837 if (!DC.isSafeForAnyVectorWidth())
2838 OS << " with a maximum safe vector width of "
2839 << DC.getMaxSafeVectorWidthInBits() << " bits";
2840 if (PtrRtChecking->Need)
2841 OS << " with run-time checks";
2842 OS << "\n";
2845 if (HasConvergentOp)
2846 OS.indent(Depth) << "Has convergent operation in loop\n";
2848 if (Report)
2849 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2851 if (auto *Dependences = DepChecker->getDependences()) {
2852 OS.indent(Depth) << "Dependences:\n";
2853 for (const auto &Dep : *Dependences) {
2854 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2855 OS << "\n";
2857 } else
2858 OS.indent(Depth) << "Too many dependences, not recorded\n";
2860 // List the pairs of accesses that need run-time checks to prove independence.
2861 PtrRtChecking->print(OS, Depth);
2862 OS << "\n";
2864 OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2865 << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2866 << "found in loop.\n";
2868 OS.indent(Depth) << "SCEV assumptions:\n";
2869 PSE->getPredicate().print(OS, Depth);
2871 OS << "\n";
2873 OS.indent(Depth) << "Expressions re-written:\n";
2874 PSE->print(OS, Depth);
2877 const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
2878 auto I = LoopAccessInfoMap.insert({&L, nullptr});
2880 if (I.second)
2881 I.first->second =
2882 std::make_unique<LoopAccessInfo>(&L, &SE, TLI, &AA, &DT, &LI);
2884 return *I.first->second;
2887 bool LoopAccessInfoManager::invalidate(
2888 Function &F, const PreservedAnalyses &PA,
2889 FunctionAnalysisManager::Invalidator &Inv) {
2890 // Check whether our analysis is preserved.
2891 auto PAC = PA.getChecker<LoopAccessAnalysis>();
2892 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
2893 // If not, give up now.
2894 return true;
2896 // Check whether the analyses we depend on became invalid for any reason.
2897 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
2898 // invalid.
2899 return Inv.invalidate<AAManager>(F, PA) ||
2900 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2901 Inv.invalidate<LoopAnalysis>(F, PA) ||
2902 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2905 LoopAccessInfoManager LoopAccessAnalysis::run(Function &F,
2906 FunctionAnalysisManager &FAM) {
2907 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
2908 auto &AA = FAM.getResult<AAManager>(F);
2909 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2910 auto &LI = FAM.getResult<LoopAnalysis>(F);
2911 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
2912 return LoopAccessInfoManager(SE, AA, DT, LI, &TLI);
2915 AnalysisKey LoopAccessAnalysis::Key;