//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the loop memory dependence framework that
// was originally developed for the Loop Vectorizer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {

class Value;
class DataLayout;
class ScalarEvolution;
class Loop;
class SCEV;
class SCEVUnionPredicate;
class LoopAccessInfo;
class OptimizationRemarkEmitter;
/// Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
  /// Maximum SIMD width.
  static const unsigned MaxVectorWidth;

  /// VF as overridden by the user.
  static unsigned VectorizationFactor;
  /// Interleave factor as overridden by the user.
  static unsigned VectorizationInterleave;
  /// True if force-vector-interleave was specified by the user.
  static bool isInterleaveForced();

  /// When performing memory disambiguation checks at runtime do not
  /// make more than this number of comparisons.
  static unsigned RuntimeMemoryCheckThreshold;
};
/// Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// Note: This class will compute a conservative dependence for accesses to
/// different underlying pointers. Clients, such as the loop vectorizer, will
/// sometimes deal with these potential dependencies by emitting runtime checks.
///
/// We use the ScalarEvolution framework to symbolically evaluate pairs of
/// access functions. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
///  * A negative constant distance assuming program order.
///
///      Safe: tmp = a[i + 1];      OR    a[i + 1] = x;
///            a[i] = tmp;                y = a[i];
///
///    The latter case is safe because later checks guarantee that there can't
///    be a cycle through a phi node (that is, we check that "x" and "y" are
///    not the same variable: a header phi can only be an induction or a
///    reduction, a reduction can't have a memory sink, an induction can't have
///    a memory source). This is important and must not be violated (or we
///    have to resort to checking for cycles through memory).
///
///  * A positive constant distance assuming program order that is bigger
///    than the biggest memory access.
///
///      tmp = a[i]       OR        b[i] = x
///      a[i+2] = tmp               y = b[i+2];
///
///    Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
///  * Zero distances and all accesses have the same size.
///
class MemoryDepChecker {
public:
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
  /// Set of potential dependent memory accesses.
  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

  /// Dependence between memory access instructions.
  struct Dependence {
    /// The type of the dependence.
    enum DepType {
      // No dependence.
      NoDep,
      // We couldn't determine the direction or the distance.
      Unknown,
      // Lexically forward.
      //
      // FIXME: If we only have loop-independent forward dependences (e.g. a
      // read and write of A[i]), LAA will locally deem the dependence "safe"
      // without querying the MemoryDepChecker.  Therefore we can miss
      // enumerating loop-independent forward dependences in
      // getDependences.  Note that as soon as there are different
      // indices used to access the same array, the MemoryDepChecker *is*
      // queried and the dependence list is complete.
      Forward,
      // Forward, but if vectorized, is likely to prevent store-to-load
      // forwarding.
      ForwardButPreventsForwarding,
      // Lexically backward.
      Backward,
      // Backward, but the distance allows a vectorization factor of
      // MaxSafeDepDistBytes.
      BackwardVectorizable,
      // Same, but may prevent store-to-load forwarding.
      BackwardVectorizableButPreventsForwarding
    };

    /// String version of the types.
    static const char *DepName[];
    /// Index of the source of the dependence in the InstMap vector.
    unsigned Source;
    /// Index of the destination of the dependence in the InstMap vector.
    unsigned Destination;
    /// The type of the dependence.
    DepType Type;

    Dependence(unsigned Source, unsigned Destination, DepType Type)
        : Source(Source), Destination(Destination), Type(Type) {}

    /// Return the source instruction of the dependence.
    Instruction *getSource(const LoopAccessInfo &LAI) const;
    /// Return the destination instruction of the dependence.
    Instruction *getDestination(const LoopAccessInfo &LAI) const;

    /// Dependence types that don't prevent vectorization.
    static bool isSafeForVectorization(DepType Type);

    /// Lexically forward dependence.
    bool isForward() const;
    /// Lexically backward dependence.
    bool isBackward() const;

    /// May be a lexically backward dependence type (includes Unknown).
    bool isPossiblyBackward() const;

    /// Print the dependence.  \p Instrs is used to map the instruction
    /// indices to instructions.
    void print(raw_ostream &OS, unsigned Depth,
               const SmallVectorImpl<Instruction *> &Instrs) const;
  };
  MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
      : PSE(PSE), InnermostLoop(L), AccessIdx(0), MaxSafeRegisterWidth(-1U),
        ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
        RecordDependences(true) {}

  /// Register the location (instructions are given increasing numbers)
  /// of a write access.
  void addAccess(StoreInst *SI) {
    Value *Ptr = SI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
    InstMap.push_back(SI);
    ++AccessIdx;
  }
  /// Register the location (instructions are given increasing numbers)
  /// of a read access.
  void addAccess(LoadInst *LI) {
    Value *Ptr = LI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
    InstMap.push_back(LI);
    ++AccessIdx;
  }
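  // A minimal usage sketch (illustrative only, not part of the interface):
  // accesses must be registered in program order, e.g. by walking the loop
  // body, assuming PSE and TheLoop are the PredicatedScalarEvolution and the
  // innermost loop under analysis:
  //
  //   MemoryDepChecker DepChecker(PSE, TheLoop);
  //   for (BasicBlock *BB : TheLoop->blocks())
  //     for (Instruction &I : *BB) {
  //       if (auto *LD = dyn_cast<LoadInst>(&I))
  //         DepChecker.addAccess(LD);
  //       else if (auto *ST = dyn_cast<StoreInst>(&I))
  //         DepChecker.addAccess(ST);
  //     }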
  /// Check whether the dependencies between the accesses are safe.
  ///
  /// Only checks sets with elements in \p CheckDeps.
  bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
                   const ValueToValueMap &Strides);

  /// No memory dependence was encountered that would inhibit
  /// vectorization.
  bool isSafeForVectorization() const { return SafeForVectorization; }

  /// The maximum number of bytes of a vector register we can vectorize
  /// the accesses safely with.
  uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }

  /// Return the number of elements that are safe to operate on
  /// simultaneously, multiplied by the size of the element in bits.
  uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }

  /// In some cases, when the dependency check fails, we can still
  /// vectorize the loop with a dynamic array access check.
  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
  /// Returns the memory dependences.  If null is returned we exceeded
  /// the MaxDependences threshold and this information is not
  /// available.
  const SmallVectorImpl<Dependence> *getDependences() const {
    return RecordDependences ? &Dependences : nullptr;
  }
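  // A minimal sketch (illustrative only) of walking the recorded dependences
  // after areDepsSafe has run; unsafe ones are printed against the
  // instruction list kept by the checker:
  //
  //   if (const auto *Deps = DepChecker.getDependences())
  //     for (const Dependence &Dep : *Deps)
  //       if (!Dependence::isSafeForVectorization(Dep.Type))
  //         Dep.print(errs(), /*Depth=*/2,
  //                   DepChecker.getMemoryInstructions());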
  void clearDependences() { Dependences.clear(); }

  /// The vector of memory access instructions.  The indices are used as
  /// instruction identifiers in the Dependence class.
  const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
    return InstMap;
  }

  /// Generate a mapping between the memory instructions and their
  /// indices according to program order.
  DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
    DenseMap<Instruction *, unsigned> OrderMap;

    for (unsigned I = 0; I < InstMap.size(); ++I)
      OrderMap[InstMap[I]] = I;

    return OrderMap;
  }

  /// Find the set of instructions that read or write via \p Ptr.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const;
private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
  /// applies dynamic knowledge to simplify SCEV expressions and convert them
  /// to a more usable form.  We need this in case assumptions about SCEV
  /// expressions need to be made in order to avoid unknown dependences.  For
  /// example we might assume a unit stride for a pointer in order to prove
  /// that a memory access is strided and doesn't wrap.
  PredicatedScalarEvolution &PSE;
  const Loop *InnermostLoop;

  /// Maps access locations (ptr, read/write) to program order.
  DenseMap<MemAccessInfo, std::vector<unsigned>> Accesses;

  /// Memory access instructions in program order.
  SmallVector<Instruction *, 16> InstMap;

  /// The program order index to be used for the next instruction.
  unsigned AccessIdx;

  // We can access this many bytes in parallel safely.
  uint64_t MaxSafeDepDistBytes;

  /// Number of elements (from consecutive iterations) that are safe to
  /// operate on simultaneously, multiplied by the size of the element in bits.
  /// The size of the element is taken from the memory access that is most
  /// restrictive.
  uint64_t MaxSafeRegisterWidth;

  /// If we see a non-constant dependence distance we can still try to
  /// vectorize this loop with runtime checks.
  bool ShouldRetryWithRuntimeCheck;

  /// No memory dependence was encountered that would inhibit
  /// vectorization.
  bool SafeForVectorization;

  /// True if Dependences reflects the dependences in the
  /// loop.  If false we exceeded MaxDependences and
  /// Dependences is invalid.
  bool RecordDependences;

  /// Memory dependences collected during the analysis.  Only valid if
  /// RecordDependences is true.
  SmallVector<Dependence, 8> Dependences;
  /// Check whether there is a plausible dependence between the two
  /// accesses.
  ///
  /// Access \p A must happen before \p B in program order.  The two indices
  /// identify the index into the program order map.
  ///
  /// This function checks whether there is a plausible dependence (or the
  /// absence of such can't be proved) between the two accesses.  If there is a
  /// plausible dependence but the dependence distance is bigger than one
  /// element access it records this distance in \p MaxSafeDepDistBytes (if
  /// this distance is smaller than any other distance encountered so far).
  /// Otherwise, this function returns a DepType signaling a possible
  /// dependence.
  Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
                                  const MemAccessInfo &B, unsigned BIdx,
                                  const ValueToValueMap &Strides);

  /// Check whether the data dependence could prevent store-load
  /// forwarding.
  ///
  /// \return false if we shouldn't vectorize at all or avoid larger
  /// vectorization factors by limiting MaxSafeDepDistBytes.
  bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};
/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
public:
  struct PointerInfo {
    /// Holds the pointer value that we need to check.
    TrackingVH<Value> PointerValue;
    /// Holds the smallest byte address accessed by the pointer throughout all
    /// iterations of the loop.
    const SCEV *Start;
    /// Holds the largest byte address accessed by the pointer throughout all
    /// iterations of the loop, plus 1.
    const SCEV *End;
    /// Holds the information if this pointer is used for writing to memory.
    bool IsWritePtr;
    /// Holds the id of the set of pointers that could be dependent because of
    /// a shared underlying object.
    unsigned DependencySetId;
    /// Holds the id of the disjoint alias set to which this pointer belongs.
    unsigned AliasSetId;
    /// SCEV for the access.
    const SCEV *Expr;

    PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
                bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
                const SCEV *Expr)
        : PointerValue(PointerValue), Start(Start), End(End),
          IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
          AliasSetId(AliasSetId), Expr(Expr) {}
  };
  RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}

  /// Reset the state of the pointer runtime information.
  void reset() {
    Need = false;
    Pointers.clear();
    Checks.clear();
  }

  /// Insert a pointer and calculate the start and end SCEVs.
  /// We need \p PSE in order to compute the SCEV expression of the pointer
  /// according to the assumptions that we've made during the analysis.
  /// The method might also version the pointer stride according to \p Strides,
  /// and add new predicates to \p PSE.
  void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
              unsigned ASId, const ValueToValueMap &Strides,
              PredicatedScalarEvolution &PSE);

  /// No run-time memory checking is necessary.
  bool empty() const { return Pointers.empty(); }
  /// A grouping of pointers.  A single memcheck is required between
  /// two groups.
  struct CheckingPtrGroup {
    /// Create a new pointer checking group containing a single
    /// pointer, with index \p Index in RtCheck.
    CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
        : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
          Low(RtCheck.Pointers[Index].Start) {
      Members.push_back(Index);
    }

    /// Tries to add the pointer recorded in RtCheck at index
    /// \p Index to this pointer checking group.  We can only add a pointer
    /// to a checking group if we will still be able to get
    /// the upper and lower bounds of the check.  Returns true in case
    /// of success, false otherwise.
    bool addPointer(unsigned Index);

    /// Constitutes the context of this pointer checking group.  For each
    /// pointer that is a member of this group we will retain the index
    /// at which it appears in RtCheck.
    RuntimePointerChecking &RtCheck;
    /// The SCEV expression which represents the upper bound of all the
    /// pointers in this group.
    const SCEV *High;
    /// The SCEV expression which represents the lower bound of all the
    /// pointers in this group.
    const SCEV *Low;
    /// Indices of all the pointers that constitute this grouping.
    SmallVector<unsigned, 2> Members;
  };
  /// A memcheck which is made up of a pair of grouped pointers.
  ///
  /// These *have* to be const for now, since checks are generated from
  /// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
  /// function.  FIXME: once check-generation is moved inside this class (after
  /// the PtrPartition hack is removed), we could drop const.
  typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
      PointerCheck;

  /// Generate the checks and store them.  This also performs the grouping
  /// of pointers to reduce the number of memchecks necessary.
  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
                      bool UseDependencies);

  /// Returns the checks that generateChecks created.
  const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
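  // A minimal sketch (illustrative only): once all pointers have been
  // recorded via insert(), the checks can be generated and walked; each
  // pair of groups corresponds to one run-time overlap test:
  //
  //   RtCheck.generateChecks(DepCands, /*UseDependencies=*/true);
  //   for (const PointerCheck &Check : RtCheck.getChecks()) {
  //     const CheckingPtrGroup *G1 = Check.first;
  //     const CheckingPtrGroup *G2 = Check.second;
  //     // Emit a bounds-overlap test between [G1->Low, G1->High) and
  //     // [G2->Low, G2->High).
  //   }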
  /// Decide if we need to add a check between two groups of pointers,
  /// according to needsChecking.
  bool needsChecking(const CheckingPtrGroup &M,
                     const CheckingPtrGroup &N) const;

  /// Returns the number of run-time checks required according to
  /// needsChecking.
  unsigned getNumberOfChecks() const { return Checks.size(); }

  /// Print the list of run-time memory checks necessary.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// Print \p Checks.
  void printChecks(raw_ostream &OS,
                   const SmallVectorImpl<PointerCheck> &Checks,
                   unsigned Depth = 0) const;
  /// This flag indicates if we need to add the runtime check.
  bool Need;

  /// Information about the pointers that may require checking.
  SmallVector<PointerInfo, 2> Pointers;

  /// Holds a partitioning of pointers into "check groups".
  SmallVector<CheckingPtrGroup, 2> CheckingGroups;

  /// Check if pointers are in the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers (-1 if the
  /// pointer belongs to multiple partitions).
  static bool
  arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
                             unsigned PtrIdx1, unsigned PtrIdx2);

  /// Decide whether we need to issue a run-time check for the pointers at
  /// indices \p I and \p J to prove their independence.
  bool needsChecking(unsigned I, unsigned J) const;

  /// Return PointerInfo for pointer at index \p PtrIdx.
  const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
    return Pointers[PtrIdx];
  }
private:
  /// Groups pointers such that a single memcheck is required
  /// between two different groups.  This will clear the CheckingGroups vector
  /// and re-compute it.  We will only group dependencies if \p UseDependencies
  /// is true, otherwise we will create a separate group for each pointer.
  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
                   bool UseDependencies);

  /// Generate the checks and return them.
  SmallVector<PointerCheck, 4> generateChecks() const;

  /// Holds a pointer to the ScalarEvolution analysis.
  ScalarEvolution *SE;

  /// Set of run-time checks required to establish independence of
  /// otherwise may-aliasing pointers in the loop.
  SmallVector<PointerCheck, 4> Checks;
};
/// Drive the analysis of memory accesses in the loop.
///
/// This class is responsible for analyzing the memory accesses of a loop.  It
/// collects the accesses and then its main helper, the AccessAnalysis class,
/// finds and categorizes the dependences in buildDependenceSets.
///
/// For memory dependences that can be analyzed at compile time, it determines
/// whether the dependence is part of a cycle inhibiting vectorization.  This
/// work is delegated to the MemoryDepChecker class.
///
/// For memory dependences that cannot be determined at compile time, it
/// generates run-time checks to prove independence.  This is done by
/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
/// RuntimePointerChecking class.
///
/// If pointers can wrap or can't be expressed as affine AddRec expressions by
/// ScalarEvolution, we will generate run-time checks by emitting a
/// SCEVUnionPredicate.
///
/// Checks for both memory dependences and the SCEV predicates contained in the
/// PSE must be emitted in order for the results of this analysis to be valid.
class LoopAccessInfo {
public:
  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
                 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI);
  /// Return true if we can analyze the memory accesses in the loop and there
  /// are no memory dependence cycles.
  bool canVectorizeMemory() const { return CanVecMem; }

  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return PtrRtChecking.get();
  }

  /// Number of memchecks required to prove independence of otherwise
  /// may-alias pointers.
  unsigned getNumRuntimePointerChecks() const {
    return PtrRtChecking->getNumberOfChecks();
  }

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                    DominatorTree *DT);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V) const;

  uint64_t getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
  unsigned getNumStores() const { return NumStores; }
  unsigned getNumLoads() const { return NumLoads; }

  /// Add code that checks at runtime if the accessed arrays overlap.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc) const;
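  // A minimal sketch (illustrative only) of emitting the checks, assuming
  // Preheader is the loop preheader block of a client transformation:
  //
  //   if (LAI.getRuntimePointerChecking()->Need) {
  //     std::pair<Instruction *, Instruction *> RTChecks =
  //         LAI.addRuntimeChecks(Preheader->getTerminator());
  //     // RTChecks.second is the final comparator; branch on it to the
  //     // scalar fallback loop when the pointer ranges overlap.
  //   }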
  /// Generate the instructions for the checks in \p PointerChecks.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc,
                   const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
                       &PointerChecks) const;

  /// The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }

  /// The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
  /// Return the list of instructions that use \p Ptr to read or write
  /// memory.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const {
    return DepChecker->getInstructionsForAccess(Ptr, isWrite);
  }

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }

  /// Pointer has a symbolic stride.
  bool hasStride(Value *V) const { return StrideSet.count(V); }

  /// Print the information about the memory accesses in the loop.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// If the loop has any store of a variant value to an invariant address,
  /// then return true, else return false.
  bool hasVariantStoreToLoopInvariantAddress() const {
    return HasVariantStoreToLoopInvariantAddress;
  }
  /// Used to add runtime SCEV checks.  Simplifies SCEV expressions and
  /// converts them to a more usable form.  All SCEV expressions during the
  /// analysis should be re-written (and therefore simplified) according to
  /// PSE.  A user of LoopAccessAnalysis will need to emit the runtime checks
  /// associated with this predicate.
  const PredicatedScalarEvolution &getPSE() const { return *PSE; }

private:
  /// Analyze the loop.
  void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                   const TargetLibraryInfo *TLI, DominatorTree *DT);

  /// Check if the structure of the loop allows it to be analyzed by this
  /// pass.
  bool canAnalyzeLoop();

  /// Save the analysis remark.
  ///
  /// LAA does not directly emit the remarks.  Instead it stores them, so that
  /// the client can retrieve and present them as its own analysis
  /// (e.g. -Rpass-analysis=loop-vectorize).
  OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
                                             Instruction *Instr = nullptr);

  /// Collect memory accesses with loop-invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);
  std::unique_ptr<PredicatedScalarEvolution> PSE;

  /// We need to check that all of the pointers in this list are disjoint
  /// at runtime.  Using std::unique_ptr to make using the move ctor simpler.
  std::unique_ptr<RuntimePointerChecking> PtrRtChecking;

  /// The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  std::unique_ptr<MemoryDepChecker> DepChecker;

  Loop *TheLoop;

  unsigned NumLoads;
  unsigned NumStores;

  uint64_t MaxSafeDepDistBytes;

  /// Cache the result of analyzeLoop.
  bool CanVecMem;

  /// Indicator that there is a store of a variant value to a uniform address.
  bool HasVariantStoreToLoopInvariantAddress;

  /// The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  std::unique_ptr<OptimizationRemarkAnalysis> Report;

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  ValueToValueMap SymbolicStrides;

  /// Set of symbolic stride values.
  SmallPtrSet<Value *, 8> StrideSet;
};
Value *stripIntegerCast(Value *V);

/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
///
/// If \p OrigPtr is not null, use it to look up the stride value instead of
/// \p Ptr.  \p PtrToStride provides the mapping between the pointer value and
/// its stride as collected by LoopVectorizationLegality::collectStridedAccess.
const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                      const ValueToValueMap &PtrToStride,
                                      Value *Ptr, Value *OrigPtr = nullptr);
/// If the pointer has a constant stride return it in units of its
/// element size.  Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
/// associated with \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p StridesMap and therefore add further predicates to \p PSE.
/// The \p Assume parameter indicates if we are allowed to make additional
/// run-time assumptions.
int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                     const Loop *Lp,
                     const ValueToValueMap &StridesMap = ValueToValueMap(),
                     bool Assume = false, bool ShouldCheckWrap = true);
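// A minimal sketch (illustrative only) of interpreting the result: a stride
// of 1 is a consecutive forward access, -1 a consecutive reverse access, and
// 0 means the stride could not be proven constant:
//
//   int64_t Stride = getPtrStride(PSE, Ptr, TheLoop, Strides);
//   bool IsConsecutive = Stride == 1 || Stride == -1;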
/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
///
/// Returns 'true' if sorting is legal, otherwise returns 'false'.
///
/// For example, for a given \p VL of memory accesses in program order,
/// a[i+4], a[i+0], a[i+1] and a[i+7], this function will sort the \p VL into
/// a[i+0], a[i+1], a[i+4], a[i+7] and save the mask for the actual memory
/// accesses in program order in \p SortedIndices as <1,2,0,3>.
bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                     ScalarEvolution &SE,
                     SmallVectorImpl<unsigned> &SortedIndices);

/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                         ScalarEvolution &SE, bool CheckType = true);
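// A minimal sketch (illustrative only), assuming L0 and L1 are loads of a[i]
// and a[i+1] respectively; the argument order matters, since \p B must
// directly follow \p A in memory:
//
//   bool Consec = isConsecutiveAccess(L0, L1, DL, SE); // true
//   bool Rev = isConsecutiveAccess(L1, L0, DL, SE);    // false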
/// This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand.  This can be initiated by
/// querying the loop access info via LAA::getInfo.  getInfo returns a
/// LoopAccessInfo object.  See this class for the specifics of what
/// information is provided.
class LoopAccessLegacyAnalysis : public FunctionPass {
public:
  static char ID;

  LoopAccessLegacyAnalysis() : FunctionPass(ID) {
    initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Query the result of the loop access information for the loop \p L.
  ///
  /// If there is no cached result available run the analysis.
  const LoopAccessInfo &getInfo(Loop *L);
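  // A minimal sketch (illustrative only) of a client pass querying the
  // analysis from its runOnFunction, assuming the dependence was declared
  // in getAnalysisUsage:
  //
  //   const LoopAccessInfo &LAI =
  //       getAnalysis<LoopAccessLegacyAnalysis>().getInfo(L);
  //   if (LAI.canVectorizeMemory())
  //     ; // memory accesses permit vectorization (modulo runtime checks)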
  void releaseMemory() override {
    // Invalidate the cache when the pass is freed.
    LoopAccessInfoMap.clear();
  }

  /// Print the result of the analysis when invoked with -analyze.
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  /// The cache.
  DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;

  // The used analysis passes.
  ScalarEvolution *SE;
  const TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
};
/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand.  This can be initiated by
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult returns a LoopAccessInfo object.  See this class for the
/// specifics of what information is provided.
class LoopAccessAnalysis
    : public AnalysisInfoMixin<LoopAccessAnalysis> {
  friend AnalysisInfoMixin<LoopAccessAnalysis>;
  static AnalysisKey Key;

public:
  typedef LoopAccessInfo Result;

  Result run(Loop &L, LoopAnalysisManager &AM,
             LoopStandardAnalysisResults &AR);
};
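// A minimal sketch (illustrative only) of querying the result from a new
// pass manager loop pass, assuming AM and AR are the LoopAnalysisManager
// and LoopStandardAnalysisResults passed to its run method:
//
//   const LoopAccessInfo &LAI = AM.getResult<LoopAccessAnalysis>(L, AR);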
inline Instruction *MemoryDepChecker::Dependence::getSource(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Source];
}

inline Instruction *MemoryDepChecker::Dependence::getDestination(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Destination];
}

} // End llvm namespace

#endif