//===- LoopCacheAnalysis.cpp - Loop Cache Analysis ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the implementation for the loop cache analysis.
/// The implementation is largely based on the following paper:
///
///     Compiler Optimizations for Improving Data Locality
///     By: Steve Carr, Katherine S. McKinley, Chau-Wen Tseng
///     http://www.cs.utexas.edu/users/mckinley/papers/asplos-1994.pdf
///
/// The general approach taken to estimate the number of cache lines used by
/// the memory references in an inner loop is:
///    1. Partition memory references that exhibit temporal or spatial reuse
///       into reference groups.
///    2. For each loop L in the loop nest LN:
///       a. Compute the cost of each reference group.
///       b. Compute the loop cost by summing up the reference group costs.
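///
/// For example (an illustrative sketch, not taken from the paper), consider
/// the nest:
///
/// \code
///   for (long i = 0; i < 1000; ++i)
///     for (long j = 0; j < 1000; ++j)
///       A[i][j] += 1; // the accesses to A[i][j] form one reference group
/// \endcode
///
/// With the j-loop innermost the access is consecutive, so the group costs
/// roughly (TripCount * ElemSize) / CacheLineSize cache lines per traversal;
/// with the i-loop innermost every iteration touches a new cache line and the
/// group costs roughly TripCount lines. Each group cost is then scaled by the
/// trip counts of the remaining loops in the nest, and the scaled costs are
/// summed to obtain the loop's cost.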
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopCacheAnalysis.h"
#include "llvm/ADT/BreadthFirstIterator.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Delinearization.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "loop-cache-cost"

static cl::opt<unsigned> DefaultTripCount(
    "default-trip-count", cl::init(100), cl::Hidden,
    cl::desc("Use this to specify the default trip count of a loop"));

// In this analysis two array references are considered to exhibit temporal
// reuse if they access either the same memory location, or a memory location
// with distance smaller than a configurable threshold.
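// For example, with the default threshold of 2, the references A[j] and
// A[j - 2] in the same innermost loop are classified as having temporal
// reuse, while A[j] and A[j - 3] are not.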
static cl::opt<unsigned> TemporalReuseThreshold(
    "temporal-reuse-threshold", cl::init(2), cl::Hidden,
    cl::desc("Use this to specify the max. distance between array elements "
             "accessed in a loop so that the elements are classified to have "
             "temporal reuse"));

/// Retrieve the innermost loop in the given loop nest \p Loops. Return
/// nullptr if any loop in the supplied loop vector has more than one sibling.
/// The loop vector is expected to contain loops collected in breadth-first
/// order.
static Loop *getInnerMostLoop(const LoopVectorTy &Loops) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector");

  Loop *LastLoop = Loops.back();
  Loop *ParentLoop = LastLoop->getParentLoop();

  if (ParentLoop == nullptr) {
    assert(Loops.size() == 1 && "Expecting a single loop");
    return LastLoop;
  }

  return (llvm::is_sorted(Loops,
                          [](const Loop *L1, const Loop *L2) {
                            return L1->getLoopDepth() < L2->getLoopDepth();
                          }))
             ? LastLoop
             : nullptr;
}

static bool isOneDimensionalArray(const SCEV &AccessFn, const SCEV &ElemSize,
                                  const Loop &L, ScalarEvolution &SE) {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&AccessFn);
  if (!AR || !AR->isAffine())
    return false;

  assert(AR->getLoop() && "AR should have a loop");

  // Check that start and increment are not add recurrences.
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);
  if (isa<SCEVAddRecExpr>(Start) || isa<SCEVAddRecExpr>(Step))
    return false;

  // Check that start and increment are both invariant in the loop.
  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  const SCEV *StepRec = AR->getStepRecurrence(SE);
  if (StepRec && SE.isKnownNegative(StepRec))
    StepRec = SE.getNegativeSCEV(StepRec);

  return StepRec == &ElemSize;
}

/// Compute the trip count for the given loop \p L or assume a default value if
/// it is not a compile-time constant. Return the SCEV expression for the trip
/// count.
static const SCEV *computeTripCount(const Loop &L, const SCEV &ElemSize,
                                    ScalarEvolution &SE) {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(&L);
  const SCEV *TripCount = (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
                           isa<SCEVConstant>(BackedgeTakenCount))
                              ? SE.getTripCountFromExitCount(BackedgeTakenCount)
                              : nullptr;

  if (!TripCount) {
    LLVM_DEBUG(dbgs() << "Trip count of loop " << L.getName()
                      << " could not be computed, using DefaultTripCount\n");
    TripCount = SE.getConstant(ElemSize.getType(), DefaultTripCount);
  }

  return TripCount;
}

//===----------------------------------------------------------------------===//
// IndexedReference implementation
//

raw_ostream &llvm::operator<<(raw_ostream &OS, const IndexedReference &R) {
  if (!R.IsValid) {
    OS << R.StoreOrLoadInst;
    OS << ", IsValid=false.";
    return OS;
  }

  OS << *R.BasePointer;
  for (const SCEV *Subscript : R.Subscripts)
    OS << "[" << *Subscript << "]";

  OS << ", Sizes: ";
  for (const SCEV *Size : R.Sizes)
    OS << "[" << *Size << "]";

  return OS;
}

IndexedReference::IndexedReference(Instruction &StoreOrLoadInst,
                                   const LoopInfo &LI, ScalarEvolution &SE)
    : StoreOrLoadInst(StoreOrLoadInst), SE(SE) {
  assert((isa<StoreInst>(StoreOrLoadInst) || isa<LoadInst>(StoreOrLoadInst)) &&
         "Expecting a load or store instruction");

  IsValid = delinearize(LI);
  if (IsValid)
    LLVM_DEBUG(dbgs().indent(2) << "Successfully delinearized: " << *this
                                << "\n");
}

std::optional<bool>
IndexedReference::hasSpacialReuse(const IndexedReference &Other, unsigned CLS,
                                  AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse: different base pointers\n");
    return false;
  }

  unsigned NumSubscripts = getNumSubscripts();
  if (NumSubscripts != Other.getNumSubscripts()) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse: different number of subscripts\n");
    return false;
  }

  // All subscripts must be equal, except the last one (the subscript for the
  // innermost dimension).
  for (auto SubNum : seq<unsigned>(0, NumSubscripts - 1)) {
    if (getSubscript(SubNum) != Other.getSubscript(SubNum)) {
      LLVM_DEBUG(dbgs().indent(2) << "No spacial reuse, different subscripts: "
                                  << "\n\t" << *getSubscript(SubNum) << "\n\t"
                                  << *Other.getSubscript(SubNum) << "\n");
      return false;
    }
  }

  // The difference between the last subscripts must be less than the cache
  // line size.
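  // For example, with CLS = 64, the references A[i][j] and A[i][j + 4] differ
  // by 4 in the last subscript and are treated as touching the same cache
  // line, while A[i][j] and A[i][j + 64] are not.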
  const SCEV *LastSubscript = getLastSubscript();
  const SCEV *OtherLastSubscript = Other.getLastSubscript();
  const SCEVConstant *Diff = dyn_cast<SCEVConstant>(
      SE.getMinusSCEV(LastSubscript, OtherLastSubscript));

  if (Diff == nullptr) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No spacial reuse, difference between subscripts:\n\t"
               << *LastSubscript << "\n\t" << *OtherLastSubscript
               << "\nis not constant.\n");
    return std::nullopt;
  }

  bool InSameCacheLine = (Diff->getValue()->getSExtValue() < CLS);

  LLVM_DEBUG({
    if (InSameCacheLine)
      dbgs().indent(2) << "Found spacial reuse.\n";
    else
      dbgs().indent(2) << "No spacial reuse.\n";
  });

  return InSameCacheLine;
}

std::optional<bool>
IndexedReference::hasTemporalReuse(const IndexedReference &Other,
                                   unsigned MaxDistance, const Loop &L,
                                   DependenceInfo &DI, AAResults &AA) const {
  assert(IsValid && "Expecting a valid reference");

  if (BasePointer != Other.getBasePointer() && !isAliased(Other, AA)) {
    LLVM_DEBUG(dbgs().indent(2)
               << "No temporal reuse: different base pointer\n");
    return false;
  }

  std::unique_ptr<Dependence> D =
      DI.depends(&StoreOrLoadInst, &Other.StoreOrLoadInst, true);

  if (D == nullptr) {
    LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: no dependence\n");
    return false;
  }

  if (D->isLoopIndependent()) {
    LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
    return true;
  }

  // Check the dependence distance at every loop level. There is temporal reuse
  // if the distance at the given loop's depth is small (|d| <= MaxDistance) and
  // it is zero at every other loop level.
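  // For example, for the accesses A[i][j] and A[i][j - 2] in a two-deep nest,
  // the dependence distance vector is (0, 2): considering the j-loop (depth 2)
  // there is temporal reuse whenever MaxDistance >= 2, while considering the
  // i-loop the non-zero distance at depth 2 rules reuse out.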
  int LoopDepth = L.getLoopDepth();
  int Levels = D->getLevels();
  for (int Level = 1; Level <= Levels; ++Level) {
    const SCEV *Distance = D->getDistance(Level);
    const SCEVConstant *SCEVConst = dyn_cast_or_null<SCEVConstant>(Distance);

    if (SCEVConst == nullptr) {
      LLVM_DEBUG(dbgs().indent(2) << "No temporal reuse: distance unknown\n");
      return std::nullopt;
    }

    const ConstantInt &CI = *SCEVConst->getValue();
    if (Level != LoopDepth && !CI.isZero()) {
      LLVM_DEBUG(dbgs().indent(2)
                 << "No temporal reuse: distance is not zero at depth=" << Level
                 << "\n");
      return false;
    } else if (Level == LoopDepth && CI.getSExtValue() > MaxDistance) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "No temporal reuse: distance is greater than MaxDistance at depth="
          << Level << "\n");
      return false;
    }
  }

  LLVM_DEBUG(dbgs().indent(2) << "Found temporal reuse\n");
  return true;
}

CacheCostTy IndexedReference::computeRefCost(const Loop &L,
                                             unsigned CLS) const {
  assert(IsValid && "Expecting a valid reference");
  LLVM_DEBUG({
    dbgs().indent(2) << "Computing cache cost for:\n";
    dbgs().indent(4) << *this << "\n";
  });

  // If the indexed reference is loop invariant the cost is one.
  if (isLoopInvariant(L)) {
    LLVM_DEBUG(dbgs().indent(4) << "Reference is loop invariant: RefCost=1\n");
    return 1;
  }

  const SCEV *TripCount = computeTripCount(L, *Sizes.back(), SE);
  assert(TripCount && "Expecting valid TripCount");
  LLVM_DEBUG(dbgs() << "TripCount=" << *TripCount << "\n");

  const SCEV *RefCost = nullptr;
  const SCEV *Stride = nullptr;
  if (isConsecutive(L, Stride, CLS)) {
    // If the indexed reference is 'consecutive' the cost is
    // (TripCount*Stride)/CLS.
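    // For example, with TripCount = 100, an 8-byte stride, and CLS = 64 the
    // reference is charged ceil((100 * 8) / 64) = 13 cache lines.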
    assert(Stride != nullptr &&
           "Stride should not be null for consecutive access!");
    Type *WiderType = SE.getWiderType(Stride->getType(), TripCount->getType());
    const SCEV *CacheLineSize = SE.getConstant(WiderType, CLS);
    Stride = SE.getNoopOrAnyExtend(Stride, WiderType);
    TripCount = SE.getNoopOrZeroExtend(TripCount, WiderType);
    const SCEV *Numerator = SE.getMulExpr(Stride, TripCount);
    // Round the fractional cost up to the nearest integer: this matters most
    // when the computed cost is less than one, because it makes more sense to
    // say that one cache line is used rather than zero.
    RefCost = SE.getUDivCeilSCEV(Numerator, CacheLineSize);

    LLVM_DEBUG(dbgs().indent(4)
               << "Access is consecutive: RefCost=(TripCount*Stride)/CLS="
               << *RefCost << "\n");
  } else {
    // If the indexed reference is not 'consecutive' the cost is proportional
    // to the trip count and to the depth of the dimension that the subscript
    // for the subject loop accesses. We estimate this by multiplying the cost
    // by the trip counts of the loops corresponding to the inner dimensions.
    // For example, given the indexed reference 'A[i][j][k]' with the i-loop in
    // the innermost position, the cost equals the iterations of the i-loop
    // multiplied by the iterations of the j-loop.
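    // With trip counts of 100 for the i- and j-loops, A[i][j][k] would be
    // charged roughly 100 * 100 = 10000 cache lines when the i-loop is
    // innermost, compared to roughly (100 * ElemSize) / CLS when the k-loop
    // is innermost and the access is consecutive.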
    RefCost = TripCount;

    int Index = getSubscriptIndex(L);
    assert(Index >= 0 && "Could not locate a valid Index");

    for (unsigned I = Index + 1; I < getNumSubscripts() - 1; ++I) {
      const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(I));
      assert(AR && AR->getLoop() && "Expecting valid loop");
      const SCEV *TripCount =
          computeTripCount(*AR->getLoop(), *Sizes.back(), SE);
      Type *WiderType =
          SE.getWiderType(RefCost->getType(), TripCount->getType());
      // For the multiplication result to fit, request a type twice as wide.
      WiderType = WiderType->getExtendedType();
      RefCost = SE.getMulExpr(SE.getNoopOrZeroExtend(RefCost, WiderType),
                              SE.getNoopOrZeroExtend(TripCount, WiderType));
    }

    LLVM_DEBUG(dbgs().indent(4)
               << "Access is not consecutive: RefCost=" << *RefCost << "\n");
  }
  assert(RefCost && "Expecting a valid RefCost");

  // Attempt to fold RefCost into a constant.
  // CacheCostTy is a signed integer, but the trip count value can be large
  // and may not fit, so saturate/limit the value to the maximum signed
  // integer value.
  if (auto ConstantCost = dyn_cast<SCEVConstant>(RefCost))
    return ConstantCost->getValue()->getLimitedValue(
        std::numeric_limits<int64_t>::max());

  LLVM_DEBUG(dbgs().indent(4)
             << "RefCost is not a constant! Setting to RefCost=InvalidCost "
                "(invalid value).\n");

  return CacheCostTy::getInvalid();
}

bool IndexedReference::tryDelinearizeFixedSize(
    const SCEV *AccessFn, SmallVectorImpl<const SCEV *> &Subscripts) {
  SmallVector<int, 4> ArraySizes;
  if (!tryDelinearizeFixedSizeImpl(&SE, &StoreOrLoadInst, AccessFn, Subscripts,
                                   ArraySizes))
    return false;

  // Populate Sizes with SCEV expressions to be used in calculations later.
  for (auto Idx : seq<unsigned>(1, Subscripts.size()))
    Sizes.push_back(
        SE.getConstant(Subscripts[Idx]->getType(), ArraySizes[Idx - 1]));

  LLVM_DEBUG({
    dbgs() << "Delinearized subscripts of fixed-size array\n"
           << "GEP:" << *getLoadStorePointerOperand(&StoreOrLoadInst)
           << "\n";
  });
  return true;
}

bool IndexedReference::delinearize(const LoopInfo &LI) {
  assert(Subscripts.empty() && "Subscripts should be empty");
  assert(Sizes.empty() && "Sizes should be empty");
  assert(!IsValid && "Should be called once from the constructor");
  LLVM_DEBUG(dbgs() << "Delinearizing: " << StoreOrLoadInst << "\n");

  const SCEV *ElemSize = SE.getElementSize(&StoreOrLoadInst);
  const BasicBlock *BB = StoreOrLoadInst.getParent();

  if (Loop *L = LI.getLoopFor(BB)) {
    const SCEV *AccessFn =
        SE.getSCEVAtScope(getPointerOperand(&StoreOrLoadInst), L);

    BasePointer = dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFn));
    if (BasePointer == nullptr) {
      LLVM_DEBUG(
          dbgs().indent(2)
          << "ERROR: failed to delinearize, can't identify base pointer\n");
      return false;
    }

    bool IsFixedSize = false;
    // Try to delinearize fixed-size arrays.
    if (tryDelinearizeFixedSize(AccessFn, Subscripts)) {
      IsFixedSize = true;
      // The last element of Sizes is the element size.
      Sizes.push_back(ElemSize);
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
    }

    AccessFn = SE.getMinusSCEV(AccessFn, BasePointer);

    // Try to delinearize parametric-size arrays.
    if (!IsFixedSize) {
      LLVM_DEBUG(dbgs().indent(2) << "In Loop '" << L->getName()
                                  << "', AccessFn: " << *AccessFn << "\n");
      llvm::delinearize(SE, AccessFn, Subscripts, Sizes,
                        SE.getElementSize(&StoreOrLoadInst));
    }

    if (Subscripts.empty() || Sizes.empty() ||
        Subscripts.size() != Sizes.size()) {
      // Attempt to determine whether we have a single-dimensional array access
      // before giving up.
      if (!isOneDimensionalArray(*AccessFn, *ElemSize, *L, SE)) {
        LLVM_DEBUG(dbgs().indent(2)
                   << "ERROR: failed to delinearize reference\n");
        Subscripts.clear();
        Sizes.clear();
        return false;
      }

      // The array may be accessed in reverse, for example:
      //   for (i = N; i > 0; i--)
      //     A[i] = 0;
      // In this case, reconstruct the access function using the absolute value
      // of the step recurrence.
      const SCEVAddRecExpr *AccessFnAR = dyn_cast<SCEVAddRecExpr>(AccessFn);
      const SCEV *StepRec =
          AccessFnAR ? AccessFnAR->getStepRecurrence(SE) : nullptr;

      if (StepRec && SE.isKnownNegative(StepRec))
        AccessFn = SE.getAddRecExpr(AccessFnAR->getStart(),
                                    SE.getNegativeSCEV(StepRec),
                                    AccessFnAR->getLoop(),
                                    AccessFnAR->getNoWrapFlags());
      const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
      Subscripts.push_back(Div);
      Sizes.push_back(ElemSize);
    }

    return all_of(Subscripts, [&](const SCEV *Subscript) {
      return isSimpleAddRecurrence(*Subscript, *L);
    });
  }

  return false;
}

bool IndexedReference::isLoopInvariant(const Loop &L) const {
  Value *Addr = getPointerOperand(&StoreOrLoadInst);
  assert(Addr != nullptr && "Expecting either a load or a store instruction");
  assert(SE.isSCEVable(Addr->getType()) && "Addr should be SCEVable");

  if (SE.isLoopInvariant(SE.getSCEV(Addr), &L))
    return true;

  // The indexed reference is loop invariant if none of the coefficients use
  // the loop induction variable.
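  // For example, the reference A[j][k] is invariant with respect to the
  // i-loop of an (i, j, k) nest: none of its subscripts are add recurrences
  // in the i-loop.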
  bool allCoeffForLoopAreZero = all_of(Subscripts, [&](const SCEV *Subscript) {
    return isCoeffForLoopZeroOrInvariant(*Subscript, L);
  });

  return allCoeffForLoopAreZero;
}

bool IndexedReference::isConsecutive(const Loop &L, const SCEV *&Stride,
                                     unsigned CLS) const {
  // The indexed reference is 'consecutive' if the only coefficient that uses
  // the loop induction variable is the last one...
  const SCEV *LastSubscript = Subscripts.back();
  for (const SCEV *Subscript : Subscripts) {
    if (Subscript == LastSubscript)
      continue;
    if (!isCoeffForLoopZeroOrInvariant(*Subscript, L))
      return false;
  }

  // ...and the access stride is less than the cache line size.
  const SCEV *Coeff = getLastCoefficient();
  const SCEV *ElemSize = Sizes.back();
  Type *WiderType = SE.getWiderType(Coeff->getType(), ElemSize->getType());
  // FIXME: This assumes that all values are signed integers, which may be
  // incorrect in unusual code and may incorrectly use sext instead of zext.
  // For example:
  //   for (uint32_t i = 0; i < 512; ++i) {
  //     uint8_t trunc = i;
  //     A[trunc] = 42;
  //   }
  // This consecutively iterates twice over A. If `trunc` is sign-extended,
  // we would conclude that this may iterate backwards over the array.
  // However, LoopCacheAnalysis is a heuristic anyway, and transformations must
  // not result in wrong optimizations if the heuristic was incorrect.
  Stride = SE.getMulExpr(SE.getNoopOrSignExtend(Coeff, WiderType),
                         SE.getNoopOrSignExtend(ElemSize, WiderType));
  const SCEV *CacheLineSize = SE.getConstant(Stride->getType(), CLS);

  Stride = SE.isKnownNegative(Stride) ? SE.getNegativeSCEV(Stride) : Stride;
  return SE.isKnownPredicate(ICmpInst::ICMP_ULT, Stride, CacheLineSize);
}

int IndexedReference::getSubscriptIndex(const Loop &L) const {
  for (auto Idx : seq<int>(0, getNumSubscripts())) {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(getSubscript(Idx));
    if (AR && AR->getLoop() == &L) {
      return Idx;
    }
  }
  return -1;
}

const SCEV *IndexedReference::getLastCoefficient() const {
  const SCEV *LastSubscript = getLastSubscript();
  auto *AR = cast<SCEVAddRecExpr>(LastSubscript);
  return AR->getStepRecurrence(SE);
}

bool IndexedReference::isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
                                                     const Loop &L) const {
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(&Subscript);
  return (AR != nullptr) ? AR->getLoop() != &L
                         : SE.isLoopInvariant(&Subscript, &L);
}

bool IndexedReference::isSimpleAddRecurrence(const SCEV &Subscript,
                                             const Loop &L) const {
  if (!isa<SCEVAddRecExpr>(Subscript))
    return false;

  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(&Subscript);
  assert(AR->getLoop() && "AR should have a loop");

  if (!AR->isAffine())
    return false;

  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(SE);

  if (!SE.isLoopInvariant(Start, &L) || !SE.isLoopInvariant(Step, &L))
    return false;

  return true;
}

bool IndexedReference::isAliased(const IndexedReference &Other,
                                 AAResults &AA) const {
  const auto &Loc1 = MemoryLocation::get(&StoreOrLoadInst);
  const auto &Loc2 = MemoryLocation::get(&Other.StoreOrLoadInst);
  return AA.isMustAlias(Loc1, Loc2);
}

//===----------------------------------------------------------------------===//
// CacheCost implementation
//

raw_ostream &llvm::operator<<(raw_ostream &OS, const CacheCost &CC) {
  for (const auto &LC : CC.LoopCosts) {
    const Loop *L = LC.first;
    OS << "Loop '" << L->getName() << "' has cost = " << LC.second << "\n";
  }
  return OS;
}

CacheCost::CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI,
                     ScalarEvolution &SE, TargetTransformInfo &TTI,
                     AAResults &AA, DependenceInfo &DI,
                     std::optional<unsigned> TRT)
    : Loops(Loops), TRT(TRT.value_or(TemporalReuseThreshold)), LI(LI), SE(SE),
      TTI(TTI), AA(AA), DI(DI) {
  assert(!Loops.empty() && "Expecting a non-empty loop vector.");

  for (const Loop *L : Loops) {
    unsigned TripCount = SE.getSmallConstantTripCount(L);
    TripCount = (TripCount == 0) ? DefaultTripCount : TripCount;
    TripCounts.push_back({L, TripCount});
  }

  calculateCacheFootprint();
}

std::unique_ptr<CacheCost>
CacheCost::getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR,
                        DependenceInfo &DI, std::optional<unsigned> TRT) {
  if (!Root.isOutermost()) {
    LLVM_DEBUG(dbgs() << "Expecting the outermost loop in a loop nest\n");
    return nullptr;
  }

  LoopVectorTy Loops;
  append_range(Loops, breadth_first(&Root));

  if (!getInnerMostLoop(Loops)) {
    LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
                         "than one innermost loop\n");
    return nullptr;
  }

  return std::make_unique<CacheCost>(Loops, AR.LI, AR.SE, AR.TTI, AR.AA, DI,
                                     TRT);
}

void CacheCost::calculateCacheFootprint() {
  LLVM_DEBUG(dbgs() << "POPULATING REFERENCE GROUPS\n");
  ReferenceGroupsTy RefGroups;
  if (!populateReferenceGroups(RefGroups))
    return;

  LLVM_DEBUG(dbgs() << "COMPUTING LOOP CACHE COSTS\n");
  for (const Loop *L : Loops) {
    assert(llvm::none_of(
               LoopCosts,
               [L](const LoopCacheCostTy &LCC) { return LCC.first == L; }) &&
           "Should not add duplicate element");
    CacheCostTy LoopCost = computeLoopCacheCost(*L, RefGroups);
    LoopCosts.push_back(std::make_pair(L, LoopCost));
  }

  sortLoopCosts();
  RefGroups.clear();
}

bool CacheCost::populateReferenceGroups(ReferenceGroupsTy &RefGroups) const {
  assert(RefGroups.empty() && "Reference groups should be empty");

  unsigned CLS = TTI.getCacheLineSize();
  Loop *InnerMostLoop = getInnerMostLoop(Loops);
  assert(InnerMostLoop != nullptr && "Expecting a valid innermost loop");

  for (BasicBlock *BB : InnerMostLoop->getBlocks()) {
    for (Instruction &I : *BB) {
      if (!isa<StoreInst>(I) && !isa<LoadInst>(I))
        continue;

      std::unique_ptr<IndexedReference> R(new IndexedReference(I, LI, SE));
      if (!R->isValid())
        continue;

      bool Added = false;
      for (ReferenceGroupTy &RefGroup : RefGroups) {
        const IndexedReference &Representative = *RefGroup.front();
        LLVM_DEBUG({
          dbgs() << "References:\n";
          dbgs().indent(2) << *R << "\n";
          dbgs().indent(2) << Representative << "\n";
        });

        // FIXME: Both positive and negative access functions will be placed
        // into the same reference group, resulting in a bi-directional array
        // access such as:
        //   for (i = N; i > 0; i--)
        //     A[i] = A[N - i];
        // having the same cost calculation as a uni-directional access pattern
        //   for (i = 0; i < N; i++)
        //     A[i] = A[i];
        // when in actuality, depending on the array size, the first example
        // should have a cost closer to 2x the second due to the two cache
        // accesses per iteration from opposite ends of the array.
        std::optional<bool> HasTemporalReuse =
            R->hasTemporalReuse(Representative, *TRT, *InnerMostLoop, DI, AA);
        std::optional<bool> HasSpacialReuse =
            R->hasSpacialReuse(Representative, CLS, AA);

        if ((HasTemporalReuse && *HasTemporalReuse) ||
            (HasSpacialReuse && *HasSpacialReuse)) {
          RefGroup.push_back(std::move(R));
          Added = true;
          break;
        }
      }

      if (!Added) {
        ReferenceGroupTy RG;
        RG.push_back(std::move(R));
        RefGroups.push_back(std::move(RG));
      }
    }
  }

  if (RefGroups.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "\nIDENTIFIED REFERENCE GROUPS:\n";
    int n = 1;
    for (const ReferenceGroupTy &RG : RefGroups) {
      dbgs().indent(2) << "RefGroup " << n << ":\n";
      for (const auto &IR : RG)
        dbgs().indent(4) << *IR << "\n";
      n++;
    }
    dbgs() << "\n";
  });

  return true;
}

CacheCostTy
CacheCost::computeLoopCacheCost(const Loop &L,
                                const ReferenceGroupsTy &RefGroups) const {
  if (!L.isLoopSimplifyForm())
    return CacheCostTy::getInvalid();

  LLVM_DEBUG(dbgs() << "Considering loop '" << L.getName()
                    << "' as innermost loop.\n");

  // Compute the product of the trip counts of each other loop in the nest.
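  // For example, in a three-deep nest with trip counts (TC_i, TC_j, TC_k),
  // when L is the j-loop the product is TC_i * TC_k, and every reference
  // group cost computed with L as the innermost loop is scaled by it.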
  CacheCostTy TripCountsProduct = 1;
  for (const auto &TC : TripCounts) {
    if (TC.first == &L)
      continue;
    TripCountsProduct *= TC.second;
  }

  CacheCostTy LoopCost = 0;
  for (const ReferenceGroupTy &RG : RefGroups) {
    CacheCostTy RefGroupCost = computeRefGroupCacheCost(RG, L);
    LoopCost += RefGroupCost * TripCountsProduct;
  }

  LLVM_DEBUG(dbgs().indent(2) << "Loop '" << L.getName()
                              << "' has cost=" << LoopCost << "\n");

  return LoopCost;
}

CacheCostTy CacheCost::computeRefGroupCacheCost(const ReferenceGroupTy &RG,
                                                const Loop &L) const {
  assert(!RG.empty() && "Reference group should have at least one member.");

  const IndexedReference *Representative = RG.front().get();
  return Representative->computeRefCost(L, TTI.getCacheLineSize());
}

//===----------------------------------------------------------------------===//
// LoopCachePrinterPass implementation
//

PreservedAnalyses LoopCachePrinterPass::run(Loop &L, LoopAnalysisManager &AM,
                                            LoopStandardAnalysisResults &AR,
                                            LPMUpdater &U) {
  Function *F = L.getHeader()->getParent();
  DependenceInfo DI(F, &AR.AA, &AR.SE, &AR.LI);

  if (auto CC = CacheCost::getCacheCost(L, AR, DI))
    OS << *CC;

  return PreservedAnalyses::all();
}