1 //===- ScopDetection.cpp - Detect Scops -----------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // Detect the maximal Scops of a function.
11 // A static control part (Scop) is a subgraph of the control flow graph (CFG)
12 // that only has statically known control flow and can therefore be described
13 // within the polyhedral model.
15 // Every Scop fulfills these restrictions:
17 // * It is a single entry single exit region
19 // * Only affine linear bounds in the loops
21 // Every natural loop in a Scop must have a number of loop iterations that can
22 // be described as an affine linear function in surrounding loop iterators or
23 // parameters. (A parameter is a scalar that does not change its value during
24 // execution of the Scop).
26 // * Only comparisons of affine linear expressions in conditions
28 // * All loops and conditions perfectly nested
30 // The control flow needs to be structured such that it could be written using
31 // just 'for' and 'if' statements, without the need for any 'goto', 'break' or
34 // * Side effect free function calls
36 // Function calls and intrinsics that do not have side effects (readnone)
37 // or memory intrinsics (memset, memcpy, memmove) are allowed.
39 // The Scop detection finds the largest Scops by checking if the largest
40 // region is a Scop. If this is not the case, its canonical subregions are
41 // checked until a region is a Scop. It is now tried to extend this Scop by
42 // creating a larger non canonical region.
44 //===----------------------------------------------------------------------===//
46 #include "polly/ScopDetection.h"
47 #include "polly/LinkAllPasses.h"
48 #include "polly/Options.h"
49 #include "polly/ScopDetectionDiagnostic.h"
50 #include "polly/Support/SCEVValidator.h"
51 #include "polly/Support/ScopHelper.h"
52 #include "polly/Support/ScopLocation.h"
53 #include "llvm/ADT/SmallPtrSet.h"
54 #include "llvm/ADT/Statistic.h"
55 #include "llvm/Analysis/AliasAnalysis.h"
56 #include "llvm/Analysis/Delinearization.h"
57 #include "llvm/Analysis/Loads.h"
58 #include "llvm/Analysis/LoopInfo.h"
59 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
60 #include "llvm/Analysis/RegionInfo.h"
61 #include "llvm/Analysis/ScalarEvolution.h"
62 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
63 #include "llvm/IR/BasicBlock.h"
64 #include "llvm/IR/DebugLoc.h"
65 #include "llvm/IR/DerivedTypes.h"
66 #include "llvm/IR/DiagnosticInfo.h"
67 #include "llvm/IR/DiagnosticPrinter.h"
68 #include "llvm/IR/Dominators.h"
69 #include "llvm/IR/Function.h"
70 #include "llvm/IR/InstrTypes.h"
71 #include "llvm/IR/Instruction.h"
72 #include "llvm/IR/Instructions.h"
73 #include "llvm/IR/IntrinsicInst.h"
74 #include "llvm/IR/Metadata.h"
75 #include "llvm/IR/Module.h"
76 #include "llvm/IR/PassManager.h"
77 #include "llvm/IR/Value.h"
78 #include "llvm/InitializePasses.h"
79 #include "llvm/Pass.h"
80 #include "llvm/Support/Debug.h"
81 #include "llvm/Support/Regex.h"
82 #include "llvm/Support/raw_ostream.h"
92 using namespace polly
;
94 #include "polly/Support/PollyDebug.h"
95 #define DEBUG_TYPE "polly-detect"
97 // This option is set to a very high value, as analyzing such loops increases
98 // compile time on several cases. For experiments that enable this option,
99 // a value of around 40 has been working to avoid run-time regressions with
100 // Polly while still exposing interesting optimization opportunities.
101 static cl::opt
<int> ProfitabilityMinPerLoopInstructions(
102 "polly-detect-profitability-min-per-loop-insts",
103 cl::desc("The minimal number of per-loop instructions before a single loop "
104 "region is considered profitable"),
105 cl::Hidden
, cl::ValueRequired
, cl::init(100000000), cl::cat(PollyCategory
));
107 bool polly::PollyProcessUnprofitable
;
109 static cl::opt
<bool, true> XPollyProcessUnprofitable(
110 "polly-process-unprofitable",
112 "Process scops that are unlikely to benefit from Polly optimizations."),
113 cl::location(PollyProcessUnprofitable
), cl::cat(PollyCategory
));
115 static cl::list
<std::string
> OnlyFunctions(
117 cl::desc("Only run on functions that match a regex. "
118 "Multiple regexes can be comma separated. "
119 "Scop detection will run on all functions that match "
120 "ANY of the regexes provided."),
121 cl::CommaSeparated
, cl::cat(PollyCategory
));
123 static cl::list
<std::string
> IgnoredFunctions(
125 cl::desc("Ignore functions that match a regex. "
126 "Multiple regexes can be comma separated. "
127 "Scop detection will ignore all functions that match "
128 "ANY of the regexes provided."),
129 cl::CommaSeparated
, cl::cat(PollyCategory
));
131 bool polly::PollyAllowFullFunction
;
133 static cl::opt
<bool, true>
134 XAllowFullFunction("polly-detect-full-functions",
135 cl::desc("Allow the detection of full functions"),
136 cl::location(polly::PollyAllowFullFunction
),
137 cl::init(false), cl::cat(PollyCategory
));
139 static cl::opt
<std::string
> OnlyRegion(
141 cl::desc("Only run on certain regions (The provided identifier must "
142 "appear in the name of the region's entry block"),
143 cl::value_desc("identifier"), cl::ValueRequired
, cl::init(""),
144 cl::cat(PollyCategory
));
147 IgnoreAliasing("polly-ignore-aliasing",
148 cl::desc("Ignore possible aliasing of the array bases"),
149 cl::Hidden
, cl::cat(PollyCategory
));
151 bool polly::PollyAllowUnsignedOperations
;
153 static cl::opt
<bool, true> XPollyAllowUnsignedOperations(
154 "polly-allow-unsigned-operations",
155 cl::desc("Allow unsigned operations such as comparisons or zero-extends."),
156 cl::location(PollyAllowUnsignedOperations
), cl::Hidden
, cl::init(true),
157 cl::cat(PollyCategory
));
159 bool polly::PollyUseRuntimeAliasChecks
;
161 static cl::opt
<bool, true> XPollyUseRuntimeAliasChecks(
162 "polly-use-runtime-alias-checks",
163 cl::desc("Use runtime alias checks to resolve possible aliasing."),
164 cl::location(PollyUseRuntimeAliasChecks
), cl::Hidden
, cl::init(true),
165 cl::cat(PollyCategory
));
168 ReportLevel("polly-report",
169 cl::desc("Print information about the activities of Polly"),
170 cl::cat(PollyCategory
));
172 static cl::opt
<bool> AllowDifferentTypes(
173 "polly-allow-differing-element-types",
174 cl::desc("Allow different element types for array accesses"), cl::Hidden
,
175 cl::init(true), cl::cat(PollyCategory
));
178 AllowNonAffine("polly-allow-nonaffine",
179 cl::desc("Allow non affine access functions in arrays"),
180 cl::Hidden
, cl::cat(PollyCategory
));
183 AllowModrefCall("polly-allow-modref-calls",
184 cl::desc("Allow functions with known modref behavior"),
185 cl::Hidden
, cl::cat(PollyCategory
));
187 static cl::opt
<bool> AllowNonAffineSubRegions(
188 "polly-allow-nonaffine-branches",
189 cl::desc("Allow non affine conditions for branches"), cl::Hidden
,
190 cl::init(true), cl::cat(PollyCategory
));
193 AllowNonAffineSubLoops("polly-allow-nonaffine-loops",
194 cl::desc("Allow non affine conditions for loops"),
195 cl::Hidden
, cl::cat(PollyCategory
));
197 static cl::opt
<bool, true>
198 TrackFailures("polly-detect-track-failures",
199 cl::desc("Track failure strings in detecting scop regions"),
200 cl::location(PollyTrackFailures
), cl::Hidden
, cl::init(true),
201 cl::cat(PollyCategory
));
203 static cl::opt
<bool> KeepGoing("polly-detect-keep-going",
204 cl::desc("Do not fail on the first error."),
205 cl::Hidden
, cl::cat(PollyCategory
));
207 static cl::opt
<bool, true>
208 PollyDelinearizeX("polly-delinearize",
209 cl::desc("Delinearize array access functions"),
210 cl::location(PollyDelinearize
), cl::Hidden
,
211 cl::init(true), cl::cat(PollyCategory
));
214 VerifyScops("polly-detect-verify",
215 cl::desc("Verify the detected SCoPs after each transformation"),
216 cl::Hidden
, cl::cat(PollyCategory
));
218 bool polly::PollyInvariantLoadHoisting
;
220 static cl::opt
<bool, true>
221 XPollyInvariantLoadHoisting("polly-invariant-load-hoisting",
222 cl::desc("Hoist invariant loads."),
223 cl::location(PollyInvariantLoadHoisting
),
224 cl::Hidden
, cl::cat(PollyCategory
));
226 static cl::opt
<bool> PollyAllowErrorBlocks(
227 "polly-allow-error-blocks",
228 cl::desc("Allow to speculate on the execution of 'error blocks'."),
229 cl::Hidden
, cl::init(true), cl::cat(PollyCategory
));
231 /// The minimal trip count under which loops are considered unprofitable.
232 static const unsigned MIN_LOOP_TRIP_COUNT
= 8;
234 bool polly::PollyTrackFailures
= false;
235 bool polly::PollyDelinearize
= false;
236 StringRef
polly::PollySkipFnAttr
= "polly.skip.fn";
238 //===----------------------------------------------------------------------===//
241 STATISTIC(NumScopRegions
, "Number of scops");
242 STATISTIC(NumLoopsInScop
, "Number of loops in scops");
243 STATISTIC(NumScopsDepthZero
, "Number of scops with maximal loop depth 0");
244 STATISTIC(NumScopsDepthOne
, "Number of scops with maximal loop depth 1");
245 STATISTIC(NumScopsDepthTwo
, "Number of scops with maximal loop depth 2");
246 STATISTIC(NumScopsDepthThree
, "Number of scops with maximal loop depth 3");
247 STATISTIC(NumScopsDepthFour
, "Number of scops with maximal loop depth 4");
248 STATISTIC(NumScopsDepthFive
, "Number of scops with maximal loop depth 5");
249 STATISTIC(NumScopsDepthLarger
,
250 "Number of scops with maximal loop depth 6 and larger");
251 STATISTIC(NumProfScopRegions
, "Number of scops (profitable scops only)");
252 STATISTIC(NumLoopsInProfScop
,
253 "Number of loops in scops (profitable scops only)");
254 STATISTIC(NumLoopsOverall
, "Number of total loops");
255 STATISTIC(NumProfScopsDepthZero
,
256 "Number of scops with maximal loop depth 0 (profitable scops only)");
257 STATISTIC(NumProfScopsDepthOne
,
258 "Number of scops with maximal loop depth 1 (profitable scops only)");
259 STATISTIC(NumProfScopsDepthTwo
,
260 "Number of scops with maximal loop depth 2 (profitable scops only)");
261 STATISTIC(NumProfScopsDepthThree
,
262 "Number of scops with maximal loop depth 3 (profitable scops only)");
263 STATISTIC(NumProfScopsDepthFour
,
264 "Number of scops with maximal loop depth 4 (profitable scops only)");
265 STATISTIC(NumProfScopsDepthFive
,
266 "Number of scops with maximal loop depth 5 (profitable scops only)");
267 STATISTIC(NumProfScopsDepthLarger
,
268 "Number of scops with maximal loop depth 6 and larger "
269 "(profitable scops only)");
270 STATISTIC(MaxNumLoopsInScop
, "Maximal number of loops in scops");
271 STATISTIC(MaxNumLoopsInProfScop
,
272 "Maximal number of loops in scops (profitable scops only)");
274 static void updateLoopCountStatistic(ScopDetection::LoopStats Stats
,
275 bool OnlyProfitable
);
279 class DiagnosticScopFound final
: public DiagnosticInfo
{
281 static int PluginDiagnosticKind
;
284 std::string FileName
;
285 unsigned EntryLine
, ExitLine
;
288 DiagnosticScopFound(Function
&F
, std::string FileName
, unsigned EntryLine
,
290 : DiagnosticInfo(PluginDiagnosticKind
, DS_Note
), F(F
), FileName(FileName
),
291 EntryLine(EntryLine
), ExitLine(ExitLine
) {}
293 void print(DiagnosticPrinter
&DP
) const override
;
295 static bool classof(const DiagnosticInfo
*DI
) {
296 return DI
->getKind() == PluginDiagnosticKind
;
301 int DiagnosticScopFound::PluginDiagnosticKind
=
302 getNextAvailablePluginDiagnosticKind();
304 void DiagnosticScopFound::print(DiagnosticPrinter
&DP
) const {
305 DP
<< "Polly detected an optimizable loop region (scop) in function '" << F
308 if (FileName
.empty()) {
309 DP
<< "Scop location is unknown. Compile with debug info "
310 "(-g) to get more precise information. ";
314 DP
<< FileName
<< ":" << EntryLine
<< ": Start of scop\n";
315 DP
<< FileName
<< ":" << ExitLine
<< ": End of scop";
318 /// Check if a string matches any regex in a list of regexes.
319 /// @param Str the input string to match against.
320 /// @param RegexList a list of strings that are regular expressions.
/// @returns true iff Str matches at least one regex in RegexList.
///
/// NOTE: an invalid regex in RegexList is a hard error; we abort via
/// report_fatal_error rather than silently skipping the entry.
321 static bool doesStringMatchAnyRegex(StringRef Str
,
322 const cl::list
<std::string
> &RegexList
) {
323 for (auto RegexStr
: RegexList
) {
328 report_fatal_error(Twine("invalid regex given as input to polly: ") + Err
,
337 //===----------------------------------------------------------------------===//
// Construct a ScopDetection instance. Only stores references to the
// per-function analyses (dominator tree, scalar evolution, loop info,
// region info, alias analysis, optimization-remark emitter) that the
// detection queries below rely on; no detection work happens here --
// callers must invoke detect() separately.
340 ScopDetection::ScopDetection(const DominatorTree
&DT
, ScalarEvolution
&SE
,
341 LoopInfo
&LI
, RegionInfo
&RI
, AAResults
&AA
,
342 OptimizationRemarkEmitter
&ORE
)
343 : DT(DT
), SE(SE
), LI(LI
), RI(RI
), AA(AA
), ORE(ORE
) {}
// Run scop detection on function F: find all valid regions, then prune
// those that are not expected to profit from polyhedral optimization.
// Must be called at most once per ScopDetection instance (see assert).
345 void ScopDetection::detect(Function
&F
) {
346 assert(ValidRegions
.empty() && "Detection must run only once");
// Without -polly-process-unprofitable, functions without any loop cannot
// contain a profitable scop; bail out early.
348 if (!PollyProcessUnprofitable
&& LI
.empty())
351 Region
*TopRegion
= RI
.getTopLevelRegion();
// Honor the -polly-only-func / -polly-ignore-func regex filters before
// doing any per-function validation.
353 if (!OnlyFunctions
.empty() &&
354 !doesStringMatchAnyRegex(F
.getName(), OnlyFunctions
))
357 if (doesStringMatchAnyRegex(F
.getName(), IgnoredFunctions
))
360 if (!isValidFunction(F
))
// Populate ValidRegions by walking the region tree from the top.
363 findScops(*TopRegion
);
365 NumScopRegions
+= ValidRegions
.size();
367 // Prune non-profitable regions.
368 for (auto &DIt
: DetectionContextMap
) {
369 DetectionContext
&DC
= *DIt
.getSecond().get();
370 if (DC
.Log
.hasErrors())
372 if (!ValidRegions
.count(&DC
.CurRegion
))
// Update loop statistics for every valid region; profitable regions are
// counted a second time with OnlyProfitable=true, unprofitable ones are
// removed from ValidRegions.
374 LoopStats Stats
= countBeneficialLoops(&DC
.CurRegion
, SE
, LI
, 0);
375 updateLoopCountStatistic(Stats
, false /* OnlyProfitable */);
376 if (isProfitableRegion(DC
)) {
377 updateLoopCountStatistic(Stats
, true /* OnlyProfitable */);
381 ValidRegions
.remove(&DC
.CurRegion
);
384 NumProfScopRegions
+= ValidRegions
.size();
385 NumLoopsOverall
+= countBeneficialLoops(TopRegion
, SE
, LI
, 0).NumLoops
;
387 // Only makes sense when we tracked errors.
388 if (PollyTrackFailures
)
389 emitMissedRemarks(F
);
394 assert(ValidRegions
.size() <= DetectionContextMap
.size() &&
395 "Cached more results than valid regions");
// Mark the current region as invalid for reason RR (a ReportXXX class),
// constructing the reject reason from Arguments and recording it in the
// context's log. When Context.Verifying is set, the logging is skipped;
// instead the assert below fires if Assert is true, because an already
// detected scop must not fail re-verification.
398 template <class RR
, typename
... Args
>
399 inline bool ScopDetection::invalid(DetectionContext
&Context
, bool Assert
,
400 Args
&&...Arguments
) const {
401 if (!Context
.Verifying
) {
402 RejectLog
&Log
= Context
.Log
;
403 std::shared_ptr
<RR
> RejectReason
= std::make_shared
<RR
>(Arguments
...);
404 Context
.IsInvalid
= true;
406 // Log even if PollyTrackFailures is false, the log entries are also used in
407 // canUseISLTripCount().
408 Log
.report(RejectReason
);
410 POLLY_DEBUG(dbgs() << RejectReason
->getMessage());
411 POLLY_DEBUG(dbgs() << "\n");
413 assert(!Assert
&& "Verification of detected scop failed");
// Re-check whether R is still a maximal valid scop region, rebuilding its
// DetectionContext from scratch. Regions not already in ValidRegions are
// rejected immediately.
419 bool ScopDetection::isMaxRegionInScop(const Region
&R
, bool Verify
) {
420 if (!ValidRegions
.count(&R
))
424 BBPair P
= getBBPairForRegion(&R
);
425 std::unique_ptr
<DetectionContext
> &Entry
= DetectionContextMap
[P
];
427 // Free previous DetectionContext for the region and create and verify a new
428 // one. Be sure that the DetectionContext is not still used by a ScopInfo.
429 // Due to changes by CodeGeneration of another Scop, the Region object and
430 // the BBPair might not match anymore.
431 Entry
= std::make_unique
<DetectionContext
>(const_cast<Region
&>(R
), AA
,
432 /*Verifying=*/false);
434 return isValidRegion(*Entry
.get());
// Return a human-readable message describing why region R was rejected,
// taken from the first entry of its rejection log.
440 std::string
ScopDetection::regionIsInvalidBecause(const Region
*R
) const {
441 // Get the first error we found. Even in keep-going mode, this is the first
442 // reason that caused the candidate to be rejected.
443 auto *Log
= lookupRejectionLog(R
);
445 // This can happen when we marked a region invalid, but didn't track
// the failure reason (e.g., PollyTrackFailures disabled).
447 if (!Log
|| !Log
->hasErrors())
450 RejectReasonPtr RR
= *Log
->begin();
451 return RR
->getMessage();
// Record AR as a non-affine subregion that will be overapproximated, and
// register every loop it contains as a "boxed" loop. Returns whether
// overapproximation is permitted: boxed loops are only acceptable when
// -polly-allow-nonaffine-loops is set.
454 bool ScopDetection::addOverApproximatedRegion(Region
*AR
,
455 DetectionContext
&Context
) const {
456 // If we already know about AR we can exit.
457 if (!Context
.NonAffineSubRegionSet
.insert(AR
))
460 // All loops in the region have to be overapproximated too if there
461 // are accesses that depend on the iteration count.
463 for (BasicBlock
*BB
: AR
->blocks()) {
464 Loop
*L
= LI
.getLoopFor(BB
);
466 Context
.BoxedLoopsSet
.insert(L
);
469 return (AllowNonAffineSubLoops
|| Context
.BoxedLoopsSet
.empty());
// Check that every load in RequiredILS can legally be hoisted out of the
// region (invariant load hoisting); on success the loads are added to
// Context.RequiredILS. Requires -polly-invariant-load-hoisting whenever
// the set is non-empty.
472 bool ScopDetection::onlyValidRequiredInvariantLoads(
473 InvariantLoadsSetTy
&RequiredILS
, DetectionContext
&Context
) const {
474 Region
&CurRegion
= Context
.CurRegion
;
475 const DataLayout
&DL
= CurRegion
.getEntry()->getModule()->getDataLayout();
477 if (!PollyInvariantLoadHoisting
&& !RequiredILS
.empty())
480 for (LoadInst
*Load
: RequiredILS
) {
481 // If we already know a load has been accepted as required invariant, we
482 // already run the validation below once and consequently don't need to
483 // run it again. Hence, we return early. For certain test cases (e.g.,
484 // COSMO) this avoids us spending 50% of scop-detection time in this
485 // very function (and its children).
486 if (Context
.RequiredILS
.count(Load
))
488 if (!isHoistableLoad(Load
, CurRegion
, LI
, SE
, DT
, Context
.RequiredILS
))
// Loads inside an overapproximated (non-affine) subregion may only be
// hoisted when proven unconditionally safe to execute, unless they sit
// in the subregion's entry block (which always executes).
491 for (auto NonAffineRegion
: Context
.NonAffineSubRegionSet
) {
492 if (isSafeToLoadUnconditionally(Load
->getPointerOperand(),
493 Load
->getType(), Load
->getAlign(), DL
,
497 if (NonAffineRegion
->contains(Load
) &&
498 Load
->getParent() != NonAffineRegion
->getEntry())
503 Context
.RequiredILS
.insert(RequiredILS
.begin(), RequiredILS
.end());
508 bool ScopDetection::involvesMultiplePtrs(const SCEV
*S0
, const SCEV
*S1
,
510 SetVector
<Value
*> Values
;
511 findValues(S0
, SE
, Values
);
513 findValues(S1
, SE
, Values
);
515 SmallPtrSet
<Value
*, 8> PtrVals
;
516 for (auto *V
: Values
) {
517 if (auto *P2I
= dyn_cast
<PtrToIntInst
>(V
))
518 V
= P2I
->getOperand(0);
520 if (!V
->getType()->isPointerTy())
523 auto *PtrSCEV
= SE
.getSCEVAtScope(V
, Scope
);
524 if (isa
<SCEVConstant
>(PtrSCEV
))
527 auto *BasePtr
= dyn_cast
<SCEVUnknown
>(SE
.getPointerBase(PtrSCEV
));
531 auto *BasePtrVal
= BasePtr
->getValue();
532 if (PtrVals
.insert(BasePtrVal
).second
) {
533 for (auto *PtrVal
: PtrVals
)
534 if (PtrVal
!= BasePtrVal
&& !AA
.isNoAlias(PtrVal
, BasePtrVal
))
// Check whether SCEV expression S is affine within the current region at
// loop scope Scope. Any invariant loads the affinity proof depends on
// (collected into AccessILS) must additionally pass
// onlyValidRequiredInvariantLoads.
542 bool ScopDetection::isAffine(const SCEV
*S
, Loop
*Scope
,
543 DetectionContext
&Context
) const {
544 InvariantLoadsSetTy AccessILS
;
545 if (!isAffineExpr(&Context
.CurRegion
, Scope
, S
, SE
, &AccessILS
))
548 if (!onlyValidRequiredInvariantLoads(AccessILS
, Context
))
554 bool ScopDetection::isValidSwitch(BasicBlock
&BB
, SwitchInst
*SI
,
555 Value
*Condition
, bool IsLoopBranch
,
556 DetectionContext
&Context
) const {
557 Loop
*L
= LI
.getLoopFor(&BB
);
558 const SCEV
*ConditionSCEV
= SE
.getSCEVAtScope(Condition
, L
);
560 if (IsLoopBranch
&& L
->isLoopLatch(&BB
))
563 // Check for invalid usage of different pointers in one expression.
564 if (involvesMultiplePtrs(ConditionSCEV
, nullptr, L
))
567 if (isAffine(ConditionSCEV
, L
, Context
))
570 if (AllowNonAffineSubRegions
&&
571 addOverApproximatedRegion(RI
.getRegionFor(&BB
), Context
))
574 return invalid
<ReportNonAffBranch
>(Context
, /*Assert=*/true, &BB
,
575 ConditionSCEV
, ConditionSCEV
, SI
);
578 bool ScopDetection::isValidBranch(BasicBlock
&BB
, BranchInst
*BI
,
579 Value
*Condition
, bool IsLoopBranch
,
580 DetectionContext
&Context
) {
581 // Constant integer conditions are always affine.
582 if (isa
<ConstantInt
>(Condition
))
585 if (BinaryOperator
*BinOp
= dyn_cast
<BinaryOperator
>(Condition
)) {
586 auto Opcode
= BinOp
->getOpcode();
587 if (Opcode
== Instruction::And
|| Opcode
== Instruction::Or
) {
588 Value
*Op0
= BinOp
->getOperand(0);
589 Value
*Op1
= BinOp
->getOperand(1);
590 return isValidBranch(BB
, BI
, Op0
, IsLoopBranch
, Context
) &&
591 isValidBranch(BB
, BI
, Op1
, IsLoopBranch
, Context
);
595 if (auto PHI
= dyn_cast
<PHINode
>(Condition
)) {
596 auto *Unique
= dyn_cast_or_null
<ConstantInt
>(
597 getUniqueNonErrorValue(PHI
, &Context
.CurRegion
, this));
598 if (Unique
&& (Unique
->isZero() || Unique
->isOne()))
602 if (auto Load
= dyn_cast
<LoadInst
>(Condition
))
603 if (!IsLoopBranch
&& Context
.CurRegion
.contains(Load
)) {
604 Context
.RequiredILS
.insert(Load
);
608 // Non constant conditions of branches need to be ICmpInst.
609 if (!isa
<ICmpInst
>(Condition
)) {
610 if (!IsLoopBranch
&& AllowNonAffineSubRegions
&&
611 addOverApproximatedRegion(RI
.getRegionFor(&BB
), Context
))
613 return invalid
<ReportInvalidCond
>(Context
, /*Assert=*/true, BI
, &BB
);
616 ICmpInst
*ICmp
= cast
<ICmpInst
>(Condition
);
618 // Are both operands of the ICmp affine?
619 if (isa
<UndefValue
>(ICmp
->getOperand(0)) ||
620 isa
<UndefValue
>(ICmp
->getOperand(1)))
621 return invalid
<ReportUndefOperand
>(Context
, /*Assert=*/true, &BB
, ICmp
);
623 Loop
*L
= LI
.getLoopFor(&BB
);
624 const SCEV
*LHS
= SE
.getSCEVAtScope(ICmp
->getOperand(0), L
);
625 const SCEV
*RHS
= SE
.getSCEVAtScope(ICmp
->getOperand(1), L
);
627 LHS
= tryForwardThroughPHI(LHS
, Context
.CurRegion
, SE
, this);
628 RHS
= tryForwardThroughPHI(RHS
, Context
.CurRegion
, SE
, this);
630 // If unsigned operations are not allowed try to approximate the region.
631 if (ICmp
->isUnsigned() && !PollyAllowUnsignedOperations
)
632 return !IsLoopBranch
&& AllowNonAffineSubRegions
&&
633 addOverApproximatedRegion(RI
.getRegionFor(&BB
), Context
);
635 // Check for invalid usage of different pointers in one expression.
636 if (ICmp
->isEquality() && involvesMultiplePtrs(LHS
, nullptr, L
) &&
637 involvesMultiplePtrs(RHS
, nullptr, L
))
640 // Check for invalid usage of different pointers in a relational comparison.
641 if (ICmp
->isRelational() && involvesMultiplePtrs(LHS
, RHS
, L
))
644 if (isAffine(LHS
, L
, Context
) && isAffine(RHS
, L
, Context
))
647 if (!IsLoopBranch
&& AllowNonAffineSubRegions
&&
648 addOverApproximatedRegion(RI
.getRegionFor(&BB
), Context
))
654 return invalid
<ReportNonAffBranch
>(Context
, /*Assert=*/true, &BB
, LHS
, RHS
,
658 bool ScopDetection::isValidCFG(BasicBlock
&BB
, bool IsLoopBranch
,
659 bool AllowUnreachable
,
660 DetectionContext
&Context
) {
661 Region
&CurRegion
= Context
.CurRegion
;
663 Instruction
*TI
= BB
.getTerminator();
665 if (AllowUnreachable
&& isa
<UnreachableInst
>(TI
))
668 // Return instructions are only valid if the region is the top level region.
669 if (isa
<ReturnInst
>(TI
) && CurRegion
.isTopLevelRegion())
672 Value
*Condition
= getConditionFromTerminator(TI
);
675 return invalid
<ReportInvalidTerminator
>(Context
, /*Assert=*/true, &BB
);
677 // UndefValue is not allowed as condition.
678 if (isa
<UndefValue
>(Condition
))
679 return invalid
<ReportUndefCond
>(Context
, /*Assert=*/true, TI
, &BB
);
681 if (BranchInst
*BI
= dyn_cast
<BranchInst
>(TI
))
682 return isValidBranch(BB
, BI
, Condition
, IsLoopBranch
, Context
);
684 SwitchInst
*SI
= dyn_cast
<SwitchInst
>(TI
);
685 assert(SI
&& "Terminator was neither branch nor switch");
687 return isValidSwitch(BB
, SI
, Condition
, IsLoopBranch
, Context
);
690 bool ScopDetection::isValidCallInst(CallInst
&CI
,
691 DetectionContext
&Context
) const {
692 if (CI
.doesNotReturn())
695 if (CI
.doesNotAccessMemory())
698 if (auto *II
= dyn_cast
<IntrinsicInst
>(&CI
))
699 if (isValidIntrinsicInst(*II
, Context
))
702 Function
*CalledFunction
= CI
.getCalledFunction();
704 // Indirect calls are not supported.
705 if (CalledFunction
== nullptr)
708 if (isDebugCall(&CI
)) {
709 POLLY_DEBUG(dbgs() << "Allow call to debug function: "
710 << CalledFunction
->getName() << '\n');
714 if (AllowModrefCall
) {
715 MemoryEffects ME
= AA
.getMemoryEffects(CalledFunction
);
716 if (ME
.onlyAccessesArgPointees()) {
717 for (const auto &Arg
: CI
.args()) {
718 if (!Arg
->getType()->isPointerTy())
721 // Bail if a pointer argument has a base address not known to
722 // ScalarEvolution. Note that a zero pointer is acceptable.
723 auto *ArgSCEV
= SE
.getSCEVAtScope(Arg
, LI
.getLoopFor(CI
.getParent()));
724 if (ArgSCEV
->isZero())
727 auto *BP
= dyn_cast
<SCEVUnknown
>(SE
.getPointerBase(ArgSCEV
));
731 // Implicitly disable delinearization since we have an unknown
732 // accesses with an unknown access function.
733 Context
.HasUnknownAccess
= true;
736 // Explicitly use addUnknown so we don't put a loop-variant
737 // pointer into the alias set.
738 Context
.AST
.addUnknown(&CI
);
742 if (ME
.onlyReadsMemory()) {
743 // Implicitly disable delinearization since we have an unknown
744 // accesses with an unknown access function.
745 Context
.HasUnknownAccess
= true;
746 // Explicitly use addUnknown so we don't put a loop-variant
747 // pointer into the alias set.
748 Context
.AST
.addUnknown(&CI
);
757 bool ScopDetection::isValidIntrinsicInst(IntrinsicInst
&II
,
758 DetectionContext
&Context
) const {
759 if (isIgnoredIntrinsic(&II
))
762 // The closest loop surrounding the call instruction.
763 Loop
*L
= LI
.getLoopFor(II
.getParent());
765 // The access function and base pointer for memory intrinsics.
767 const SCEVUnknown
*BP
;
769 switch (II
.getIntrinsicID()) {
770 // Memory intrinsics that can be represented are supported.
771 case Intrinsic::memmove
:
772 case Intrinsic::memcpy
:
773 AF
= SE
.getSCEVAtScope(cast
<MemTransferInst
>(II
).getSource(), L
);
775 BP
= dyn_cast
<SCEVUnknown
>(SE
.getPointerBase(AF
));
776 // Bail if the source pointer is not valid.
777 if (!isValidAccess(&II
, AF
, BP
, Context
))
781 case Intrinsic::memset
:
782 AF
= SE
.getSCEVAtScope(cast
<MemIntrinsic
>(II
).getDest(), L
);
784 BP
= dyn_cast
<SCEVUnknown
>(SE
.getPointerBase(AF
));
785 // Bail if the destination pointer is not valid.
786 if (!isValidAccess(&II
, AF
, BP
, Context
))
790 // Bail if the length is not affine.
791 if (!isAffine(SE
.getSCEVAtScope(cast
<MemIntrinsic
>(II
).getLength(), L
), L
,
// Check whether Val is invariant with respect to region Reg: arguments and
// constants trivially are; instructions outside the region are as well;
// loads inside the region are optimistically treated as hoistable and
// recorded in Ctx.RequiredILS for later validation.
803 bool ScopDetection::isInvariant(Value
&Val
, const Region
&Reg
,
804 DetectionContext
&Ctx
) const {
805 // A reference to function argument or constant value is invariant.
806 if (isa
<Argument
>(Val
) || isa
<Constant
>(Val
))
809 Instruction
*I
= dyn_cast
<Instruction
>(&Val
);
813 if (!Reg
.contains(I
))
816 // Loads within the SCoP may read arbitrary values, need to hoist them. If it
817 // is not hoistable, it will be rejected later, but here we assume it is and
818 // that makes the value invariant.
819 if (auto LI
= dyn_cast
<LoadInst
>(I
)) {
820 Ctx
.RequiredILS
.insert(LI
);
829 /// Remove smax of smax(0, size) expressions from a SCEV expression and
830 /// register the '...' components.
832 /// Array access expressions as they are generated by GFortran contain smax(0,
833 /// size) expressions that confuse the 'normal' delinearization algorithm.
834 /// However, if we extract such expressions before the normal delinearization
835 /// takes place they can actually help to identify array size expressions in
836 /// Fortran accesses. For the subsequently following delinearization the smax(0,
837 /// size) component can be replaced by just 'size'. This is correct as we will
838 /// always add and verify the assumption that for all subscript expressions
839 /// 'exp' the inequality 0 <= exp < size holds. Hence, we will also verify
840 /// that 0 <= size, which means smax(0, size) == size.
841 class SCEVRemoveMax final
: public SCEVRewriteVisitor
<SCEVRemoveMax
> {
843 SCEVRemoveMax(ScalarEvolution
&SE
, std::vector
<const SCEV
*> *Terms
)
844 : SCEVRewriteVisitor(SE
), Terms(Terms
) {}
846 static const SCEV
*rewrite(const SCEV
*Scev
, ScalarEvolution
&SE
,
847 std::vector
<const SCEV
*> *Terms
= nullptr) {
848 SCEVRemoveMax
Rewriter(SE
, Terms
);
849 return Rewriter
.visit(Scev
);
852 const SCEV
*visitSMaxExpr(const SCEVSMaxExpr
*Expr
) {
853 if ((Expr
->getNumOperands() == 2) && Expr
->getOperand(0)->isZero()) {
854 auto Res
= visit(Expr
->getOperand(1));
856 (*Terms
).push_back(Res
);
864 std::vector
<const SCEV
*> *Terms
;
868 SmallVector
<const SCEV
*, 4>
869 ScopDetection::getDelinearizationTerms(DetectionContext
&Context
,
870 const SCEVUnknown
*BasePointer
) const {
871 SmallVector
<const SCEV
*, 4> Terms
;
872 for (const auto &Pair
: Context
.Accesses
[BasePointer
]) {
873 std::vector
<const SCEV
*> MaxTerms
;
874 SCEVRemoveMax::rewrite(Pair
.second
, SE
, &MaxTerms
);
875 if (!MaxTerms
.empty()) {
876 Terms
.insert(Terms
.begin(), MaxTerms
.begin(), MaxTerms
.end());
879 // In case the outermost expression is a plain add, we check if any of its
880 // terms has the form 4 * %inst * %param * %param ..., aka a term that
881 // contains a product between a parameter and an instruction that is
882 // inside the scop. Such instructions, if allowed at all, are instructions
883 // SCEV can not represent, but Polly is still looking through. As a
884 // result, these instructions can depend on induction variables and are
885 // most likely no array sizes. However, terms that are multiplied with
886 // them are likely candidates for array sizes.
887 if (auto *AF
= dyn_cast
<SCEVAddExpr
>(Pair
.second
)) {
888 for (auto Op
: AF
->operands()) {
889 if (auto *AF2
= dyn_cast
<SCEVAddRecExpr
>(Op
))
890 collectParametricTerms(SE
, AF2
, Terms
);
891 if (auto *AF2
= dyn_cast
<SCEVMulExpr
>(Op
)) {
892 SmallVector
<const SCEV
*, 0> Operands
;
894 for (auto *MulOp
: AF2
->operands()) {
895 if (auto *Const
= dyn_cast
<SCEVConstant
>(MulOp
))
896 Operands
.push_back(Const
);
897 if (auto *Unknown
= dyn_cast
<SCEVUnknown
>(MulOp
)) {
898 if (auto *Inst
= dyn_cast
<Instruction
>(Unknown
->getValue())) {
899 if (!Context
.CurRegion
.contains(Inst
))
900 Operands
.push_back(MulOp
);
903 Operands
.push_back(MulOp
);
908 Terms
.push_back(SE
.getMulExpr(Operands
));
913 collectParametricTerms(SE
, Pair
.second
, Terms
);
918 bool ScopDetection::hasValidArraySizes(DetectionContext
&Context
,
919 SmallVectorImpl
<const SCEV
*> &Sizes
,
920 const SCEVUnknown
*BasePointer
,
922 // If no sizes were found, all sizes are trivially valid. We allow this case
923 // to make it possible to pass known-affine accesses to the delinearization to
924 // try to recover some interesting multi-dimensional accesses, but to still
925 // allow the already known to be affine access in case the delinearization
926 // fails. In such situations, the delinearization will just return a Sizes
927 // array of size zero.
928 if (Sizes
.size() == 0)
931 Value
*BaseValue
= BasePointer
->getValue();
932 Region
&CurRegion
= Context
.CurRegion
;
933 for (const SCEV
*DelinearizedSize
: Sizes
) {
934 // Don't pass down the scope to isAfffine; array dimensions must be
935 // invariant across the entire scop.
936 if (!isAffine(DelinearizedSize
, nullptr, Context
)) {
940 if (auto *Unknown
= dyn_cast
<SCEVUnknown
>(DelinearizedSize
)) {
941 auto *V
= dyn_cast
<Value
>(Unknown
->getValue());
942 if (auto *Load
= dyn_cast
<LoadInst
>(V
)) {
943 if (Context
.CurRegion
.contains(Load
) &&
944 isHoistableLoad(Load
, CurRegion
, LI
, SE
, DT
, Context
.RequiredILS
))
945 Context
.RequiredILS
.insert(Load
);
949 if (hasScalarDepsInsideRegion(DelinearizedSize
, &CurRegion
, Scope
, false,
950 Context
.RequiredILS
))
951 return invalid
<ReportNonAffineAccess
>(
952 Context
, /*Assert=*/true, DelinearizedSize
,
953 Context
.Accesses
[BasePointer
].front().first
, BaseValue
);
956 // No array shape derived.
961 for (const auto &Pair
: Context
.Accesses
[BasePointer
]) {
962 const Instruction
*Insn
= Pair
.first
;
963 const SCEV
*AF
= Pair
.second
;
965 if (!isAffine(AF
, Scope
, Context
)) {
966 invalid
<ReportNonAffineAccess
>(Context
, /*Assert=*/true, AF
, Insn
,
977 // We first store the resulting memory accesses in TempMemoryAccesses. Only
978 // if the access functions for all memory accesses have been successfully
979 // delinearized we continue. Otherwise, we either report a failure or, if
980 // non-affine accesses are allowed, we drop the information. In case the
981 // information is dropped the memory accesses need to be overapproximated
982 // when translated to a polyhedral representation.
983 bool ScopDetection::computeAccessFunctions(
984 DetectionContext
&Context
, const SCEVUnknown
*BasePointer
,
985 std::shared_ptr
<ArrayShape
> Shape
) const {
986 Value
*BaseValue
= BasePointer
->getValue();
987 bool BasePtrHasNonAffine
= false;
988 MapInsnToMemAcc TempMemoryAccesses
;
989 for (const auto &Pair
: Context
.Accesses
[BasePointer
]) {
990 const Instruction
*Insn
= Pair
.first
;
991 auto *AF
= Pair
.second
;
992 AF
= SCEVRemoveMax::rewrite(AF
, SE
);
993 bool IsNonAffine
= false;
994 TempMemoryAccesses
.insert(std::make_pair(Insn
, MemAcc(Insn
, Shape
)));
995 MemAcc
*Acc
= &TempMemoryAccesses
.find(Insn
)->second
;
996 auto *Scope
= LI
.getLoopFor(Insn
->getParent());
999 if (isAffine(Pair
.second
, Scope
, Context
))
1000 Acc
->DelinearizedSubscripts
.push_back(Pair
.second
);
1004 if (Shape
->DelinearizedSizes
.size() == 0) {
1005 Acc
->DelinearizedSubscripts
.push_back(AF
);
1007 llvm::computeAccessFunctions(SE
, AF
, Acc
->DelinearizedSubscripts
,
1008 Shape
->DelinearizedSizes
);
1009 if (Acc
->DelinearizedSubscripts
.size() == 0)
1012 for (const SCEV
*S
: Acc
->DelinearizedSubscripts
)
1013 if (!isAffine(S
, Scope
, Context
))
1017 // (Possibly) report non affine access
1019 BasePtrHasNonAffine
= true;
1020 if (!AllowNonAffine
) {
1021 invalid
<ReportNonAffineAccess
>(Context
, /*Assert=*/true, Pair
.second
,
1029 if (!BasePtrHasNonAffine
)
1030 Context
.InsnToMemAcc
.insert(TempMemoryAccesses
.begin(),
1031 TempMemoryAccesses
.end());
1036 bool ScopDetection::hasBaseAffineAccesses(DetectionContext
&Context
,
1037 const SCEVUnknown
*BasePointer
,
1038 Loop
*Scope
) const {
1039 auto Shape
= std::shared_ptr
<ArrayShape
>(new ArrayShape(BasePointer
));
1041 auto Terms
= getDelinearizationTerms(Context
, BasePointer
);
1043 findArrayDimensions(SE
, Terms
, Shape
->DelinearizedSizes
,
1044 Context
.ElementSize
[BasePointer
]);
1046 if (!hasValidArraySizes(Context
, Shape
->DelinearizedSizes
, BasePointer
,
1050 return computeAccessFunctions(Context
, BasePointer
, Shape
);
1053 bool ScopDetection::hasAffineMemoryAccesses(DetectionContext
&Context
) const {
1054 // TODO: If we have an unknown access and other non-affine accesses we do
1055 // not try to delinearize them for now.
1056 if (Context
.HasUnknownAccess
&& !Context
.NonAffineAccesses
.empty())
1057 return AllowNonAffine
;
1059 for (auto &Pair
: Context
.NonAffineAccesses
) {
1060 auto *BasePointer
= Pair
.first
;
1061 auto *Scope
= Pair
.second
;
1062 if (!hasBaseAffineAccesses(Context
, BasePointer
, Scope
)) {
1063 Context
.IsInvalid
= true;
1071 bool ScopDetection::isValidAccess(Instruction
*Inst
, const SCEV
*AF
,
1072 const SCEVUnknown
*BP
,
1073 DetectionContext
&Context
) const {
1076 return invalid
<ReportNoBasePtr
>(Context
, /*Assert=*/true, Inst
);
1078 auto *BV
= BP
->getValue();
1079 if (isa
<UndefValue
>(BV
))
1080 return invalid
<ReportUndefBasePtr
>(Context
, /*Assert=*/true, Inst
);
1082 // FIXME: Think about allowing IntToPtrInst
1083 if (IntToPtrInst
*Inst
= dyn_cast
<IntToPtrInst
>(BV
))
1084 return invalid
<ReportIntToPtr
>(Context
, /*Assert=*/true, Inst
);
1086 // Check that the base address of the access is invariant in the current
1088 if (!isInvariant(*BV
, Context
.CurRegion
, Context
))
1089 return invalid
<ReportVariantBasePtr
>(Context
, /*Assert=*/true, BV
, Inst
);
1091 AF
= SE
.getMinusSCEV(AF
, BP
);
1094 if (!isa
<MemIntrinsic
>(Inst
)) {
1095 Size
= SE
.getElementSize(Inst
);
1098 SE
.getEffectiveSCEVType(PointerType::getUnqual(SE
.getContext()));
1099 Size
= SE
.getConstant(SizeTy
, 8);
1102 if (Context
.ElementSize
[BP
]) {
1103 if (!AllowDifferentTypes
&& Context
.ElementSize
[BP
] != Size
)
1104 return invalid
<ReportDifferentArrayElementSize
>(Context
, /*Assert=*/true,
1107 Context
.ElementSize
[BP
] = SE
.getSMinExpr(Size
, Context
.ElementSize
[BP
]);
1109 Context
.ElementSize
[BP
] = Size
;
1112 bool IsVariantInNonAffineLoop
= false;
1113 SetVector
<const Loop
*> Loops
;
1114 findLoops(AF
, Loops
);
1115 for (const Loop
*L
: Loops
)
1116 if (Context
.BoxedLoopsSet
.count(L
))
1117 IsVariantInNonAffineLoop
= true;
1119 auto *Scope
= LI
.getLoopFor(Inst
->getParent());
1120 bool IsAffine
= !IsVariantInNonAffineLoop
&& isAffine(AF
, Scope
, Context
);
1121 // Do not try to delinearize memory intrinsics and force them to be affine.
1122 if (isa
<MemIntrinsic
>(Inst
) && !IsAffine
) {
1123 return invalid
<ReportNonAffineAccess
>(Context
, /*Assert=*/true, AF
, Inst
,
1125 } else if (PollyDelinearize
&& !IsVariantInNonAffineLoop
) {
1126 Context
.Accesses
[BP
].push_back({Inst
, AF
});
1129 Context
.NonAffineAccesses
.insert(
1130 std::make_pair(BP
, LI
.getLoopFor(Inst
->getParent())));
1131 } else if (!AllowNonAffine
&& !IsAffine
) {
1132 return invalid
<ReportNonAffineAccess
>(Context
, /*Assert=*/true, AF
, Inst
,
1139 // Check if the base pointer of the memory access does alias with
1140 // any other pointer. This cannot be handled at the moment.
1141 AAMDNodes AATags
= Inst
->getAAMetadata();
1142 AliasSet
&AS
= Context
.AST
.getAliasSetFor(
1143 MemoryLocation::getBeforeOrAfter(BP
->getValue(), AATags
));
1145 if (!AS
.isMustAlias()) {
1146 if (PollyUseRuntimeAliasChecks
) {
1147 bool CanBuildRunTimeCheck
= true;
1148 // The run-time alias check places code that involves the base pointer at
1149 // the beginning of the SCoP. This breaks if the base pointer is defined
1150 // inside the scop. Hence, we can only create a run-time check if we are
1151 // sure the base pointer is not an instruction defined inside the scop.
1152 // However, we can ignore loads that will be hoisted.
1154 auto ASPointers
= AS
.getPointers();
1156 InvariantLoadsSetTy VariantLS
, InvariantLS
;
1157 // In order to detect loads which are dependent on other invariant loads
1158 // as invariant, we use fixed-point iteration method here i.e we iterate
1159 // over the alias set for arbitrary number of times until it is safe to
1160 // assume that all the invariant loads have been detected
1162 const unsigned int VariantSize
= VariantLS
.size(),
1163 InvariantSize
= InvariantLS
.size();
1165 for (const Value
*Ptr
: ASPointers
) {
1166 Instruction
*Inst
= dyn_cast
<Instruction
>(const_cast<Value
*>(Ptr
));
1167 if (Inst
&& Context
.CurRegion
.contains(Inst
)) {
1168 auto *Load
= dyn_cast
<LoadInst
>(Inst
);
1169 if (Load
&& InvariantLS
.count(Load
))
1171 if (Load
&& isHoistableLoad(Load
, Context
.CurRegion
, LI
, SE
, DT
,
1173 if (VariantLS
.count(Load
))
1174 VariantLS
.remove(Load
);
1175 Context
.RequiredILS
.insert(Load
);
1176 InvariantLS
.insert(Load
);
1178 CanBuildRunTimeCheck
= false;
1179 VariantLS
.insert(Load
);
1184 if (InvariantSize
== InvariantLS
.size() &&
1185 VariantSize
== VariantLS
.size())
1189 if (CanBuildRunTimeCheck
)
1192 return invalid
<ReportAlias
>(Context
, /*Assert=*/true, Inst
, AS
);
1198 bool ScopDetection::isValidMemoryAccess(MemAccInst Inst
,
1199 DetectionContext
&Context
) const {
1200 Value
*Ptr
= Inst
.getPointerOperand();
1201 Loop
*L
= LI
.getLoopFor(Inst
->getParent());
1202 const SCEV
*AccessFunction
= SE
.getSCEVAtScope(Ptr
, L
);
1203 const SCEVUnknown
*BasePointer
;
1205 BasePointer
= dyn_cast
<SCEVUnknown
>(SE
.getPointerBase(AccessFunction
));
1207 return isValidAccess(Inst
, AccessFunction
, BasePointer
, Context
);
1210 bool ScopDetection::isValidInstruction(Instruction
&Inst
,
1211 DetectionContext
&Context
) {
1212 for (auto &Op
: Inst
.operands()) {
1213 auto *OpInst
= dyn_cast
<Instruction
>(&Op
);
1218 if (isErrorBlock(*OpInst
->getParent(), Context
.CurRegion
)) {
1219 auto *PHI
= dyn_cast
<PHINode
>(OpInst
);
1221 for (User
*U
: PHI
->users()) {
1222 auto *UI
= dyn_cast
<Instruction
>(U
);
1223 if (!UI
|| !UI
->isTerminator())
1232 if (isa
<LandingPadInst
>(&Inst
) || isa
<ResumeInst
>(&Inst
))
1235 // We only check the call instruction but not invoke instruction.
1236 if (CallInst
*CI
= dyn_cast
<CallInst
>(&Inst
)) {
1237 if (isValidCallInst(*CI
, Context
))
1240 return invalid
<ReportFuncCall
>(Context
, /*Assert=*/true, &Inst
);
1243 if (!Inst
.mayReadOrWriteMemory()) {
1244 if (!isa
<AllocaInst
>(Inst
))
1247 return invalid
<ReportAlloca
>(Context
, /*Assert=*/true, &Inst
);
1250 // Check the access function.
1251 if (auto MemInst
= MemAccInst::dyn_cast(Inst
)) {
1252 Context
.hasStores
|= isa
<StoreInst
>(MemInst
);
1253 Context
.hasLoads
|= isa
<LoadInst
>(MemInst
);
1254 if (!MemInst
.isSimple())
1255 return invalid
<ReportNonSimpleMemoryAccess
>(Context
, /*Assert=*/true,
1258 return isValidMemoryAccess(MemInst
, Context
);
1261 // We do not know this instruction, therefore we assume it is invalid.
1262 return invalid
<ReportUnknownInst
>(Context
, /*Assert=*/true, &Inst
);
1265 /// Check whether @p L has exiting blocks.
1267 /// @param L The loop of interest
1269 /// @return True if the loop has exiting blocks, false otherwise.
1270 static bool hasExitingBlocks(Loop
*L
) {
1271 SmallVector
<BasicBlock
*, 4> ExitingBlocks
;
1272 L
->getExitingBlocks(ExitingBlocks
);
1273 return !ExitingBlocks
.empty();
1276 bool ScopDetection::canUseISLTripCount(Loop
*L
, DetectionContext
&Context
) {
1277 // FIXME: Yes, this is bad. isValidCFG() may call invalid<Reason>() which
1278 // causes the SCoP to be rejected regardless on whether non-ISL trip counts
1279 // could be used. We currently preserve the legacy behaviour of rejecting
1280 // based on Context.Log.size() added by isValidCFG() or before, regardless on
1281 // whether the ISL trip count can be used or can be used as a non-affine
1282 // region. However, we allow rejections by isValidCFG() that do not result in
1283 // an error log entry.
1284 bool OldIsInvalid
= Context
.IsInvalid
;
1286 // Ensure the loop has valid exiting blocks as well as latches, otherwise we
1287 // need to overapproximate it as a boxed loop.
1288 SmallVector
<BasicBlock
*, 4> LoopControlBlocks
;
1289 L
->getExitingBlocks(LoopControlBlocks
);
1290 L
->getLoopLatches(LoopControlBlocks
);
1291 for (BasicBlock
*ControlBB
: LoopControlBlocks
) {
1292 if (!isValidCFG(*ControlBB
, true, false, Context
)) {
1293 Context
.IsInvalid
= OldIsInvalid
|| Context
.Log
.size();
1298 // We can use ISL to compute the trip count of L.
1299 Context
.IsInvalid
= OldIsInvalid
|| Context
.Log
.size();
1303 bool ScopDetection::isValidLoop(Loop
*L
, DetectionContext
&Context
) {
1304 // Loops that contain part but not all of the blocks of a region cannot be
1305 // handled by the schedule generation. Such loop constructs can happen
1306 // because a region can contain BBs that have no path to the exit block
1307 // (Infinite loops, UnreachableInst), but such blocks are never part of a
1311 // | Loop Header | <-----------.
1312 // --------------- |
1314 // _______________ ______________
1315 // | RegionEntry |-----> | RegionExit |----->
1316 // --------------- --------------
1319 // | EndlessLoop | <--.
1320 // --------------- |
1324 // In the example above, the loop (LoopHeader,RegionEntry,RegionExit) is
1325 // neither entirely contained in the region RegionEntry->RegionExit
1326 // (containing RegionEntry,EndlessLoop) nor is the region entirely contained
1328 // The block EndlessLoop is contained in the region because Region::contains
1329 // tests whether it is not dominated by RegionExit. This is probably to not
1330 // having to query the PostdominatorTree. Instead of an endless loop, a dead
1331 // end can also be formed by an UnreachableInst. This case is already caught
1332 // by isErrorBlock(). We hence only have to reject endless loops here.
1333 if (!hasExitingBlocks(L
))
1334 return invalid
<ReportLoopHasNoExit
>(Context
, /*Assert=*/true, L
);
1336 // The algorithm for domain construction assumes that loops has only a single
1337 // exit block (and hence corresponds to a subregion). Note that we cannot use
1338 // L->getExitBlock() because it does not check whether all exiting edges point
1340 SmallVector
<BasicBlock
*, 4> ExitBlocks
;
1341 L
->getExitBlocks(ExitBlocks
);
1342 BasicBlock
*TheExitBlock
= ExitBlocks
[0];
1343 for (BasicBlock
*ExitBB
: ExitBlocks
) {
1344 if (TheExitBlock
!= ExitBB
)
1345 return invalid
<ReportLoopHasMultipleExits
>(Context
, /*Assert=*/true, L
);
1348 if (canUseISLTripCount(L
, Context
))
1351 if (AllowNonAffineSubLoops
&& AllowNonAffineSubRegions
) {
1352 Region
*R
= RI
.getRegionFor(L
->getHeader());
1353 while (R
!= &Context
.CurRegion
&& !R
->contains(L
))
1356 if (addOverApproximatedRegion(R
, Context
))
1360 const SCEV
*LoopCount
= SE
.getBackedgeTakenCount(L
);
1361 return invalid
<ReportLoopBound
>(Context
, /*Assert=*/true, L
, LoopCount
);
1364 /// Return the number of loops in @p L (incl. @p L) that have a trip
1365 /// count that is not known to be less than @MinProfitableTrips.
1366 ScopDetection::LoopStats
1367 ScopDetection::countBeneficialSubLoops(Loop
*L
, ScalarEvolution
&SE
,
1368 unsigned MinProfitableTrips
) {
1369 auto *TripCount
= SE
.getBackedgeTakenCount(L
);
1372 int MaxLoopDepth
= 1;
1373 if (MinProfitableTrips
> 0)
1374 if (auto *TripCountC
= dyn_cast
<SCEVConstant
>(TripCount
))
1375 if (TripCountC
->getType()->getScalarSizeInBits() <= 64)
1376 if (TripCountC
->getValue()->getZExtValue() <= MinProfitableTrips
)
1379 for (auto &SubLoop
: *L
) {
1380 LoopStats Stats
= countBeneficialSubLoops(SubLoop
, SE
, MinProfitableTrips
);
1381 NumLoops
+= Stats
.NumLoops
;
1382 MaxLoopDepth
= std::max(MaxLoopDepth
, Stats
.MaxDepth
+ 1);
1385 return {NumLoops
, MaxLoopDepth
};
1388 ScopDetection::LoopStats
1389 ScopDetection::countBeneficialLoops(Region
*R
, ScalarEvolution
&SE
,
1390 LoopInfo
&LI
, unsigned MinProfitableTrips
) {
1392 int MaxLoopDepth
= 0;
1394 auto L
= LI
.getLoopFor(R
->getEntry());
1396 // If L is fully contained in R, move to first loop surrounding R. Otherwise,
1397 // L is either nullptr or already surrounding R.
1398 if (L
&& R
->contains(L
)) {
1399 L
= R
->outermostLoopInRegion(L
);
1400 L
= L
->getParentLoop();
1404 L
? L
->getSubLoopsVector() : std::vector
<Loop
*>(LI
.begin(), LI
.end());
1406 for (auto &SubLoop
: SubLoops
)
1407 if (R
->contains(SubLoop
)) {
1409 countBeneficialSubLoops(SubLoop
, SE
, MinProfitableTrips
);
1410 LoopNum
+= Stats
.NumLoops
;
1411 MaxLoopDepth
= std::max(MaxLoopDepth
, Stats
.MaxDepth
);
1414 return {LoopNum
, MaxLoopDepth
};
1417 static bool isErrorBlockImpl(BasicBlock
&BB
, const Region
&R
, LoopInfo
&LI
,
1418 const DominatorTree
&DT
) {
1419 if (isa
<UnreachableInst
>(BB
.getTerminator()))
1422 if (LI
.isLoopHeader(&BB
))
1425 // Don't consider something outside the SCoP as error block. It will precede
1426 // the code versioning runtime check.
1427 if (!R
.contains(&BB
))
1430 // Basic blocks that are always executed are not considered error blocks,
1431 // as their execution can not be a rare event.
1432 bool DominatesAllPredecessors
= true;
1433 if (R
.isTopLevelRegion()) {
1434 for (BasicBlock
&I
: *R
.getEntry()->getParent()) {
1435 if (isa
<ReturnInst
>(I
.getTerminator()) && !DT
.dominates(&BB
, &I
)) {
1436 DominatesAllPredecessors
= false;
1441 for (auto Pred
: predecessors(R
.getExit())) {
1442 if (R
.contains(Pred
) && !DT
.dominates(&BB
, Pred
)) {
1443 DominatesAllPredecessors
= false;
1449 if (DominatesAllPredecessors
)
1452 for (Instruction
&Inst
: BB
)
1453 if (CallInst
*CI
= dyn_cast
<CallInst
>(&Inst
)) {
1454 if (isDebugCall(CI
))
1457 if (isIgnoredIntrinsic(CI
))
1460 // memset, memcpy and memmove are modeled intrinsics.
1461 if (isa
<MemSetInst
>(CI
) || isa
<MemTransferInst
>(CI
))
1464 if (!CI
->doesNotAccessMemory())
1466 if (CI
->doesNotReturn())
1473 bool ScopDetection::isErrorBlock(llvm::BasicBlock
&BB
, const llvm::Region
&R
) {
1474 if (!PollyAllowErrorBlocks
)
1477 auto It
= ErrorBlockCache
.insert({std::make_pair(&BB
, &R
), false});
1479 return It
.first
->getSecond();
1481 bool Result
= isErrorBlockImpl(BB
, R
, LI
, DT
);
1482 It
.first
->second
= Result
;
1486 Region
*ScopDetection::expandRegion(Region
&R
) {
1487 // Initially, no valid region was found (greater than R)
1488 std::unique_ptr
<Region
> LastValidRegion
;
1489 auto ExpandedRegion
= std::unique_ptr
<Region
>(R
.getExpandedRegion());
1491 POLLY_DEBUG(dbgs() << "\tExpanding " << R
.getNameStr() << "\n");
1493 while (ExpandedRegion
) {
1494 BBPair P
= getBBPairForRegion(ExpandedRegion
.get());
1495 std::unique_ptr
<DetectionContext
> &Entry
= DetectionContextMap
[P
];
1496 Entry
= std::make_unique
<DetectionContext
>(*ExpandedRegion
, AA
,
1497 /*Verifying=*/false);
1498 DetectionContext
&Context
= *Entry
.get();
1500 POLLY_DEBUG(dbgs() << "\t\tTrying " << ExpandedRegion
->getNameStr()
1502 // Only expand when we did not collect errors.
1504 if (!Context
.Log
.hasErrors()) {
1505 // If the exit is valid check all blocks
1506 // - if true, a valid region was found => store it + keep expanding
1507 // - if false, .tbd. => stop (should this really end the loop?)
1508 if (!allBlocksValid(Context
) || Context
.Log
.hasErrors()) {
1509 removeCachedResults(*ExpandedRegion
);
1510 DetectionContextMap
.erase(P
);
1514 // Store this region, because it is the greatest valid (encountered so
1516 if (LastValidRegion
) {
1517 removeCachedResults(*LastValidRegion
);
1518 DetectionContextMap
.erase(P
);
1520 LastValidRegion
= std::move(ExpandedRegion
);
1522 // Create and test the next greater region (if any)
1524 std::unique_ptr
<Region
>(LastValidRegion
->getExpandedRegion());
1527 // Create and test the next greater region (if any)
1528 removeCachedResults(*ExpandedRegion
);
1529 DetectionContextMap
.erase(P
);
1531 std::unique_ptr
<Region
>(ExpandedRegion
->getExpandedRegion());
1536 if (LastValidRegion
)
1537 dbgs() << "\tto " << LastValidRegion
->getNameStr() << "\n";
1539 dbgs() << "\tExpanding " << R
.getNameStr() << " failed\n";
1542 return LastValidRegion
.release();
1545 static bool regionWithoutLoops(Region
&R
, LoopInfo
&LI
) {
1546 for (const BasicBlock
*BB
: R
.blocks())
1547 if (R
.contains(LI
.getLoopFor(BB
)))
1553 void ScopDetection::removeCachedResultsRecursively(const Region
&R
) {
1554 for (auto &SubRegion
: R
) {
1555 if (ValidRegions
.count(SubRegion
.get())) {
1556 removeCachedResults(*SubRegion
.get());
1558 removeCachedResultsRecursively(*SubRegion
);
1562 void ScopDetection::removeCachedResults(const Region
&R
) {
1563 ValidRegions
.remove(&R
);
1566 void ScopDetection::findScops(Region
&R
) {
1567 std::unique_ptr
<DetectionContext
> &Entry
=
1568 DetectionContextMap
[getBBPairForRegion(&R
)];
1569 Entry
= std::make_unique
<DetectionContext
>(R
, AA
, /*Verifying=*/false);
1570 DetectionContext
&Context
= *Entry
.get();
1572 bool DidBailout
= true;
1573 if (!PollyProcessUnprofitable
&& regionWithoutLoops(R
, LI
))
1574 invalid
<ReportUnprofitable
>(Context
, /*Assert=*/true, &R
);
1576 DidBailout
= !isValidRegion(Context
);
1580 assert((!DidBailout
|| Context
.IsInvalid
) &&
1581 "With -polly-detect-keep-going, it is sufficient that if "
1582 "isValidRegion short-circuited, that SCoP is invalid");
1584 assert(DidBailout
== Context
.IsInvalid
&&
1585 "isValidRegion must short-circuit iff the ScoP is invalid");
1588 if (Context
.IsInvalid
) {
1589 removeCachedResults(R
);
1591 ValidRegions
.insert(&R
);
1595 for (auto &SubRegion
: R
)
1596 findScops(*SubRegion
);
1598 // Try to expand regions.
1600 // As the region tree normally only contains canonical regions, non canonical
1601 // regions that form a Scop are not found. Therefore, those non canonical
1602 // regions are checked by expanding the canonical ones.
1604 std::vector
<Region
*> ToExpand
;
1606 for (auto &SubRegion
: R
)
1607 ToExpand
.push_back(SubRegion
.get());
1609 for (Region
*CurrentRegion
: ToExpand
) {
1610 // Skip invalid regions. Regions may become invalid, if they are element of
1611 // an already expanded region.
1612 if (!ValidRegions
.count(CurrentRegion
))
1615 // Skip regions that had errors.
1616 bool HadErrors
= lookupRejectionLog(CurrentRegion
)->hasErrors();
1620 Region
*ExpandedR
= expandRegion(*CurrentRegion
);
1625 R
.addSubRegion(ExpandedR
, true);
1626 ValidRegions
.insert(ExpandedR
);
1627 removeCachedResults(*CurrentRegion
);
1628 removeCachedResultsRecursively(*ExpandedR
);
1632 bool ScopDetection::allBlocksValid(DetectionContext
&Context
) {
1633 Region
&CurRegion
= Context
.CurRegion
;
1635 for (const BasicBlock
*BB
: CurRegion
.blocks()) {
1636 Loop
*L
= LI
.getLoopFor(BB
);
1637 if (L
&& L
->getHeader() == BB
) {
1638 if (CurRegion
.contains(L
)) {
1639 if (!isValidLoop(L
, Context
)) {
1640 Context
.IsInvalid
= true;
1645 SmallVector
<BasicBlock
*, 1> Latches
;
1646 L
->getLoopLatches(Latches
);
1647 for (BasicBlock
*Latch
: Latches
)
1648 if (CurRegion
.contains(Latch
))
1649 return invalid
<ReportLoopOnlySomeLatches
>(Context
, /*Assert=*/true,
1655 for (BasicBlock
*BB
: CurRegion
.blocks()) {
1656 bool IsErrorBlock
= isErrorBlock(*BB
, CurRegion
);
1658 // Also check exception blocks (and possibly register them as non-affine
1659 // regions). Even though exception blocks are not modeled, we use them
1660 // to forward-propagate domain constraints during ScopInfo construction.
1661 if (!isValidCFG(*BB
, false, IsErrorBlock
, Context
) && !KeepGoing
)
1667 for (BasicBlock::iterator I
= BB
->begin(), E
= --BB
->end(); I
!= E
; ++I
)
1668 if (!isValidInstruction(*I
, Context
)) {
1669 Context
.IsInvalid
= true;
1675 if (!hasAffineMemoryAccesses(Context
))
1681 bool ScopDetection::hasSufficientCompute(DetectionContext
&Context
,
1682 int NumLoops
) const {
1688 for (auto *BB
: Context
.CurRegion
.blocks())
1689 if (Context
.CurRegion
.contains(LI
.getLoopFor(BB
)))
1690 InstCount
+= BB
->size();
1692 InstCount
= InstCount
/ NumLoops
;
1694 return InstCount
>= ProfitabilityMinPerLoopInstructions
;
1697 bool ScopDetection::hasPossiblyDistributableLoop(
1698 DetectionContext
&Context
) const {
1699 for (auto *BB
: Context
.CurRegion
.blocks()) {
1700 auto *L
= LI
.getLoopFor(BB
);
1703 if (!Context
.CurRegion
.contains(L
))
1705 if (Context
.BoxedLoopsSet
.count(L
))
1707 unsigned StmtsWithStoresInLoops
= 0;
1708 for (auto *LBB
: L
->blocks()) {
1709 bool MemStore
= false;
1710 for (auto &I
: *LBB
)
1711 MemStore
|= isa
<StoreInst
>(&I
);
1712 StmtsWithStoresInLoops
+= MemStore
;
1714 return (StmtsWithStoresInLoops
> 1);
1719 bool ScopDetection::isProfitableRegion(DetectionContext
&Context
) const {
1720 Region
&CurRegion
= Context
.CurRegion
;
1722 if (PollyProcessUnprofitable
)
1725 // We can probably not do a lot on scops that only write or only read
1727 if (!Context
.hasStores
|| !Context
.hasLoads
)
1728 return invalid
<ReportUnprofitable
>(Context
, /*Assert=*/true, &CurRegion
);
1731 countBeneficialLoops(&CurRegion
, SE
, LI
, MIN_LOOP_TRIP_COUNT
).NumLoops
;
1732 int NumAffineLoops
= NumLoops
- Context
.BoxedLoopsSet
.size();
1734 // Scops with at least two loops may allow either loop fusion or tiling and
1735 // are consequently interesting to look at.
1736 if (NumAffineLoops
>= 2)
1739 // A loop with multiple non-trivial blocks might be amenable to distribution.
1740 if (NumAffineLoops
== 1 && hasPossiblyDistributableLoop(Context
))
1743 // Scops that contain a loop with a non-trivial amount of computation per
1744 // loop-iteration are interesting as we may be able to parallelize such
1745 // loops. Individual loops that have only a small amount of computation
1746 // per-iteration are performance-wise very fragile as any change to the
1747 // loop induction variables may affect performance. To not cause spurious
1748 // performance regressions, we do not consider such loops.
1749 if (NumAffineLoops
== 1 && hasSufficientCompute(Context
, NumLoops
))
1752 return invalid
<ReportUnprofitable
>(Context
, /*Assert=*/true, &CurRegion
);
1755 bool ScopDetection::isValidRegion(DetectionContext
&Context
) {
1756 Region
&CurRegion
= Context
.CurRegion
;
1758 POLLY_DEBUG(dbgs() << "Checking region: " << CurRegion
.getNameStr()
1761 if (!PollyAllowFullFunction
&& CurRegion
.isTopLevelRegion()) {
1762 POLLY_DEBUG(dbgs() << "Top level region is invalid\n");
1763 Context
.IsInvalid
= true;
1768 if (CurRegion
.getExit() &&
1769 isa
<UnreachableInst
>(CurRegion
.getExit()->getTerminator())) {
1770 POLLY_DEBUG(dbgs() << "Unreachable in exit\n");
1771 return invalid
<ReportUnreachableInExit
>(Context
, /*Assert=*/true,
1772 CurRegion
.getExit(), DbgLoc
);
1775 if (!OnlyRegion
.empty() &&
1776 !CurRegion
.getEntry()->getName().count(OnlyRegion
)) {
1778 dbgs() << "Region entry does not match -polly-only-region";
1781 Context
.IsInvalid
= true;
1785 for (BasicBlock
*Pred
: predecessors(CurRegion
.getEntry())) {
1786 Instruction
*PredTerm
= Pred
->getTerminator();
1787 if (isa
<IndirectBrInst
>(PredTerm
) || isa
<CallBrInst
>(PredTerm
))
1788 return invalid
<ReportIndirectPredecessor
>(
1789 Context
, /*Assert=*/true, PredTerm
, PredTerm
->getDebugLoc());
1792 // SCoP cannot contain the entry block of the function, because we need
1793 // to insert alloca instruction there when translate scalar to array.
1794 if (!PollyAllowFullFunction
&&
1795 CurRegion
.getEntry() ==
1796 &(CurRegion
.getEntry()->getParent()->getEntryBlock()))
1797 return invalid
<ReportEntry
>(Context
, /*Assert=*/true, CurRegion
.getEntry());
1799 if (!allBlocksValid(Context
)) {
1800 // TODO: Every failure condition within allBlocksValid should call
1801 // invalid<Reason>(). Otherwise we reject SCoPs without giving feedback to
1803 Context
.IsInvalid
= true;
1807 if (!isReducibleRegion(CurRegion
, DbgLoc
))
1808 return invalid
<ReportIrreducibleRegion
>(Context
, /*Assert=*/true,
1809 &CurRegion
, DbgLoc
);
1811 POLLY_DEBUG(dbgs() << "OK\n");
1815 void ScopDetection::markFunctionAsInvalid(Function
*F
) {
1816 F
->addFnAttr(PollySkipFnAttr
);
1819 bool ScopDetection::isValidFunction(Function
&F
) {
1820 return !F
.hasFnAttribute(PollySkipFnAttr
);
1823 void ScopDetection::printLocations(Function
&F
) {
1824 for (const Region
*R
: *this) {
1825 unsigned LineEntry
, LineExit
;
1826 std::string FileName
;
1828 getDebugLocation(R
, LineEntry
, LineExit
, FileName
);
1829 DiagnosticScopFound
Diagnostic(F
, FileName
, LineEntry
, LineExit
);
1830 F
.getContext().diagnose(Diagnostic
);
1834 void ScopDetection::emitMissedRemarks(const Function
&F
) {
1835 for (auto &DIt
: DetectionContextMap
) {
1836 DetectionContext
&DC
= *DIt
.getSecond().get();
1837 if (DC
.Log
.hasErrors())
1838 emitRejectionRemarks(DIt
.getFirst(), DC
.Log
, ORE
);
1842 bool ScopDetection::isReducibleRegion(Region
&R
, DebugLoc
&DbgLoc
) const {
1843 /// Enum for coloring BBs in Region.
1845 /// WHITE - Unvisited BB in DFS walk.
1846 /// GREY - BBs which are currently on the DFS stack for processing.
1847 /// BLACK - Visited and completely processed BB.
1848 enum Color
{ WHITE
, GREY
, BLACK
};
1850 BasicBlock
*REntry
= R
.getEntry();
1851 BasicBlock
*RExit
= R
.getExit();
1852 // Map to match the color of a BasicBlock during the DFS walk.
1853 DenseMap
<const BasicBlock
*, Color
> BBColorMap
;
1854 // Stack keeping track of current BB and index of next child to be processed.
1855 std::stack
<std::pair
<BasicBlock
*, unsigned>> DFSStack
;
1857 unsigned AdjacentBlockIndex
= 0;
1858 BasicBlock
*CurrBB
, *SuccBB
;
1861 // Initialize the map for all BB with WHITE color.
1862 for (auto *BB
: R
.blocks())
1863 BBColorMap
[BB
] = WHITE
;
1865 // Process the entry block of the Region.
1866 BBColorMap
[CurrBB
] = GREY
;
1867 DFSStack
.push(std::make_pair(CurrBB
, 0));
1869 while (!DFSStack
.empty()) {
1870 // Get next BB on stack to be processed.
1871 CurrBB
= DFSStack
.top().first
;
1872 AdjacentBlockIndex
= DFSStack
.top().second
;
1875 // Loop to iterate over the successors of current BB.
1876 const Instruction
*TInst
= CurrBB
->getTerminator();
1877 unsigned NSucc
= TInst
->getNumSuccessors();
1878 for (unsigned I
= AdjacentBlockIndex
; I
< NSucc
;
1879 ++I
, ++AdjacentBlockIndex
) {
1880 SuccBB
= TInst
->getSuccessor(I
);
1882 // Checks for region exit block and self-loops in BB.
1883 if (SuccBB
== RExit
|| SuccBB
== CurrBB
)
1886 // WHITE indicates an unvisited BB in DFS walk.
1887 if (BBColorMap
[SuccBB
] == WHITE
) {
1888 // Push the current BB and the index of the next child to be visited.
1889 DFSStack
.push(std::make_pair(CurrBB
, I
+ 1));
1890 // Push the next BB to be processed.
1891 DFSStack
.push(std::make_pair(SuccBB
, 0));
1892 // First time the BB is being processed.
1893 BBColorMap
[SuccBB
] = GREY
;
1895 } else if (BBColorMap
[SuccBB
] == GREY
) {
1896 // GREY indicates a loop in the control flow.
1897 // If the destination dominates the source, it is a natural loop
1898 // else, an irreducible control flow in the region is detected.
1899 if (!DT
.dominates(SuccBB
, CurrBB
)) {
1900 // Get debug info of instruction which causes irregular control flow.
1901 DbgLoc
= TInst
->getDebugLoc();
1907 // If all children of current BB have been processed,
1908 // then mark that BB as fully processed.
1909 if (AdjacentBlockIndex
== NSucc
)
1910 BBColorMap
[CurrBB
] = BLACK
;
1916 static void updateLoopCountStatistic(ScopDetection::LoopStats Stats
,
1917 bool OnlyProfitable
) {
1918 if (!OnlyProfitable
) {
1919 NumLoopsInScop
+= Stats
.NumLoops
;
1921 std::max(MaxNumLoopsInScop
.getValue(), (uint64_t)Stats
.NumLoops
);
1922 if (Stats
.MaxDepth
== 0)
1923 NumScopsDepthZero
++;
1924 else if (Stats
.MaxDepth
== 1)
1926 else if (Stats
.MaxDepth
== 2)
1928 else if (Stats
.MaxDepth
== 3)
1929 NumScopsDepthThree
++;
1930 else if (Stats
.MaxDepth
== 4)
1931 NumScopsDepthFour
++;
1932 else if (Stats
.MaxDepth
== 5)
1933 NumScopsDepthFive
++;
1935 NumScopsDepthLarger
++;
1937 NumLoopsInProfScop
+= Stats
.NumLoops
;
1938 MaxNumLoopsInProfScop
=
1939 std::max(MaxNumLoopsInProfScop
.getValue(), (uint64_t)Stats
.NumLoops
);
1940 if (Stats
.MaxDepth
== 0)
1941 NumProfScopsDepthZero
++;
1942 else if (Stats
.MaxDepth
== 1)
1943 NumProfScopsDepthOne
++;
1944 else if (Stats
.MaxDepth
== 2)
1945 NumProfScopsDepthTwo
++;
1946 else if (Stats
.MaxDepth
== 3)
1947 NumProfScopsDepthThree
++;
1948 else if (Stats
.MaxDepth
== 4)
1949 NumProfScopsDepthFour
++;
1950 else if (Stats
.MaxDepth
== 5)
1951 NumProfScopsDepthFive
++;
1953 NumProfScopsDepthLarger
++;
1957 ScopDetection::DetectionContext
*
1958 ScopDetection::getDetectionContext(const Region
*R
) const {
1959 auto DCMIt
= DetectionContextMap
.find(getBBPairForRegion(R
));
1960 if (DCMIt
== DetectionContextMap
.end())
1962 return DCMIt
->second
.get();
1965 const RejectLog
*ScopDetection::lookupRejectionLog(const Region
*R
) const {
1966 const DetectionContext
*DC
= getDetectionContext(R
);
1967 return DC
? &DC
->Log
: nullptr;
1970 void ScopDetection::verifyRegion(const Region
&R
) {
1971 assert(isMaxRegionInScop(R
) && "Expect R is a valid region.");
1973 DetectionContext
Context(const_cast<Region
&>(R
), AA
, true /*verifying*/);
1974 isValidRegion(Context
);
1977 void ScopDetection::verifyAnalysis() {
1981 for (const Region
*R
: ValidRegions
)
1985 bool ScopDetectionWrapperPass::runOnFunction(Function
&F
) {
1986 auto &LI
= getAnalysis
<LoopInfoWrapperPass
>().getLoopInfo();
1987 auto &RI
= getAnalysis
<RegionInfoPass
>().getRegionInfo();
1988 auto &AA
= getAnalysis
<AAResultsWrapperPass
>().getAAResults();
1989 auto &SE
= getAnalysis
<ScalarEvolutionWrapperPass
>().getSE();
1990 auto &DT
= getAnalysis
<DominatorTreeWrapperPass
>().getDomTree();
1991 auto &ORE
= getAnalysis
<OptimizationRemarkEmitterWrapperPass
>().getORE();
1993 Result
= std::make_unique
<ScopDetection
>(DT
, SE
, LI
, RI
, AA
, ORE
);
1998 void ScopDetectionWrapperPass::getAnalysisUsage(AnalysisUsage
&AU
) const {
1999 AU
.addRequired
<LoopInfoWrapperPass
>();
2000 AU
.addRequiredTransitive
<ScalarEvolutionWrapperPass
>();
2001 AU
.addRequired
<DominatorTreeWrapperPass
>();
2002 AU
.addRequired
<OptimizationRemarkEmitterWrapperPass
>();
2003 // We also need AA and RegionInfo when we are verifying analysis.
2004 AU
.addRequiredTransitive
<AAResultsWrapperPass
>();
2005 AU
.addRequiredTransitive
<RegionInfoPass
>();
2006 AU
.setPreservesAll();
2009 void ScopDetectionWrapperPass::print(raw_ostream
&OS
, const Module
*) const {
2010 for (const Region
*R
: Result
->ValidRegions
)
2011 OS
<< "Valid Region for Scop: " << R
->getNameStr() << '\n';
2016 ScopDetectionWrapperPass::ScopDetectionWrapperPass() : FunctionPass(ID
) {
2017 // Disable runtime alias checks if we ignore aliasing all together.
2019 PollyUseRuntimeAliasChecks
= false;
2022 ScopAnalysis::ScopAnalysis() {
2023 // Disable runtime alias checks if we ignore aliasing all together.
2025 PollyUseRuntimeAliasChecks
= false;
2028 void ScopDetectionWrapperPass::releaseMemory() { Result
.reset(); }
// Pass identifier used by the legacy pass manager infrastructure.
char ScopDetectionWrapperPass::ID;

// Unique key identifying ScopAnalysis to the new pass manager.
AnalysisKey ScopAnalysis::Key;
2034 ScopDetection
ScopAnalysis::run(Function
&F
, FunctionAnalysisManager
&FAM
) {
2035 auto &LI
= FAM
.getResult
<LoopAnalysis
>(F
);
2036 auto &RI
= FAM
.getResult
<RegionInfoAnalysis
>(F
);
2037 auto &AA
= FAM
.getResult
<AAManager
>(F
);
2038 auto &SE
= FAM
.getResult
<ScalarEvolutionAnalysis
>(F
);
2039 auto &DT
= FAM
.getResult
<DominatorTreeAnalysis
>(F
);
2040 auto &ORE
= FAM
.getResult
<OptimizationRemarkEmitterAnalysis
>(F
);
2042 ScopDetection
Result(DT
, SE
, LI
, RI
, AA
, ORE
);
2047 PreservedAnalyses
ScopAnalysisPrinterPass::run(Function
&F
,
2048 FunctionAnalysisManager
&FAM
) {
2049 OS
<< "Detected Scops in Function " << F
.getName() << "\n";
2050 auto &SD
= FAM
.getResult
<ScopAnalysis
>(F
);
2051 for (const Region
*R
: SD
.ValidRegions
)
2052 OS
<< "Valid Region for Scop: " << R
->getNameStr() << '\n';
2055 return PreservedAnalyses::all();
2058 Pass
*polly::createScopDetectionWrapperPassPass() {
2059 return new ScopDetectionWrapperPass();
2062 INITIALIZE_PASS_BEGIN(ScopDetectionWrapperPass
, "polly-detect",
2063 "Polly - Detect static control parts (SCoPs)", false,
2065 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass
);
2066 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass
);
2067 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass
);
2068 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass
);
2069 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass
);
2070 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass
);
2071 INITIALIZE_PASS_END(ScopDetectionWrapperPass
, "polly-detect",
2072 "Polly - Detect static control parts (SCoPs)", false, false)
2074 //===----------------------------------------------------------------------===//
2077 /// Print result from ScopDetectionWrapperPass.
2078 class ScopDetectionPrinterLegacyPass final
: public FunctionPass
{
2082 ScopDetectionPrinterLegacyPass() : ScopDetectionPrinterLegacyPass(outs()) {}
2084 explicit ScopDetectionPrinterLegacyPass(llvm::raw_ostream
&OS
)
2085 : FunctionPass(ID
), OS(OS
) {}
2087 bool runOnFunction(Function
&F
) override
{
2088 ScopDetectionWrapperPass
&P
= getAnalysis
<ScopDetectionWrapperPass
>();
2090 OS
<< "Printing analysis '" << P
.getPassName() << "' for function '"
2091 << F
.getName() << "':\n";
2097 void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
2098 FunctionPass::getAnalysisUsage(AU
);
2099 AU
.addRequired
<ScopDetectionWrapperPass
>();
2100 AU
.setPreservesAll();
2104 llvm::raw_ostream
&OS
;
// Pass identifier used by the legacy pass manager infrastructure.
char ScopDetectionPrinterLegacyPass::ID = 0;
2110 Pass
*polly::createScopDetectionPrinterLegacyPass(raw_ostream
&OS
) {
2111 return new ScopDetectionPrinterLegacyPass(OS
);
2114 INITIALIZE_PASS_BEGIN(ScopDetectionPrinterLegacyPass
, "polly-print-detect",
2115 "Polly - Print static control parts (SCoPs)", false,
2117 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass
);
2118 INITIALIZE_PASS_END(ScopDetectionPrinterLegacyPass
, "polly-print-detect",
2119 "Polly - Print static control parts (SCoPs)", false, false)