//===- Parsing, selection, and construction of pass pipelines --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Error.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
class ModuleSummaryIndex;
/// A struct capturing PGO tunables.
struct PGOOptions {
  enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
  enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };
  PGOOptions(std::string ProfileFile = "", std::string CSProfileGenFile = "",
             std::string ProfileRemappingFile = "",
             PGOAction Action = NoAction, CSPGOAction CSAction = NoCSAction,
             bool SamplePGOSupport = false)
      : ProfileFile(ProfileFile), CSProfileGenFile(CSProfileGenFile),
        ProfileRemappingFile(ProfileRemappingFile), Action(Action),
        CSAction(CSAction),
        SamplePGOSupport(SamplePGOSupport || Action == SampleUse) {
    // Note: we do allow ProfileFile.empty() for Action=IRUse, since LTO can
    // call back with the IRUse action without a ProfileFile.

    // If there is a CSAction, PGOAction cannot be IRInstr or SampleUse.
    assert(this->CSAction == NoCSAction ||
           (this->Action != IRInstr && this->Action != SampleUse));

    // For CSIRInstr, CSProfileGenFile also needs to be nonempty.
    assert(this->CSAction != CSIRInstr || !this->CSProfileGenFile.empty());

    // If CSAction is CSIRUse, PGOAction needs to be IRUse as they share
    // the same profile.
    assert(this->CSAction != CSIRUse || this->Action == IRUse);

    // If neither Action nor CSAction is set, SamplePGOSupport needs to be true.
    assert(this->Action != NoAction || this->CSAction != NoCSAction ||
           this->SamplePGOSupport);
  }
  std::string ProfileFile;
  std::string CSProfileGenFile;
  std::string ProfileRemappingFile;
  PGOAction Action;
  CSPGOAction CSAction;
  bool SamplePGOSupport;
};
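
// A minimal usage sketch (not part of the original header): the asserts above
// admit, for example, plain IR instrumentation where ProfileFile names the
// output profile, or context-sensitive instrumentation layered on top of an
// IR profile (CSIRInstr requires a nonempty CSProfileGenFile and an IRUse
// action). The file names below are placeholders.
//
//   PGOOptions GenOpts("default.profraw", "", "", PGOOptions::IRInstr);
//   PGOOptions CSOpts("code.profdata", "cs.profraw", "", PGOOptions::IRUse,
//                     PGOOptions::CSIRInstr);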
/// Tunable parameters for passes in the default pipelines.
class PipelineTuningOptions {
public:
  /// Constructor sets pipeline tuning defaults based on cl::opts. Each option
  /// can be set in the PassBuilder when using LLVM as a library.
  PipelineTuningOptions();

  /// Tuning option to set loop interleaving on/off. Its default value is that
  /// of the flag: `-interleave-loops`.
  bool LoopInterleaving;

  /// Tuning option to enable/disable loop vectorization. Its default value is
  /// that of the flag: `-vectorize-loops`.
  bool LoopVectorization;

  /// Tuning option to enable/disable SLP vectorization. Its default value is
  /// that of the flag: `-vectorize-slp`.
  bool SLPVectorization;

  /// Tuning option to enable/disable loop unrolling. Its default value is
  /// true.
  bool LoopUnrolling;

  /// Tuning option to forget all SCEV loops in LoopUnroll. Its default value
  /// is that of the flag: `-forget-scev-loop-unroll`.
  bool ForgetAllSCEVInLoopUnroll;

  /// Tuning option to cap the number of calls to retrieve clobbering accesses
  /// in MemorySSA, in LICM.
  unsigned LicmMssaOptCap;

  /// Tuning option to disable promotion to scalars in LICM with MemorySSA, if
  /// the number of accesses is too large.
  unsigned LicmMssaNoAccForPromotionCap;
};
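
// A sketch (not part of the original header) of overriding the cl::opt-derived
// defaults when embedding LLVM as a library; the PassBuilder constructor below
// accepts the resulting options:
//
//   PipelineTuningOptions PTO;
//   PTO.LoopVectorization = false;
//   PTO.SLPVectorization = false;
//   PassBuilder PB(/*TM=*/nullptr, PTO);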
/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  TargetMachine *TM;
  PipelineTuningOptions PTO;
  Optional<PGOOptions> PGOOpt;
  PassInstrumentationCallbacks *PIC;

public:
  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };
  /// ThinLTO phase.
  ///
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    None,
    /// ThinLTO prelink (summary) phase.
    PreLink,
    /// ThinLTO postlink (backend compile) phase.
    PostLink
  };
  /// LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.
    O0,

    /// Optimize quickly without destroying debuggability.
    ///
    /// FIXME: The current and historical behavior of this level does *not*
    /// agree with this goal, but we would like to move toward this goal in the
    /// future.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion might not make sense here due to the degree to
    /// which the executed code would differ from the source code, and the
    /// potential compile time cost.
    O1,

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time needs to not grow superlinearly
    /// with the size of input to LLVM so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.
    O2,

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs in the compile time or code size in some cases.
    O3,

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.
    Os,

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
    Oz
  };
  explicit PassBuilder(TargetMachine *TM = nullptr,
                       PipelineTuningOptions PTO = PipelineTuningOptions(),
                       Optional<PGOOptions> PGOOpt = None,
                       PassInstrumentationCallbacks *PIC = nullptr)
      : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) {}
  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);
  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// CGSCCAnalysisManager with all registered CGSCC analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers
  /// can still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);
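
  // Typical standalone setup (a sketch, not part of the original header):
  // create the four analysis managers, register the analyses declared above,
  // and cross-register the proxies before building or parsing any pipeline.
  //
  //   LoopAnalysisManager LAM;
  //   FunctionAnalysisManager FAM;
  //   CGSCCAnalysisManager CGAM;
  //   ModuleAnalysisManager MAM;
  //   PassBuilder PB;
  //   PB.registerModuleAnalyses(MAM);
  //   PB.registerCGSCCAnalyses(CGAM);
  //   PB.registerFunctionAnalyses(FAM);
  //   PB.registerLoopAnalyses(LAM);
  //   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);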
  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinLTOPhase Phase,
                                      bool DebugLogging = false);
  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    ThinLTOPhase Phase,
                                    bool DebugLogging = false);
  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing the runtime performance of the
  /// code or seriously growing its code size.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false,
                                                    bool LTOPreLink = false);
  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to the frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 respectively.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false,
                                                  bool LTOPreLink = false);
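
  // With the analysis managers wired up as sketched above, a default -O2 style
  // run looks roughly like this (a sketch; `M` is an llvm::Module assumed to
  // be in scope):
  //
  //   ModulePassManager MPM =
  //       PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
  //   MPM.run(M, MAM);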
  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);
  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
                              const ModuleSummaryIndex *ImportSummary);
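
  // A rough sketch of how the two ThinLTO pipelines pair up (names such as
  // `CombinedIndex` are placeholders): the pre-link pipeline runs per module
  // before the thin link, and this backend pipeline runs afterwards with the
  // combined summary index.
  //
  //   ModulePassManager PreLink =
  //       PB.buildThinLTOPreLinkDefaultPipeline(PassBuilder::O2);
  //   // ... thin link produces a ModuleSummaryIndex `CombinedIndex` ...
  //   ModulePassManager Backend = PB.buildThinLTODefaultPipeline(
  //       PassBuilder::O2, /*DebugLogging=*/false, &CombinedIndex);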
  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);
  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            bool DebugLogging,
                                            ModuleSummaryIndex *ExportSummary);
  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();
  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nesting structure of passes. All
  /// passes are comma separated. As a special shortcut, if the very first pass
  /// is not a module pass (as a module pass manager is), this will
  /// automatically form the shortest stack of pass managers that allow
  /// inserting that first pass. So, assuming function passes 'fpassN', CGSCC
  /// passes 'cgpassN', and loop passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the sequence of passes isn't all the exact same kind of pass, it will
  /// be an error. You cannot mix different levels implicitly; you must
  /// explicitly form a pass manager in which to nest passes.
  Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
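
  // A sketch of parsing a textual pipeline into a fresh ModulePassManager;
  // parse failures come back as llvm::Error (handled here with toString purely
  // for illustration):
  //
  //   ModulePassManager MPM;
  //   if (Error Err =
  //           PB.parsePassPipeline(MPM, "function(instcombine,sroa),globaldce"))
  //     errs() << toString(std::move(Err)) << "\n";
  //   else
  //     MPM.run(M, MAM);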
  /// @{ Parse a textual pass pipeline description into a specific PassManager.
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  /// @}
  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns an error if the text cannot be parsed cleanly. The specific state
  /// of the \p AA manager is unspecified if such an error is encountered.
  Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
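
  // A sketch of building a custom AA stack from text and handing it to the
  // FunctionAnalysisManager set up earlier; the AAManager analysis must be
  // registered before the pipeline runs:
  //
  //   AAManager AA;
  //   if (Error Err = PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
  //     errs() << toString(std::move(Err)) << "\n";
  //   else
  //     FAM.registerPass([&] { return std::move(AA); });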
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }
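
  // A sketch of hooking this extension point; MyPeepholePass is a hypothetical
  // function pass standing in for whatever cleanup the client wants to run
  // after each instruction-combiner instance:
  //
  //   PB.registerPeepholeEPCallback(
  //       [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
  //         FPM.addPass(MyPeepholePass());
  //       });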
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added here must be an instance
  /// of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes
  /// run by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations once at the start of the
  /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
  /// link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the very end of the
  /// function optimization pipeline. A key difference between this and the
  /// legacy PassManager's OptimizerLast callback is that this extension point
  /// is not triggered at O0. Extensions to the O0 pipeline should append their
  /// passes to the end of the overall pipeline.
  void registerOptimizerLastEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    OptimizerLastEPCallbacks.push_back(C);
  }
  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA.
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }
  /// @{ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  ///
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}
  /// @{ Register pipeline parsing callbacks with this pass builder instance.
  ///
  /// Using these callbacks, callers can parse both a single pass name and
  /// entire sub-pipelines, and populate the PassManager instance accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}
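
  // A sketch of teaching the parser a new pass name; "my-pass" and MyPass are
  // hypothetical and stand in for a client-provided function pass:
  //
  //   PB.registerPipelineParsingCallback(
  //       [](StringRef Name, FunctionPassManager &FPM,
  //          ArrayRef<PassBuilder::PipelineElement>) {
  //         if (Name == "my-pass") {
  //           FPM.addPass(MyPass());
  //           return true;
  //         }
  //         return false;
  //       });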
  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  }
  /// Add PGOInstrumentation passes for O0 only.
  void addPGOInstrPassesForO0(ModulePassManager &MPM, bool DebugLogging,
                              bool RunProfileGen, bool IsCS,
                              std::string ProfileFile,
                              std::string ProfileRemappingFile);

private:
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);
  Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                        bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                          bool VerifyEachPass, bool DebugLogging);
  Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);
  Error parseLoopPassPipeline(LoopPassManager &LPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPassPipeline(FunctionPassManager &FPM,
                                  ArrayRef<PipelineElement> Pipeline,
                                  bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);
  Error parseModulePassPipeline(ModulePassManager &MPM,
                                ArrayRef<PipelineElement> Pipeline,
                                bool VerifyEachPass, bool DebugLogging);

  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen, bool IsCS,
                         std::string ProfileFile,
                         std::string ProfileRemappingFile);
  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      OptimizerLastEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
              2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};
/// This utility template takes care of adding require<> and invalidate<>
/// passes for an analysis to a given \c PassManager. It is intended to be used
/// during parsing of a pass pipeline when parsing a single PipelineName.
/// When registering a new function analysis FancyAnalysis with the pass
/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
/// like this:
///
/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
///                                   ArrayRef<PipelineElement> P) {
///   if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
///                                                 FPM))
///     return true;
///   return false;
/// }
template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
          typename... ExtraArgTs>
bool parseAnalysisUtilityPasses(
    StringRef AnalysisName, StringRef PipelineName,
    PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
  if (!PipelineName.endswith(">"))
    return false;

  // See if this is an invalidate<> pass name
  if (PipelineName.startswith("invalidate<")) {
    PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(InvalidateAnalysisPass<AnalysisT>());
    return true;
  }

  // See if this is a require<> pass name
  if (PipelineName.startswith("require<")) {
    PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,