//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Error.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
class ModuleSummaryIndex;
/// A struct capturing PGO tunables.
struct PGOOptions {
  PGOOptions(std::string ProfileGenFile = "", std::string ProfileUseFile = "",
             std::string SampleProfileFile = "",
             std::string ProfileRemappingFile = "",
             bool RunProfileGen = false, bool SamplePGOSupport = false)
      : ProfileGenFile(ProfileGenFile), ProfileUseFile(ProfileUseFile),
        SampleProfileFile(SampleProfileFile),
        ProfileRemappingFile(ProfileRemappingFile),
        RunProfileGen(RunProfileGen),
        SamplePGOSupport(SamplePGOSupport || !SampleProfileFile.empty()) {
    assert((RunProfileGen || !SampleProfileFile.empty() ||
            !ProfileUseFile.empty() || SamplePGOSupport) &&
           "Illegal PGOOptions.");
  }
  std::string ProfileGenFile;
  std::string ProfileUseFile;
  std::string SampleProfileFile;
  std::string ProfileRemappingFile;
  bool RunProfileGen;
  bool SamplePGOSupport;
};
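
// Illustrative sketch (not part of this header): a frontend driving an
// instrumentation-based profile-generation build might construct PGOOptions
// roughly as below and pass it to the PassBuilder constructor declared
// further down. The profile file name is an arbitrary example.
//
//   PGOOptions PGO(/*ProfileGenFile=*/"default.profraw", "", "", "",
//                  /*RunProfileGen=*/true);
//   PassBuilder PB(/*TM=*/nullptr, PGO);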
/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  TargetMachine *TM;
  Optional<PGOOptions> PGOOpt;
  PassInstrumentationCallbacks *PIC;

public:
  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a
  /// pass (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc").
  /// If the name is the name of a pass, the InnerPipeline is empty, since
  /// passes cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    None,
    /// ThinLTO prelink (summary) phase.
    PreLink,
    /// ThinLTO postlink (backend compile) phase.
    PostLink
  };
  /// LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.
    O0,

    /// Optimize quickly without destroying debuggability.
    ///
    /// FIXME: The current and historical behavior of this level does *not*
    /// agree with this goal, but we would like to move toward this goal in the
    /// future.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion might not make sense here due to the degree to
    /// which the executed code would differ from the source code, and the
    /// potential compile time cost.
    O1,

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time must not grow superlinearly with
    /// the size of the input to LLVM, so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.
    O2,

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs.
    O3,

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.
    Os,

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
    Oz
  };
  explicit PassBuilder(TargetMachine *TM = nullptr,
                       Optional<PGOOptions> PGOOpt = None,
                       PassInstrumentationCallbacks *PIC = nullptr)
      : TM(TM), PGOOpt(PGOOpt), PIC(PIC) {}
  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);
  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// CGSCCAnalysisManager with all registered CGSCC analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers
  /// can still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// LoopAnalysisManager with all registered loop analyses. Callers can still
  /// manually register any additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);
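
  // Illustrative sketch (not part of this header): typical setup of the four
  // analysis managers before running any pipeline built by this class. The
  // manager variable names are local choices made for the example.
  //
  //   PassBuilder PB;
  //   LoopAnalysisManager LAM;
  //   FunctionAnalysisManager FAM;
  //   CGSCCAnalysisManager CGAM;
  //   ModuleAnalysisManager MAM;
  //   PB.registerModuleAnalyses(MAM);
  //   PB.registerCGSCCAnalyses(CGAM);
  //   PB.registerFunctionAnalyses(FAM);
  //   PB.registerLoopAnalyses(LAM);
  //   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);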
  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinLTOPhase Phase,
                                      bool DebugLogging = false);
  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic-based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    ThinLTOPhase Phase,
                                    bool DebugLogging = false);
  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information-destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing runtime performance of the code
  /// or causing serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false,
                                                    bool LTOPreLink = false);
  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 resp.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false,
                                                  bool LTOPreLink = false);
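
  // Illustrative sketch (not part of this header): building the default -O2
  // pipeline and running it over some llvm::Module M, assuming the analysis
  // managers were registered and cross-registered as shown above.
  //
  //   ModulePassManager MPM =
  //       PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
  //   MPM.run(M, MAM);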
  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run itself.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);
  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
                              const ModuleSummaryIndex *ImportSummary);
  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run itself.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);
  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            bool DebugLogging,
                                            ModuleSummaryIndex *ExportSummary);
  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();
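
  // Illustrative sketch (not part of this header): the returned AAManager is
  // itself a function analysis, so a typical use is to register it with the
  // FunctionAnalysisManager before registering the other analyses.
  //
  //   FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });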
  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nesting structure of passes. All
  /// passes are comma separated. As a special shortcut, if the very first pass
  /// is not a module pass (as a module pass manager is), this will
  /// automatically form the shortest stack of pass managers that allow
  /// inserting that first pass. So, assuming function passes 'fpassN', CGSCC
  /// passes 'cgpassN', and loop passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the passes in the sequence aren't all of the exact same kind, it will
  /// be an error. You cannot mix different levels implicitly; you must
  /// explicitly form a pass manager in which to nest passes.
  Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
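
  // Illustrative sketch (not part of this header): parsing a small textual
  // pipeline into a ModulePassManager and reporting any parse failure. The
  // pipeline text is an arbitrary example.
  //
  //   ModulePassManager MPM;
  //   if (auto Err =
  //           PB.parsePassPipeline(MPM, "function(instcombine),globaldce"))
  //     errs() << "pipeline parse failed: " << toString(std::move(Err)) << "\n";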
  /// {{@ Parse a textual pass pipeline description into a specific PassManager
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  /// @}}
  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns an error if the text cannot be parsed cleanly. The specific state
  /// of the \p AA manager is unspecified if such an error is encountered.
  Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
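
  // Illustrative sketch (not part of this header): building an AAManager from
  // a textual description instead of the default pipeline above, and handing
  // it to the FunctionAnalysisManager on success.
  //
  //   AAManager AA;
  //   if (auto Err = PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
  //     errs() << "invalid AA pipeline: " << toString(std::move(Err)) << "\n";
  //   else
  //     FAM.registerPass([&] { return std::move(AA); });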
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }
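
  // Illustrative sketch (not part of this header): registering an extension
  // point callback. MyPeepholePass is a hypothetical function pass standing in
  // for whatever the caller wants to run after each instcombine instance.
  //
  //   PB.registerPeepholeEPCallback(
  //       [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
  //         if (Level != PassBuilder::O0)
  //           FPM.addPass(MyPeepholePass());
  //       });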
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added here must be an instance
  /// of LoopPass. This is the place to add passes that can remove loops, such
  /// as target-specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes
  /// run by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target-specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization passes once at the start
  /// of the pipeline. This does not apply to 'backend' compiles (LTO and
  /// ThinLTO link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }
  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the very end of the
  /// function optimization pipeline. A key difference between this and the
  /// legacy PassManager's OptimizerLast callback is that this extension point
  /// is not triggered at O0. Extensions to the O0 pipeline should append their
  /// passes to the end of the overall pipeline.
  void registerOptimizerLastEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    OptimizerLastEPCallbacks.push_back(C);
  }
  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA.
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }
  /// {{@ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}}
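
  // Illustrative sketch (not part of this header): a plugin registering a
  // custom function analysis so that default and parsed pipelines can use it.
  // MyAnalysis is a hypothetical analysis type.
  //
  //   PB.registerAnalysisRegistrationCallback(
  //       [](FunctionAnalysisManager &FAM) {
  //         FAM.registerPass([] { return MyAnalysis(); });
  //       });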
  /// {{@ Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}}
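
  // Illustrative sketch (not part of this header): teaching the pipeline
  // parser about a custom pass name. MyPass is a hypothetical function pass;
  // returning false tells the parser the name was not handled here.
  //
  //   PB.registerPipelineParsingCallback(
  //       [](StringRef Name, FunctionPassManager &FPM,
  //          ArrayRef<PassBuilder::PipelineElement>) {
  //         if (Name == "my-pass") {
  //           FPM.addPass(MyPass());
  //           return true;
  //         }
  //         return false;
  //       });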
  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  }
private:
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);

  Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                        bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                          bool VerifyEachPass, bool DebugLogging);
  Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  Error parseLoopPassPipeline(LoopPassManager &LPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPassPipeline(FunctionPassManager &FPM,
                                  ArrayRef<PipelineElement> Pipeline,
                                  bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);
  Error parseModulePassPipeline(ModulePassManager &MPM,
                                ArrayRef<PipelineElement> Pipeline,
                                bool VerifyEachPass, bool DebugLogging);

  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen,
                         std::string ProfileGenFile,
                         std::string ProfileUseFile,
                         std::string ProfileRemappingFile);

  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);
  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      OptimizerLastEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
              2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};
/// This utility template takes care of adding require<> and invalidate<>
/// passes for an analysis to a given \c PassManager. It is intended to be used
/// during parsing of a pass pipeline when parsing a single PipelineName.
/// When registering a new function analysis FancyAnalysis with the pass
/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
/// like this:
///
/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
///                                   ArrayRef<PipelineElement> P) {
///   if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
///                                                 FPM))
///     return true;
///   return false;
/// }
template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
          typename... ExtraArgTs>
bool parseAnalysisUtilityPasses(
    StringRef AnalysisName, StringRef PipelineName,
    PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
  if (!PipelineName.endswith(">"))
    return false;
  // See if this is an invalidate<> pass name
  if (PipelineName.startswith("invalidate<")) {
    PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(InvalidateAnalysisPass<AnalysisT>());
    return true;
  }

  // See if this is a require<> pass name
  if (PipelineName.startswith("require<")) {
    PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,