//===-- llvm-exegesis.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Measures execution properties (latencies/uops) of an instruction.
///
//===----------------------------------------------------------------------===//
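
// Illustrative invocations (not exhaustive; the flags are the cl::opt options
// defined below, and the opcode name is only an example):
//   llvm-exegesis -mode=latency -opcode-name=ADD64rr
//   llvm-exegesis -mode=uops -snippets-file=snippet.s
//   llvm-exegesis -mode=analysis -benchmarks-file=benchmarks.yaml \
//       -analysis-clusters-output-file=clusters.csv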
14 #include "lib/Analysis.h"
15 #include "lib/BenchmarkResult.h"
16 #include "lib/BenchmarkRunner.h"
17 #include "lib/Clustering.h"
18 #include "lib/CodeTemplate.h"
19 #include "lib/Error.h"
20 #include "lib/LlvmState.h"
21 #include "lib/PerfHelper.h"
22 #include "lib/ProgressMeter.h"
23 #include "lib/SnippetFile.h"
24 #include "lib/SnippetRepetitor.h"
25 #include "lib/Target.h"
26 #include "lib/TargetSelect.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/MC/MCInstBuilder.h"
30 #include "llvm/MC/MCObjectFileInfo.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/MC/MCSubtargetInfo.h"
35 #include "llvm/MC/TargetRegistry.h"
36 #include "llvm/Object/ObjectFile.h"
37 #include "llvm/Support/CommandLine.h"
38 #include "llvm/Support/FileSystem.h"
39 #include "llvm/Support/Format.h"
40 #include "llvm/Support/InitLLVM.h"
41 #include "llvm/Support/Path.h"
42 #include "llvm/Support/SourceMgr.h"
43 #include "llvm/Support/TargetSelect.h"
44 #include "llvm/TargetParser/Host.h"

static cl::opt<int> OpcodeIndex(
    "opcode-index",
    cl::desc("opcode to measure, by index, or -1 to measure all opcodes"),
    cl::cat(BenchmarkOptions), cl::init(0));
static cl::opt<std::string>
    OpcodeNames("opcode-name",
                cl::desc("comma-separated list of opcodes to measure, by name"),
                cl::cat(BenchmarkOptions), cl::init(""));
static cl::opt<std::string>
    SnippetsFile("snippets-file", cl::desc("code snippets to measure"),
                 cl::cat(BenchmarkOptions), cl::init(""));
static cl::opt<std::string>
    BenchmarkFile("benchmarks-file",
                  cl::desc("File to read (analysis mode) or write "
                           "(latency/uops/inverse_throughput modes) benchmark "
                           "results. “-” uses stdin/stdout."),
                  cl::cat(Options), cl::init(""));
static cl::opt<exegesis::Benchmark::ModeE> BenchmarkMode(
    "mode", cl::desc("the mode to run"), cl::cat(Options),
    cl::values(clEnumValN(exegesis::Benchmark::Latency, "latency",
                          "Instruction Latency"),
               clEnumValN(exegesis::Benchmark::InverseThroughput,
                          "inverse_throughput",
                          "Instruction Inverse Throughput"),
               clEnumValN(exegesis::Benchmark::Uops, "uops",
                          "Uop Decomposition"),
               // When not asking for a specific benchmark mode,
               // we'll analyse the results.
               clEnumValN(exegesis::Benchmark::Unknown, "analysis",
                          "Analysis")));
static cl::opt<exegesis::Benchmark::ResultAggregationModeE> ResultAggMode(
    "result-aggregation-mode",
    cl::desc("How to aggregate multi-value results"),
    cl::cat(BenchmarkOptions),
    cl::values(clEnumValN(exegesis::Benchmark::Min, "min",
                          "Keep min reading"),
               clEnumValN(exegesis::Benchmark::Max, "max",
                          "Keep max reading"),
               clEnumValN(exegesis::Benchmark::Mean, "mean",
                          "Compute mean of all readings"),
               clEnumValN(exegesis::Benchmark::MinVariance, "min-variance",
                          "Keep readings set with min-variance")),
    cl::init(exegesis::Benchmark::Min));
static cl::opt<exegesis::Benchmark::RepetitionModeE> RepetitionMode(
    "repetition-mode", cl::desc("how to repeat the instruction snippet"),
    cl::cat(BenchmarkOptions),
    cl::values(
        clEnumValN(exegesis::Benchmark::Duplicate, "duplicate",
                   "Duplicate the snippet"),
        clEnumValN(exegesis::Benchmark::Loop, "loop",
                   "Loop over the snippet"),
        clEnumValN(exegesis::Benchmark::AggregateMin, "min",
                   "All of the above and take the minimum of measurements")),
    cl::init(exegesis::Benchmark::Duplicate));
static cl::opt<bool> BenchmarkMeasurementsPrintProgress(
    "measurements-print-progress",
    cl::desc("Produce progress indicator when performing measurements"),
    cl::cat(BenchmarkOptions), cl::init(false));
static cl::opt<exegesis::BenchmarkPhaseSelectorE> BenchmarkPhaseSelector(
    "benchmark-phase",
    cl::desc(
        "it is possible to stop the benchmarking process after some phase"),
    cl::cat(BenchmarkOptions),
    cl::values(
        clEnumValN(exegesis::BenchmarkPhaseSelectorE::PrepareSnippet,
                   "prepare-snippet",
                   "Only generate the minimal instruction sequence"),
        clEnumValN(exegesis::BenchmarkPhaseSelectorE::PrepareAndAssembleSnippet,
                   "prepare-and-assemble-snippet",
                   "Same as prepare-snippet, but also dumps an excerpt of the "
                   "sequence (hex encoded)"),
        clEnumValN(exegesis::BenchmarkPhaseSelectorE::AssembleMeasuredCode,
                   "assemble-measured-code",
                   "Same as prepare-and-assemble-snippet, but also creates the "
                   "full sequence that can be dumped to a file using "
                   "--dump-object-to-disk"),
        clEnumValN(exegesis::BenchmarkPhaseSelectorE::Measure, "measure",
                   "Same as prepare-measured-code, but also runs the "
                   "measurement (default)")),
    cl::init(exegesis::BenchmarkPhaseSelectorE::Measure));
static cl::opt<bool>
    UseDummyPerfCounters("use-dummy-perf-counters",
                         cl::desc("Do not read real performance counters, use "
                                  "dummy values (for testing)"),
                         cl::cat(BenchmarkOptions), cl::init(false));
static cl::opt<unsigned>
    NumRepetitions("num-repetitions",
                   cl::desc("number of times to repeat the asm snippet"),
                   cl::cat(BenchmarkOptions), cl::init(10000));
static cl::opt<unsigned>
    LoopBodySize("loop-body-size",
                 cl::desc("when repeating the instruction snippet by looping "
                          "over it, duplicate the snippet until the loop body "
                          "contains at least this many instructions"),
                 cl::cat(BenchmarkOptions), cl::init(0));
static cl::opt<unsigned> MaxConfigsPerOpcode(
    "max-configs-per-opcode",
    cl::desc(
        "allow the snippet generator to generate at most that many configs"),
    cl::cat(BenchmarkOptions), cl::init(1));
static cl::opt<bool> IgnoreInvalidSchedClass(
    "ignore-invalid-sched-class",
    cl::desc("ignore instructions that do not define a sched class"),
    cl::cat(BenchmarkOptions), cl::init(false));
static cl::opt<exegesis::BenchmarkFilter> AnalysisSnippetFilter(
    "analysis-filter", cl::desc("Filter the benchmarks before analysing them"),
    cl::cat(BenchmarkOptions),
    cl::values(
        clEnumValN(exegesis::BenchmarkFilter::All, "all",
                   "Keep all benchmarks (default)"),
        clEnumValN(exegesis::BenchmarkFilter::RegOnly, "reg-only",
                   "Keep only those benchmarks that do *NOT* involve memory"),
        clEnumValN(exegesis::BenchmarkFilter::WithMem, "mem-only",
                   "Keep only the benchmarks that *DO* involve memory")),
    cl::init(exegesis::BenchmarkFilter::All));
static cl::opt<exegesis::BenchmarkClustering::ModeE>
    AnalysisClusteringAlgorithm(
        "analysis-clustering", cl::desc("the clustering algorithm to use"),
        cl::cat(AnalysisOptions),
        cl::values(clEnumValN(exegesis::BenchmarkClustering::Dbscan, "dbscan",
                              "use DBSCAN/OPTICS algorithm"),
                   clEnumValN(exegesis::BenchmarkClustering::Naive, "naive",
                              "one cluster per opcode")),
        cl::init(exegesis::BenchmarkClustering::Dbscan));
static cl::opt<unsigned> AnalysisDbscanNumPoints(
    "analysis-numpoints",
    cl::desc("minimum number of points in an analysis cluster (dbscan only)"),
    cl::cat(AnalysisOptions), cl::init(3));

static cl::opt<float> AnalysisClusteringEpsilon(
    "analysis-clustering-epsilon",
    cl::desc("epsilon for benchmark point clustering"),
    cl::cat(AnalysisOptions), cl::init(0.1));

static cl::opt<float> AnalysisInconsistencyEpsilon(
    "analysis-inconsistency-epsilon",
    cl::desc("epsilon for detection of when the cluster is different from the "
             "LLVM schedule profile values"),
    cl::cat(AnalysisOptions), cl::init(0.1));
static cl::opt<std::string>
    AnalysisClustersOutputFile("analysis-clusters-output-file", cl::desc(""),
                               cl::cat(AnalysisOptions), cl::init(""));
static cl::opt<std::string>
    AnalysisInconsistenciesOutputFile("analysis-inconsistencies-output-file",
                                      cl::desc(""), cl::cat(AnalysisOptions),
                                      cl::init(""));
static cl::opt<bool> AnalysisDisplayUnstableOpcodes(
    "analysis-display-unstable-clusters",
    cl::desc("if there is more than one benchmark for an opcode, said "
             "benchmarks may end up not being clustered into the same cluster "
             "if the measured performance characteristics are different. by "
             "default all such opcodes are filtered out. this flag will "
             "instead show only such unstable opcodes"),
    cl::cat(AnalysisOptions), cl::init(false));
static cl::opt<bool> AnalysisOverrideBenchmarksTripleAndCpu(
    "analysis-override-benchmark-triple-and-cpu",
    cl::desc("By default, we analyze the benchmarks for the triple/CPU they "
             "were measured for, but if you want to analyze them for some "
             "other combination (specified via -mtriple/-mcpu), you can "
             "pass this flag."),
    cl::cat(AnalysisOptions), cl::init(false));
static cl::opt<std::string>
    TripleName("mtriple",
               cl::desc("Target triple. See -version for available targets"),
               cl::cat(Options), cl::init(""));
static cl::opt<std::string>
    MCPU("mcpu",
         cl::desc("Target a specific cpu type (-mcpu=help for details)"),
         cl::value_desc("cpu-name"), cl::cat(Options), cl::init("native"));
static cl::opt<std::string>
    DumpObjectToDisk("dump-object-to-disk",
                     cl::desc("dumps the generated benchmark object to disk "
                              "and prints a message to access it"),
                     cl::ValueOptional, cl::cat(BenchmarkOptions));
static cl::opt<BenchmarkRunner::ExecutionModeE> ExecutionMode(
    "execution-mode",
    cl::desc("Selects the execution mode to use for running snippets"),
    cl::cat(BenchmarkOptions),
    cl::values(clEnumValN(BenchmarkRunner::ExecutionModeE::InProcess,
                          "inprocess",
                          "Executes the snippets within the same process"),
               clEnumValN(BenchmarkRunner::ExecutionModeE::SubProcess,
                          "subprocess",
                          "Spawns a subprocess for each snippet execution, "
                          "allows for the use of memory annotations")),
    cl::init(BenchmarkRunner::ExecutionModeE::InProcess));
static ExitOnError ExitOnErr("llvm-exegesis error: ");
// Helper function that logs the error(s) and exits.
template <typename... ArgTs> static void ExitWithError(ArgTs &&... Args) {
  ExitOnErr(make_error<Failure>(std::forward<ArgTs>(Args)...));
}
// Check Err. If it's in a failure state log the file error(s) and exit.
static void ExitOnFileError(const Twine &FileName, Error Err) {
  if (Err) {
    ExitOnErr(createFileError(FileName, std::move(Err)));
  }
}
// Check E. If it's in a success state then return the contained value.
// If it's in a failure state log the file error(s) and exit.
template <typename T>
T ExitOnFileError(const Twine &FileName, Expected<T> &&E) {
  ExitOnFileError(FileName, E.takeError());
  return std::move(*E);
}
// Checks that only one of OpcodeNames, OpcodeIndex or SnippetsFile is
// provided, and returns the opcode indices or {} if snippets should be read
// from `SnippetsFile`.
static std::vector<unsigned> getOpcodesOrDie(const LLVMState &State) {
  const size_t NumSetFlags = (OpcodeNames.empty() ? 0 : 1) +
                             (OpcodeIndex == 0 ? 0 : 1) +
                             (SnippetsFile.empty() ? 0 : 1);
  const auto &ET = State.getExegesisTarget();
  const auto AvailableFeatures = State.getSubtargetInfo().getFeatureBits();

  if (NumSetFlags != 1) {
    ExitOnErr.setBanner("llvm-exegesis: ");
    ExitWithError("please provide one and only one of 'opcode-index', "
                  "'opcode-name' or 'snippets-file'");
  }
  if (!SnippetsFile.empty())
    return {};
  if (OpcodeIndex > 0)
    return {static_cast<unsigned>(OpcodeIndex)};
  if (OpcodeIndex < 0) {
    std::vector<unsigned> Result;
    unsigned NumOpcodes = State.getInstrInfo().getNumOpcodes();
    Result.reserve(NumOpcodes);
    for (unsigned I = 0, E = NumOpcodes; I < E; ++I) {
      if (!ET.isOpcodeAvailable(I, AvailableFeatures))
        continue;
      Result.push_back(I);
    }
    return Result;
  }

  // Resolve opcode name -> opcode.
  const auto ResolveName = [&State](StringRef OpcodeName) -> unsigned {
    const auto &Map = State.getOpcodeNameToOpcodeIdxMapping();
    auto I = Map.find(OpcodeName);
    if (I != Map.end())
      return I->getSecond();
    return 0u;
  };
  SmallVector<StringRef, 2> Pieces;
  StringRef(OpcodeNames.getValue())
      .split(Pieces, ",", /* MaxSplit */ -1, /* KeepEmpty */ false);
  std::vector<unsigned> Result;
  Result.reserve(Pieces.size());
  for (const StringRef &OpcodeName : Pieces) {
    if (unsigned Opcode = ResolveName(OpcodeName))
      Result.push_back(Opcode);
    else
      ExitWithError(Twine("unknown opcode ").concat(OpcodeName));
  }
  return Result;
}
// Generates code snippets for opcode `Opcode`.
static Expected<std::vector<BenchmarkCode>>
generateSnippets(const LLVMState &State, unsigned Opcode,
                 const BitVector &ForbiddenRegs) {
  const Instruction &Instr = State.getIC().getInstr(Opcode);
  const MCInstrDesc &InstrDesc = Instr.Description;
  // Ignore instructions that we cannot run.
  if (InstrDesc.isPseudo() || InstrDesc.usesCustomInsertionHook())
    return make_error<Failure>(
        "Unsupported opcode: isPseudo/usesCustomInserter");
  if (InstrDesc.isBranch() || InstrDesc.isIndirectBranch())
    return make_error<Failure>("Unsupported opcode: isBranch/isIndirectBranch");
  if (InstrDesc.isCall() || InstrDesc.isReturn())
    return make_error<Failure>("Unsupported opcode: isCall/isReturn");

  const std::vector<InstructionTemplate> InstructionVariants =
      State.getExegesisTarget().generateInstructionVariants(
          Instr, MaxConfigsPerOpcode);

  SnippetGenerator::Options SnippetOptions;
  SnippetOptions.MaxConfigsPerOpcode = MaxConfigsPerOpcode;
  const std::unique_ptr<SnippetGenerator> Generator =
      State.getExegesisTarget().createSnippetGenerator(BenchmarkMode, State,
                                                       SnippetOptions);
  if (!Generator)
    ExitWithError("cannot create snippet generator");

  std::vector<BenchmarkCode> Benchmarks;
  for (const InstructionTemplate &Variant : InstructionVariants) {
    if (Benchmarks.size() >= MaxConfigsPerOpcode)
      break;
    if (auto Err = Generator->generateConfigurations(Variant, Benchmarks,
                                                     ForbiddenRegs))
      return std::move(Err);
  }
  return Benchmarks;
}
static void runBenchmarkConfigurations(
    const LLVMState &State, ArrayRef<BenchmarkCode> Configurations,
    ArrayRef<std::unique_ptr<const SnippetRepetitor>> Repetitors,
    const BenchmarkRunner &Runner) {
  assert(!Configurations.empty() && "Don't have any configurations to run.");
  std::optional<raw_fd_ostream> FileOstr;
  if (BenchmarkFile != "-") {
    int ResultFD = 0;
    // Create output file or open existing file and truncate it, once.
    ExitOnErr(errorCodeToError(openFileForWrite(BenchmarkFile, ResultFD,
                                                sys::fs::CD_CreateAlways,
                                                sys::fs::OF_TextWithCRLF)));
    FileOstr.emplace(ResultFD, true /*shouldClose*/);
  }
  raw_ostream &Ostr = FileOstr ? *FileOstr : outs();

  std::optional<ProgressMeter<>> Meter;
  if (BenchmarkMeasurementsPrintProgress)
    Meter.emplace(Configurations.size());
  for (const BenchmarkCode &Conf : Configurations) {
    ProgressMeter<>::ProgressMeterStep MeterStep(Meter ? &*Meter : nullptr);
    SmallVector<Benchmark, 2> AllResults;

    for (const std::unique_ptr<const SnippetRepetitor> &Repetitor :
         Repetitors) {
      auto RC = ExitOnErr(Runner.getRunnableConfiguration(
          Conf, NumRepetitions, LoopBodySize, *Repetitor));
      std::optional<StringRef> DumpFile;
      if (DumpObjectToDisk.getNumOccurrences())
        DumpFile = DumpObjectToDisk;
      AllResults.emplace_back(
          ExitOnErr(Runner.runConfiguration(std::move(RC), DumpFile)));
    }
    Benchmark &Result = AllResults.front();

    // If any of our measurements failed, pretend they all have failed.
    if (AllResults.size() > 1 &&
        any_of(AllResults, [](const Benchmark &R) {
          return R.Measurements.empty();
        }))
      Result.Measurements.clear();

    if (RepetitionMode == Benchmark::RepetitionModeE::AggregateMin) {
      for (const Benchmark &OtherResult :
           ArrayRef<Benchmark>(AllResults).drop_front()) {
        llvm::append_range(Result.AssembledSnippet,
                           OtherResult.AssembledSnippet);
        // Aggregate measurements, but only if all measurements succeeded.
        if (Result.Measurements.empty())
          continue;
        assert(OtherResult.Measurements.size() == Result.Measurements.size() &&
               "Expected to have identical number of measurements.");
        for (auto I : zip(Result.Measurements, OtherResult.Measurements)) {
          BenchmarkMeasure &Measurement = std::get<0>(I);
          const BenchmarkMeasure &NewMeasurement = std::get<1>(I);
          assert(Measurement.Key == NewMeasurement.Key &&
                 "Expected measurements to be symmetric");

          Measurement.PerInstructionValue =
              std::min(Measurement.PerInstructionValue,
                       NewMeasurement.PerInstructionValue);
          Measurement.PerSnippetValue = std::min(
              Measurement.PerSnippetValue, NewMeasurement.PerSnippetValue);
        }
      }
    }

    // With dummy counters, measurements are rather meaningless,
    // so drop them altogether.
    if (UseDummyPerfCounters)
      Result.Measurements.clear();

    ExitOnFileError(BenchmarkFile, Result.writeYamlTo(State, Ostr));
  }
}
void benchmarkMain() {
  if (BenchmarkPhaseSelector == BenchmarkPhaseSelectorE::Measure &&
      !UseDummyPerfCounters) {
#ifndef HAVE_LIBPFM
    ExitWithError(
        "benchmarking unavailable, LLVM was built without libpfm. You can "
        "pass --benchmark-phase=... to skip the actual benchmarking or "
        "--use-dummy-perf-counters to not query the kernel for real event "
        "counts.");
#else
    if (exegesis::pfm::pfmInitialize())
      ExitWithError("cannot initialize libpfm");
#endif
  }

  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();
  InitializeAllExegesisTargets();

  const LLVMState State =
      ExitOnErr(LLVMState::Create(TripleName, MCPU, "", UseDummyPerfCounters));

  // Preliminary check to ensure features needed for requested
  // benchmark mode are present on target CPU and/or OS.
  if (BenchmarkPhaseSelector == BenchmarkPhaseSelectorE::Measure)
    ExitOnErr(State.getExegesisTarget().checkFeatureSupport());

  if (ExecutionMode == BenchmarkRunner::ExecutionModeE::SubProcess &&
      UseDummyPerfCounters)
    ExitWithError("Dummy perf counters are not supported in the subprocess "
                  "execution mode.");

  const std::unique_ptr<BenchmarkRunner> Runner =
      ExitOnErr(State.getExegesisTarget().createBenchmarkRunner(
          BenchmarkMode, State, BenchmarkPhaseSelector, ExecutionMode,
          ResultAggMode));
  if (!Runner) {
    ExitWithError("cannot create benchmark runner");
  }

  const auto Opcodes = getOpcodesOrDie(State);

  SmallVector<std::unique_ptr<const SnippetRepetitor>, 2> Repetitors;
  if (RepetitionMode != Benchmark::RepetitionModeE::AggregateMin)
    Repetitors.emplace_back(SnippetRepetitor::Create(RepetitionMode, State));
  else {
    for (Benchmark::RepetitionModeE RepMode :
         {Benchmark::RepetitionModeE::Duplicate,
          Benchmark::RepetitionModeE::Loop})
      Repetitors.emplace_back(SnippetRepetitor::Create(RepMode, State));
  }

  BitVector AllReservedRegs;
  for (const std::unique_ptr<const SnippetRepetitor> &Repetitor : Repetitors)
    AllReservedRegs |= Repetitor->getReservedRegs();

  std::vector<BenchmarkCode> Configurations;
  if (!Opcodes.empty()) {
    for (const unsigned Opcode : Opcodes) {
      // Ignore instructions without a sched class if
      // -ignore-invalid-sched-class is passed.
      if (IgnoreInvalidSchedClass &&
          State.getInstrInfo().get(Opcode).getSchedClass() == 0) {
        errs() << State.getInstrInfo().getName(Opcode)
               << ": ignoring instruction without sched class\n";
        continue;
      }

      auto ConfigsForInstr = generateSnippets(State, Opcode, AllReservedRegs);
      if (!ConfigsForInstr) {
        logAllUnhandledErrors(
            ConfigsForInstr.takeError(), errs(),
            Twine(State.getInstrInfo().getName(Opcode)).concat(": "));
        continue;
      }
      std::move(ConfigsForInstr->begin(), ConfigsForInstr->end(),
                std::back_inserter(Configurations));
    }
  } else {
    Configurations = ExitOnErr(readSnippets(State, SnippetsFile));
    for (const auto &Configuration : Configurations) {
      if (ExecutionMode != BenchmarkRunner::ExecutionModeE::SubProcess &&
          (Configuration.Key.MemoryMappings.size() != 0 ||
           Configuration.Key.MemoryValues.size() != 0))
        ExitWithError("Memory annotations are only supported in subprocess "
                      "execution mode");
    }
  }

  if (NumRepetitions == 0) {
    ExitOnErr.setBanner("llvm-exegesis: ");
    ExitWithError("--num-repetitions must be greater than zero");
  }

  // Write to standard output if file is not set.
  if (BenchmarkFile.empty())
    BenchmarkFile = "-";

  if (!Configurations.empty())
    runBenchmarkConfigurations(State, Configurations, Repetitors, *Runner);

  exegesis::pfm::pfmTerminate();
}
// Prints the results of running analysis pass `Pass` to file `OutputFilename`
// if OutputFilename is non-empty.
template <typename Pass>
static void maybeRunAnalysis(const Analysis &Analyzer, const std::string &Name,
                             const std::string &OutputFilename) {
  if (OutputFilename.empty())
    return;
  if (OutputFilename != "-") {
    errs() << "Printing " << Name << " results to file '" << OutputFilename
           << "'\n";
  }
  std::error_code ErrorCode;
  raw_fd_ostream ClustersOS(OutputFilename, ErrorCode,
                            sys::fs::FA_Read | sys::fs::FA_Write);
  if (ErrorCode)
    ExitOnFileError(OutputFilename, errorCodeToError(ErrorCode));
  if (auto Err = Analyzer.run<Pass>(ClustersOS))
    ExitOnFileError(OutputFilename, std::move(Err));
}
static void filterPoints(MutableArrayRef<Benchmark> Points,
                         const MCInstrInfo &MCII) {
  if (AnalysisSnippetFilter == exegesis::BenchmarkFilter::All)
    return;

  bool WantPointsWithMemOps =
      AnalysisSnippetFilter == exegesis::BenchmarkFilter::WithMem;
  for (Benchmark &Point : Points) {
    if (!Point.Error.empty())
      continue;
    if (WantPointsWithMemOps ==
        any_of(Point.Key.Instructions, [&MCII](const MCInst &Inst) {
          const MCInstrDesc &MCDesc = MCII.get(Inst.getOpcode());
          return MCDesc.mayLoad() || MCDesc.mayStore();
        }))
      continue;
    Point.Error = "filtered out by user";
  }
}
static void analysisMain() {
  ExitOnErr.setBanner("llvm-exegesis: ");
  if (BenchmarkFile.empty())
    ExitWithError("--benchmarks-file must be set");

  if (AnalysisClustersOutputFile.empty() &&
      AnalysisInconsistenciesOutputFile.empty()) {
    ExitWithError(
        "for --mode=analysis: At least one of --analysis-clusters-output-file "
        "and --analysis-inconsistencies-output-file must be specified");
  }

  InitializeAllAsmPrinters();
  InitializeAllDisassemblers();
  InitializeAllExegesisTargets();

  auto MemoryBuffer = ExitOnFileError(
      BenchmarkFile,
      errorOrToExpected(MemoryBuffer::getFile(BenchmarkFile, /*IsText=*/true)));

  const auto TriplesAndCpus = ExitOnFileError(
      BenchmarkFile,
      Benchmark::readTriplesAndCpusFromYamls(*MemoryBuffer));
  if (TriplesAndCpus.empty()) {
    errs() << "no benchmarks to analyze\n";
    return;
  }
  if (TriplesAndCpus.size() > 1) {
    ExitWithError("analysis file contains benchmarks from several CPUs. This "
                  "is not supported.");
  }
  auto TripleAndCpu = *TriplesAndCpus.begin();
  if (AnalysisOverrideBenchmarksTripleAndCpu) {
    llvm::errs() << "overriding file CPU name (" << TripleAndCpu.CpuName
                 << ") with provided triple (" << TripleName
                 << ") and CPU name (" << MCPU << ")\n";
    TripleAndCpu.LLVMTriple = TripleName;
    TripleAndCpu.CpuName = MCPU;
  }
  llvm::errs() << "using Triple '" << TripleAndCpu.LLVMTriple << "' and CPU '"
               << TripleAndCpu.CpuName << "'\n";

  const LLVMState State = ExitOnErr(
      LLVMState::Create(TripleAndCpu.LLVMTriple, TripleAndCpu.CpuName));
  std::vector<Benchmark> Points = ExitOnFileError(
      BenchmarkFile, Benchmark::readYamls(State, *MemoryBuffer));

  outs() << "Parsed " << Points.size() << " benchmark points\n";
  if (Points.empty()) {
    errs() << "no benchmarks to analyze\n";
    return;
  }
  // FIXME: Merge points from several runs (latency and uops).

  filterPoints(Points, State.getInstrInfo());

  const auto Clustering = ExitOnErr(BenchmarkClustering::create(
      Points, AnalysisClusteringAlgorithm, AnalysisDbscanNumPoints,
      AnalysisClusteringEpsilon, &State.getSubtargetInfo(),
      &State.getInstrInfo()));

  const Analysis Analyzer(State, Clustering, AnalysisInconsistencyEpsilon,
                          AnalysisDisplayUnstableOpcodes);

  maybeRunAnalysis<Analysis::PrintClusters>(Analyzer, "analysis clusters",
                                            AnalysisClustersOutputFile);
  maybeRunAnalysis<Analysis::PrintSchedClassInconsistencies>(
      Analyzer, "sched class consistency analysis",
      AnalysisInconsistenciesOutputFile);
}
} // namespace exegesis
} // namespace llvm
int main(int Argc, char **Argv) {
  using namespace llvm;
  InitLLVM X(Argc, Argv);

  // Initialize targets so we can print them when flag --version is specified.
  InitializeAllTargetInfos();
  InitializeAllTargets();
  InitializeAllTargetMCs();

  // Register the Target and CPU printer for --version.
  cl::AddExtraVersionPrinter(sys::printDefaultTargetAndDetectedCPU);

  // Enable printing of available targets when flag --version is specified.
  cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);

  cl::HideUnrelatedOptions({&llvm::exegesis::Options,
                            &llvm::exegesis::BenchmarkOptions,
                            &llvm::exegesis::AnalysisOptions});

  cl::ParseCommandLineOptions(Argc, Argv,
                              "llvm host machine instruction characteristics "
                              "measurement and analysis.\n");

  exegesis::ExitOnErr.setExitCodeMapper([](const Error &Err) {
    if (Err.isA<exegesis::ClusteringError>())
      return EXIT_SUCCESS;
    return EXIT_FAILURE;
  });

  if (exegesis::BenchmarkMode == exegesis::Benchmark::Unknown) {
    exegesis::analysisMain();
  } else {
    exegesis::benchmarkMain();
  }
  return EXIT_SUCCESS;
}