//===- SubtargetEmitter.cpp - Generate subtarget enumerations ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits subtarget enumerations.
//
//===----------------------------------------------------------------------===//

#include "Common/CodeGenHwModes.h"
#include "Common/CodeGenSchedule.h"
#include "Common/CodeGenTarget.h"
#include "Common/PredicateExpander.h"
#include "Common/Utils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"
namespace {

using FeatureMapTy = DenseMap<const Record *, unsigned>;

/// Sorting predicate to sort record pointers by their
/// FieldName field.
struct LessRecordFieldFieldName {
  bool operator()(const Record *Rec1, const Record *Rec2) const {
    return Rec1->getValueAsString("FieldName") <
           Rec2->getValueAsString("FieldName");
  }
};
class SubtargetEmitter {
  // Each processor has a SchedClassDesc table with an entry for each
  // SchedClass. The SchedClassDesc table indexes into a global write resource
  // table, write latency table, and read advance table.
  struct SchedClassTables {
    std::vector<std::vector<MCSchedClassDesc>> ProcSchedClasses;
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;

    // Reserve an invalid entry at index 0.
    SchedClassTables() {
      ProcSchedClasses.resize(1);
      WriteProcResources.resize(1);
      WriteLatencies.resize(1);
      WriterNames.push_back("InvalidWrite");
      ReadAdvanceEntries.resize(1);
    }
  };

  struct LessWriteProcResources {
    bool operator()(const MCWriteProcResEntry &LHS,
                    const MCWriteProcResEntry &RHS) {
      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
    }
  };

  CodeGenTarget TGT;
  const RecordKeeper &Records;
  CodeGenSchedModels &SchedModels;
  std::string Target;

  FeatureMapTy enumeration(raw_ostream &OS);
  void emitSubtargetInfoMacroCalls(raw_ostream &OS);
  unsigned featureKeyValues(raw_ostream &OS, const FeatureMapTy &FeatureMap);
  unsigned cpuKeyValues(raw_ostream &OS, const FeatureMapTy &FeatureMap);
  void formItineraryStageString(const std::string &Names,
                                const Record *ItinData, std::string &ItinString,
                                unsigned &NStages);
  void formItineraryOperandCycleString(const Record *ItinData,
                                       std::string &ItinString,
                                       unsigned &NOperandCycles);
  void formItineraryBypassString(const std::string &Names,
                                 const Record *ItinData,
                                 std::string &ItinString,
                                 unsigned NOperandCycles);
  void emitStageAndOperandCycleData(
      raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  void emitItineraries(raw_ostream &OS,
                       std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  unsigned emitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                  raw_ostream &OS);
  void emitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void emitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void emitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
                         char Separator);
  void emitProcessorResourceSubUnits(const CodeGenProcModel &ProcModel,
                                     raw_ostream &OS);
  void emitProcessorResources(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  const Record *findWriteResources(const CodeGenSchedRW &SchedWrite,
                                   const CodeGenProcModel &ProcModel);
  const Record *findReadAdvance(const CodeGenSchedRW &SchedRead,
                                const CodeGenProcModel &ProcModel);
  void expandProcResources(ConstRecVec &PRVec,
                           std::vector<int64_t> &ReleaseAtCycles,
                           std::vector<int64_t> &AcquireAtCycles,
                           const CodeGenProcModel &ProcModel);
  void genSchedClassTables(const CodeGenProcModel &ProcModel,
                           SchedClassTables &SchedTables);
  void emitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
  void emitProcessorModels(raw_ostream &OS);
  void emitSchedModelHelpers(const std::string &ClassName, raw_ostream &OS);
  void emitSchedModelHelpersImpl(raw_ostream &OS,
                                 bool OnlyExpandMCInstPredicates = false);
  void emitGenMCSubtargetInfo(raw_ostream &OS);
  void emitMcInstrAnalysisPredicateFunctions(raw_ostream &OS);

  void emitSchedModel(raw_ostream &OS);
  void emitGetMacroFusions(const std::string &ClassName, raw_ostream &OS);
  void emitHwModeCheck(const std::string &ClassName, raw_ostream &OS);
  void parseFeaturesFunction(raw_ostream &OS);

public:
  SubtargetEmitter(const RecordKeeper &R)
      : TGT(R), Records(R), SchedModels(TGT.getSchedModels()),
        Target(TGT.getName()) {}

  void run(raw_ostream &O);
};

} // end anonymous namespace
//
// Enumeration - Emit the specified class as an enumeration.
//
FeatureMapTy SubtargetEmitter::enumeration(raw_ostream &OS) {
  ArrayRef<const Record *> DefList =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  unsigned N = DefList.size();
  if (N == 0)
    return FeatureMapTy();
  if (N + 1 > MAX_SUBTARGET_FEATURES)
    PrintFatalError(
        "Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");

  OS << "namespace " << Target << " {\n";

  // Open feature enumeration.
  OS << "enum {\n";

  FeatureMapTy FeatureMap;
  // For each feature record, emit an enumerator and remember its index.
  for (unsigned I = 0; I < N; ++I) {
    const Record *Def = DefList[I];

    // Get and emit the name.
    OS << "  " << Def->getName() << " = " << I << ",\n";

    // Save the index for this feature.
    FeatureMap[Def] = I;
  }

  OS << "  "
     << "NumSubtargetFeatures = " << N << "\n";

  // Close enumeration and namespace
  OS << "};\n";
  OS << "} // end namespace " << Target << "\n";
  return FeatureMap;
}
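// For illustration only (the target name and features below are hypothetical,
// not produced by this file verbatim): for a target "Foo" with two features,
// the enumeration emitted above looks roughly like
//
//   namespace Foo {
//   enum {
//     FeatureA = 0,
//     FeatureB = 1,
//     NumSubtargetFeatures = 2
//   };
//   } // end namespace Foo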
static void printFeatureMask(raw_ostream &OS,
                             ArrayRef<const Record *> FeatureList,
                             const FeatureMapTy &FeatureMap) {
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {};
  for (const Record *Feature : FeatureList) {
    unsigned Bit = FeatureMap.lookup(Feature);
    Mask[Bit / 64] |= 1ULL << (Bit % 64);
  }

  OS << "{ { { ";
  for (unsigned I = 0; I != Mask.size(); ++I) {
    OS << "0x";
    OS.write_hex(Mask[I]);
    OS << "ULL, ";
  }
  OS << "} } }";
}
/// Emit some information about the SubtargetFeature as calls to a macro so
/// that they can be used from C++.
void SubtargetEmitter::emitSubtargetInfoMacroCalls(raw_ostream &OS) {
  OS << "\n#ifdef GET_SUBTARGETINFO_MACRO\n";

  std::vector<const Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(FeatureList, LessRecordFieldFieldName());

  for (const Record *Feature : FeatureList) {
    const StringRef FieldName = Feature->getValueAsString("FieldName");
    const StringRef Value = Feature->getValueAsString("Value");

    // Only handle boolean features for now, excluding BitVectors and enums.
    const bool IsBool = (Value == "false" || Value == "true") &&
                        !StringRef(FieldName).contains('[');
    if (!IsBool)
      continue;

    // Some features default to true, with values set to false if enabled.
    const char *Default = Value == "false" ? "true" : "false";

    // Define the getter with lowercased first char: xxxYyy() { return XxxYyy; }
    const std::string Getter =
        FieldName.substr(0, 1).lower() + FieldName.substr(1).str();

    OS << "GET_SUBTARGETINFO_MACRO(" << FieldName << ", " << Default << ", "
       << Getter << ")\n";
  }
  OS << "#undef GET_SUBTARGETINFO_MACRO\n";
  OS << "#endif // GET_SUBTARGETINFO_MACRO\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_MC_DESC\n\n";

  if (Target == "AArch64")
    OS << "#include \"llvm/TargetParser/AArch64TargetParser.h\"\n\n";
}
//
// FeatureKeyValues - Emit data of all the subtarget features. Used by the
// command line.
//
unsigned SubtargetEmitter::featureKeyValues(raw_ostream &OS,
                                            const FeatureMapTy &FeatureMap) {
  std::vector<const Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  // Remove features with empty name.
  llvm::erase_if(FeatureList, [](const Record *Rec) {
    return Rec->getValueAsString("Name").empty();
  });
  if (FeatureList.empty())
    return 0;

  // Sort and check duplicate Feature name.
  sortAndReportDuplicates(FeatureList, "Feature");

  // Begin feature table.
  OS << "// Sorted (by key) array of values for CPU features.\n"
     << "extern const llvm::SubtargetFeatureKV " << Target
     << "FeatureKV[] = {\n";

  for (const Record *Feature : FeatureList) {
    StringRef Name = Feature->getName();
    StringRef CommandLineName = Feature->getValueAsString("Name");
    StringRef Desc = Feature->getValueAsString("Desc");

    // Emit as { "feature", "description", { featureEnum }, { i1 , i2 , ... , in } }
    OS << "  { "
       << "\"" << CommandLineName << "\", "
       << "\"" << Desc << "\", " << Target << "::" << Name << ", ";

    ConstRecVec ImpliesList = Feature->getValueAsListOfDefs("Implies");

    printFeatureMask(OS, ImpliesList, FeatureMap);

    OS << " },\n";
  }

  // End feature table.
  OS << "};\n";

  return FeatureList.size();
}
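// Schematic example of one generated row, for a hypothetical target "Foo"
// and feature "avx" (names illustrative only):
//
//   { "avx", "Enable AVX", Foo::FeatureAVX, <implied-features mask> },
//
// The trailing mask initializer is produced by printFeatureMask() above.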
//
// CPUKeyValues - Emit data of all the subtarget processors. Used by command
// line.
//
unsigned SubtargetEmitter::cpuKeyValues(raw_ostream &OS,
                                        const FeatureMapTy &FeatureMap) {
  // Gather and sort processor information
  std::vector<const Record *> ProcessorList =
      Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcessorList, LessRecordFieldName());

  // Note that unlike `FeatureKeyValues`, here we do not need to check for
  // duplicate processors, since that is already done when the SubtargetEmitter
  // constructor calls `getSchedModels` to build a `CodeGenSchedModels` object,
  // which does the duplicate processor check.

  // Begin processor table.
  OS << "// Sorted (by key) array of values for CPU subtype.\n"
     << "extern const llvm::SubtargetSubTypeKV " << Target
     << "SubTypeKV[] = {\n";

  for (const Record *Processor : ProcessorList) {
    StringRef Name = Processor->getValueAsString("Name");
    ConstRecVec FeatureList = Processor->getValueAsListOfDefs("Features");
    ConstRecVec TuneFeatureList =
        Processor->getValueAsListOfDefs("TuneFeatures");

    // Emit as "{ "cpu", "description", 0, { f1 , f2 , ... fn } },".
    OS << " { "
       << "\"" << Name << "\", ";

    printFeatureMask(OS, FeatureList, FeatureMap);
    OS << ", ";
    printFeatureMask(OS, TuneFeatureList, FeatureMap);

    // Emit the scheduler model pointer.
    const std::string &ProcModelName =
        SchedModels.getModelForProc(Processor).ModelName;
    OS << ", &" << ProcModelName << " },\n";
  }

  // End processor table.
  OS << "};\n";

  return ProcessorList.size();
}
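// Schematic example of one generated row, for a hypothetical CPU "generic":
//
//   { "generic", <features mask>, <tune-features mask>, &GenericModel },
//
// The final field points at the MCSchedModel emitted by emitProcessorModels().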
//
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
//
void SubtargetEmitter::formItineraryStageString(const std::string &Name,
                                                const Record *ItinData,
                                                std::string &ItinString,
                                                unsigned &NStages) {
  // Get the stage list.
  ConstRecVec StageList = ItinData->getValueAsListOfDefs("Stages");

  // For each stage
  unsigned N = NStages = StageList.size();
  for (unsigned I = 0; I < N;) {
    // Next stage
    const Record *Stage = StageList[I];

    // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
    int Cycles = Stage->getValueAsInt("Cycles");
    ItinString += "  { " + itostr(Cycles) + ", ";

    // Get the unit list.
    ConstRecVec UnitList = Stage->getValueAsListOfDefs("Units");

    // For each unit
    for (unsigned J = 0, M = UnitList.size(); J < M;) {
      // Add name and bitwise or
      ItinString += Name + "FU::" + UnitList[J]->getName().str();
      if (++J < M)
        ItinString += " | ";
    }

    int TimeInc = Stage->getValueAsInt("TimeInc");
    ItinString += ", " + itostr(TimeInc);

    int Kind = Stage->getValueAsInt("Kind");
    ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);

    // Close off stage
    ItinString += " }";
    if (++I < N)
      ItinString += ", ";
  }
}
//
// FormItineraryOperandCycleString - Compose a string containing the
// operand cycle initialization for the specified itinerary. N is the
// number of operands that has cycles specified.
//
void SubtargetEmitter::formItineraryOperandCycleString(
    const Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) {
  // Get operand cycle list
  std::vector<int64_t> OperandCycleList =
      ItinData->getValueAsListOfInts("OperandCycles");

  // For each operand cycle
  NOperandCycles = OperandCycleList.size();
  ListSeparator LS;
  for (int OCycle : OperandCycleList) {
    // Next operand cycle
    ItinString += LS;
    ItinString += "  " + itostr(OCycle);
  }
}
void SubtargetEmitter::formItineraryBypassString(const std::string &Name,
                                                 const Record *ItinData,
                                                 std::string &ItinString,
                                                 unsigned NOperandCycles) {
  ConstRecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses");
  unsigned N = BypassList.size();
  unsigned I = 0;
  ListSeparator LS;
  for (; I < N; ++I) {
    ItinString += LS;
    ItinString += Name + "Bypass::" + BypassList[I]->getName().str();
  }
  for (; I < NOperandCycles; ++I) {
    ItinString += LS;
    ItinString += " 0";
  }
}
//
// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
// by CodeGenSchedClass::Index.
//
void SubtargetEmitter::emitStageAndOperandCycleData(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<const Record *, 8> ItinsDefSet;

  // Emit functional units for all the itineraries.
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {

    if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
      continue;

    ConstRecVec FUs = ProcModel.ItinsDef->getValueAsListOfDefs("FU");
    if (FUs.empty())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();
    OS << "\n// Functional units for \"" << Name << "\"\n"
       << "namespace " << Name << "FU {\n";

    for (unsigned J = 0, FUN = FUs.size(); J < FUN; ++J)
      OS << "  const InstrStage::FuncUnits " << FUs[J]->getName()
         << " = 1ULL << " << J << ";\n";

    OS << "} // end namespace " << Name << "FU\n";

    ConstRecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");
    if (BPs.empty())
      continue;
    OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
       << "\"\n"
       << "namespace " << Name << "Bypass {\n";

    OS << "  const unsigned NoBypass = 0;\n";
    for (unsigned J = 0, BPN = BPs.size(); J < BPN; ++J)
      OS << "  const unsigned " << BPs[J]->getName() << " = 1 << " << J
         << ";\n";

    OS << "} // end namespace " << Name << "Bypass\n";
  }

  // Begin stages table
  std::string StageTable =
      "\nextern const llvm::InstrStage " + Target + "Stages[] = {\n";
  StageTable += "  { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";

  // Begin operand cycle table
  std::string OperandCycleTable =
      "extern const unsigned " + Target + "OperandCycles[] = {\n";
  OperandCycleTable += "  0, // No itinerary\n";

  // Begin pipeline bypass table
  std::string BypassTable =
      "extern const unsigned " + Target + "ForwardingPaths[] = {\n";
  BypassTable += " 0, // No itinerary\n";

  // For each Itinerary across all processors, add a unique entry to the stages,
  // operand cycles, and pipeline bypass tables. Then add the new Itinerary
  // object with computed offsets to the ProcItinLists result.
  unsigned StageCount = 1, OperandCycleCount = 1;
  StringMap<unsigned> ItinStageMap, ItinOperandMap;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    // Add process itinerary to the list.
    std::vector<InstrItinerary> &ItinList = ProcItinLists.emplace_back();

    // If this processor defines no itineraries, then leave the itinerary list
    // empty.
    if (!ProcModel.hasItineraries())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();

    ItinList.resize(SchedModels.numInstrSchedClasses());
    assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");

    for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
         SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {

      // Next itinerary data
      const Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];

      // Get string and stage count
      std::string ItinStageString;
      unsigned NStages = 0;
      if (ItinData)
        formItineraryStageString(std::string(Name), ItinData, ItinStageString,
                                 NStages);

      // Get string and operand cycle count
      std::string ItinOperandCycleString;
      unsigned NOperandCycles = 0;
      std::string ItinBypassString;
      if (ItinData) {
        formItineraryOperandCycleString(ItinData, ItinOperandCycleString,
                                        NOperandCycles);

        formItineraryBypassString(std::string(Name), ItinData, ItinBypassString,
                                  NOperandCycles);
      }

      // Check to see if stage already exists and create if it doesn't
      uint16_t FindStage = 0;
      if (NStages > 0) {
        FindStage = ItinStageMap[ItinStageString];
        if (FindStage == 0) {
          // Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
          StageTable += ItinStageString + ", // " + itostr(StageCount);
          if (NStages > 1)
            StageTable += "-" + itostr(StageCount + NStages - 1);
          StageTable += "\n";
          // Record Itin class number.
          ItinStageMap[ItinStageString] = FindStage = StageCount;
          StageCount += NStages;
        }
      }

      // Check to see if operand cycle already exists and create if it doesn't
      uint16_t FindOperandCycle = 0;
      if (NOperandCycles > 0) {
        std::string ItinOperandString =
            ItinOperandCycleString + ItinBypassString;
        FindOperandCycle = ItinOperandMap[ItinOperandString];
        if (FindOperandCycle == 0) {
          // Emit as  cycle, // index
          OperandCycleTable += ItinOperandCycleString + ", // ";
          std::string OperandIdxComment = itostr(OperandCycleCount);
          if (NOperandCycles > 1)
            OperandIdxComment +=
                "-" + itostr(OperandCycleCount + NOperandCycles - 1);
          OperandCycleTable += OperandIdxComment + "\n";
          // Record Itin class number.
          ItinOperandMap[ItinOperandCycleString] = FindOperandCycle =
              OperandCycleCount;
          // Emit as bypass, // index
          BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
          OperandCycleCount += NOperandCycles;
        }
      }

      // Set up itinerary as location and location + stage count
      int16_t NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
      InstrItinerary Intinerary = {
          NumUOps,
          FindStage,
          uint16_t(FindStage + NStages),
          FindOperandCycle,
          uint16_t(FindOperandCycle + NOperandCycles),
      };

      // Inject - empty slots will be 0, 0
      ItinList[SchedClassIdx] = Intinerary;
    }
  }

  // Closing stage
  StageTable += "  { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
  StageTable += "};\n";

  // Closing operand cycles
  OperandCycleTable += "  0 // End operand cycles\n";
  OperandCycleTable += "};\n";

  BypassTable += " 0 // End bypass tables\n";
  BypassTable += "};\n";

  // Emit the assembled tables.
  OS << StageTable;
  OS << OperandCycleTable;
  OS << BypassTable;
}
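// The routine above assembles three parallel tables, roughly of the form
// (target name "Foo" hypothetical, entries illustrative only):
//
//   extern const llvm::InstrStage FooStages[] = {
//     { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary
//     { <cycles>, <FU mask>, <timeinc>, <kind> }, // 1
//     ...
//   };
//   extern const unsigned FooOperandCycles[] = { 0, /* ... */ };
//   extern const unsigned FooForwardingPaths[] = { 0, /* ... */ };
//
// The InstrItinerary objects built here index into these tables by offset.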
//
// EmitProcessorData - Generate data for processor itineraries that were
// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
// Itineraries for each processor. The Itinerary lists are indexed on
// CodeGenSchedClass::Index.
//
void SubtargetEmitter::emitItineraries(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<const Record *, 8> ItinsDefSet;

  // For each processor's machine model
  std::vector<std::vector<InstrItinerary>>::iterator ProcItinListsIter =
      ProcItinLists.begin();
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI, ++ProcItinListsIter) {

    const Record *ItinsDef = PI->ItinsDef;
    if (!ItinsDefSet.insert(ItinsDef).second)
      continue;

    // Get the itinerary list for the processor.
    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;

    // Empty itineraries aren't referenced anywhere in the tablegen output
    // so don't emit them.
    if (ItinList.empty())
      continue;

    OS << "\n";
    OS << "static const llvm::InstrItinerary ";

    // Begin processor itinerary table
    OS << ItinsDef->getName() << "[] = {\n";

    // For each itinerary class in CodeGenSchedClass::Index order.
    for (unsigned J = 0, M = ItinList.size(); J < M; ++J) {
      InstrItinerary &Intinerary = ItinList[J];

      // Emit Itinerary in the form of
      // { firstStage, lastStage, firstCycle, lastCycle } // index
      OS << "  { " << Intinerary.NumMicroOps << ", " << Intinerary.FirstStage
         << ", " << Intinerary.LastStage << ", " << Intinerary.FirstOperandCycle
         << ", " << Intinerary.LastOperandCycle << " }"
         << ", // " << J << " " << SchedModels.getSchedClass(J).Name << "\n";
    }
    // End processor itinerary table
    OS << "  { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
          "\n};\n";
  }
}
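// Each per-processor table emitted above has roughly this shape (names
// hypothetical, values illustrative only):
//
//   static const llvm::InstrItinerary FooItineraries[] = {
//     { <NumMicroOps>, <FirstStage>, <LastStage>,
//       <FirstOperandCycle>, <LastOperandCycle> }, // 0 NoInstrModel
//     ...
//     { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }
//   };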
// Emit either the value defined in the TableGen Record, or the default
// value defined in the C++ header. The Record is null if the processor does not
// define a model.
void SubtargetEmitter::emitProcessorProp(raw_ostream &OS, const Record *R,
                                         StringRef Name, char Separator) {
  OS << "  ";
  int V = R ? R->getValueAsInt(Name) : -1;
  if (V >= 0)
    OS << V << Separator << " // " << Name;
  else
    OS << "MCSchedModel::Default" << Name << Separator;
  OS << '\n';
}
void SubtargetEmitter::emitProcessorResourceSubUnits(
    const CodeGenProcModel &ProcModel, raw_ostream &OS) {
  OS << "\nstatic const unsigned " << ProcModel.ModelName
     << "ProcResourceSubUnits[] = {\n"
     << "  0,  // Invalid\n";

  for (unsigned I = 0, E = ProcModel.ProcResourceDefs.size(); I < E; ++I) {
    const Record *PRDef = ProcModel.ProcResourceDefs[I];
    if (!PRDef->isSubClassOf("ProcResGroup"))
      continue;
    for (const Record *RUDef : PRDef->getValueAsListOfDefs("Resources")) {
      const Record *RU =
          SchedModels.findProcResUnits(RUDef, ProcModel, PRDef->getLoc());
      for (unsigned J = 0; J < RU->getValueAsInt("NumUnits"); ++J) {
        OS << "  " << ProcModel.getProcResourceIdx(RU) << ", ";
      }
    }
    OS << "  // " << PRDef->getName() << "\n";
  }
  OS << "};\n";
}
static void emitRetireControlUnitInfo(const CodeGenProcModel &ProcModel,
                                      raw_ostream &OS) {
  int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0;
  if (const Record *RCU = ProcModel.RetireControlUnit) {
    ReorderBufferSize =
        std::max(ReorderBufferSize, RCU->getValueAsInt("ReorderBufferSize"));
    MaxRetirePerCycle =
        std::max(MaxRetirePerCycle, RCU->getValueAsInt("MaxRetirePerCycle"));
  }

  OS << ReorderBufferSize << ", // ReorderBufferSize\n  ";
  OS << MaxRetirePerCycle << ", // MaxRetirePerCycle\n  ";
}
static void emitRegisterFileInfo(const CodeGenProcModel &ProcModel,
                                 unsigned NumRegisterFiles,
                                 unsigned NumCostEntries, raw_ostream &OS) {
  if (NumRegisterFiles)
    OS << ProcModel.ModelName << "RegisterFiles,\n  " << (1 + NumRegisterFiles);
  else
    OS << "nullptr,\n  0";

  OS << ", // Number of register files.\n  ";
  if (NumCostEntries)
    OS << ProcModel.ModelName << "RegisterCosts,\n  ";
  else
    OS << "nullptr,\n  ";
  OS << NumCostEntries << ", // Number of register cost entries.\n";
}
unsigned
SubtargetEmitter::emitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                         raw_ostream &OS) {
  if (llvm::all_of(ProcModel.RegisterFiles, [](const CodeGenRegisterFile &RF) {
        return RF.hasDefaultCosts();
      }))
    return 0;

  // Print the RegisterCost table first.
  OS << "\n// {RegisterClassID, Register Cost, AllowMoveElimination }\n";
  OS << "static const llvm::MCRegisterCostEntry " << ProcModel.ModelName
     << "RegisterCosts"
     << "[] = {\n";

  for (const CodeGenRegisterFile &RF : ProcModel.RegisterFiles) {
    // Skip register files with a default cost table.
    if (RF.hasDefaultCosts())
      continue;
    // Add entries to the cost table.
    for (const CodeGenRegisterCost &RC : RF.Costs) {
      OS << "  { ";
      const Record *Rec = RC.RCDef;
      if (Rec->getValue("Namespace"))
        OS << Rec->getValueAsString("Namespace") << "::";
      OS << Rec->getName() << "RegClassID, " << RC.Cost << ", "
         << RC.AllowMoveElimination << "},\n";
    }
  }
  OS << "};\n";

  // Now generate a table with register file info.
  OS << "\n // {Name, #PhysRegs, #CostEntries, IndexToCostTbl, "
     << "MaxMovesEliminatedPerCycle, AllowZeroMoveEliminationOnly }\n";
  OS << "static const llvm::MCRegisterFileDesc " << ProcModel.ModelName
     << "RegisterFiles"
     << "[] = {\n"
     << "  { \"InvalidRegisterFile\", 0, 0, 0, 0, 0 },\n";
  unsigned CostTblIndex = 0;

  for (const CodeGenRegisterFile &RD : ProcModel.RegisterFiles) {
    OS << "  { ";
    OS << '"' << RD.Name << '"' << ", " << RD.NumPhysRegs << ", ";
    unsigned NumCostEntries = RD.Costs.size();
    OS << NumCostEntries << ", " << CostTblIndex << ", "
       << RD.MaxMovesEliminatedPerCycle << ", "
       << RD.AllowZeroMoveEliminationOnly << "},\n";
    CostTblIndex += NumCostEntries;
  }
  OS << "};\n";

  return CostTblIndex;
}
void SubtargetEmitter::emitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  unsigned QueueID = 0;
  if (ProcModel.LoadQueue) {
    const Record *Queue = ProcModel.LoadQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << "  " << QueueID << ", // Resource Descriptor for the Load Queue\n";

  QueueID = 0;
  if (ProcModel.StoreQueue) {
    const Record *Queue =
        ProcModel.StoreQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << "  " << QueueID << ", // Resource Descriptor for the Store Queue\n";
}
void SubtargetEmitter::emitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  // Generate a table of register file descriptors (one entry per each user
  // defined register file), and a table of register costs.
  unsigned NumCostEntries = emitRegisterFileTables(ProcModel, OS);

  // Now generate a table for the extra processor info.
  OS << "\nstatic const llvm::MCExtraProcessorInfo " << ProcModel.ModelName
     << "ExtraInfo = {\n  ";

  // Add information related to the retire control unit.
  emitRetireControlUnitInfo(ProcModel, OS);

  // Add information related to the register files (i.e. where to find register
  // file descriptors and register costs).
  emitRegisterFileInfo(ProcModel, ProcModel.RegisterFiles.size(),
                       NumCostEntries, OS);

  // Add information about load/store queues.
  emitLoadStoreQueueInfo(ProcModel, OS);

  OS << "};\n";
}
void SubtargetEmitter::emitProcessorResources(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  emitProcessorResourceSubUnits(ProcModel, OS);

  OS << "\n// {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}\n";
  OS << "static const llvm::MCProcResourceDesc " << ProcModel.ModelName
     << "ProcResources"
     << "[] = {\n"
     << "  {\"InvalidUnit\", 0, 0, 0, 0},\n";

  unsigned SubUnitsOffset = 1;
  for (unsigned I = 0, E = ProcModel.ProcResourceDefs.size(); I < E; ++I) {
    const Record *PRDef = ProcModel.ProcResourceDefs[I];

    const Record *SuperDef = nullptr;
    unsigned SuperIdx = 0;
    unsigned NumUnits = 0;
    const unsigned SubUnitsBeginOffset = SubUnitsOffset;
    int BufferSize = PRDef->getValueAsInt("BufferSize");
    if (PRDef->isSubClassOf("ProcResGroup")) {
      for (const Record *RU : PRDef->getValueAsListOfDefs("Resources")) {
        NumUnits += RU->getValueAsInt("NumUnits");
        SubUnitsOffset += RU->getValueAsInt("NumUnits");
      }
    } else {
      // Find the SuperIdx
      if (PRDef->getValueInit("Super")->isComplete()) {
        SuperDef = SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"),
                                                ProcModel, PRDef->getLoc());
        SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
      }
      NumUnits = PRDef->getValueAsInt("NumUnits");
    }
    // Emit the ProcResourceDesc
    OS << "  {\"" << PRDef->getName() << "\", ";
    if (PRDef->getName().size() < 15)
      OS.indent(15 - PRDef->getName().size());
    OS << NumUnits << ", " << SuperIdx << ", " << BufferSize << ", ";
    if (SubUnitsBeginOffset != SubUnitsOffset) {
      OS << ProcModel.ModelName << "ProcResourceSubUnits + "
         << SubUnitsBeginOffset;
    } else {
      OS << "nullptr";
    }
    OS << "}, // #" << I + 1;
    if (SuperDef)
      OS << ", Super=" << SuperDef->getName();
    OS << "\n";
  }
  OS << "};\n";
}
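// Schematic example of the emitted table (names and numbers illustrative
// only; the fields are {Name, NumUnits, SuperIdx, BufferSize,
// SubUnitsIdxBegin}):
//
//   static const llvm::MCProcResourceDesc FooModelProcResources[] = {
//     {"InvalidUnit",  0, 0, 0, 0},
//     {"FooPort0",     1, 0, 0, nullptr}, // #1
//     {"FooPort01",    2, 0, 0, FooModelProcResourceSubUnits + 1}, // #2
//   };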
// Find the WriteRes Record that defines processor resources for this
// SchedWrite.
const Record *
SubtargetEmitter::findWriteResources(const CodeGenSchedRW &SchedWrite,
                                     const CodeGenProcModel &ProcModel) {

  // Check if the SchedWrite is already subtarget-specific and directly
  // specifies a set of processor resources.
  if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
    return SchedWrite.TheDef;

  const Record *AliasDef = nullptr;
  for (const Record *A : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      const Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases "
                      "defined for processor " +
                          ProcModel.ModelName +
                          " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
    return AliasDef;

  // Check this processor's list of write resources.
  const Record *ResDef = nullptr;
  for (const Record *WR : ProcModel.WriteResDefs) {
    if (!WR->isSubClassOf("WriteRes"))
      continue;
    const Record *WRDef = WR->getValueAsDef("WriteType");
    if (AliasDef == WRDef || SchedWrite.TheDef == WRDef) {
      if (ResDef) {
        PrintFatalError(WR->getLoc(), "Resources are defined for both "
                                      "SchedWrite and its alias on processor " +
                                          ProcModel.ModelName);
      }
      ResDef = WR;
      // If there is no AliasDef and we find a match, we can early exit since
      // there is no need to verify whether there are resources defined for both
      // SchedWrite and its alias.
      if (!AliasDef)
        break;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindWriteResources recursively with that model here.
  if (!ResDef) {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                        SchedWrite.TheDef->getName());
  }
  return ResDef;
}
/// Find the ReadAdvance record for the given SchedRead on this processor or
/// return NULL.
const Record *
SubtargetEmitter::findReadAdvance(const CodeGenSchedRW &SchedRead,
                                  const CodeGenProcModel &ProcModel) {
  // Check for SchedReads that directly specify a ReadAdvance.
  if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
    return SchedRead.TheDef;

  // Check this processor's list of aliases for SchedRead.
  const Record *AliasDef = nullptr;
  for (const Record *A : SchedRead.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      const Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases "
                      "defined for processor " +
                          ProcModel.ModelName +
                          " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
    return AliasDef;

  // Check this processor's ReadAdvanceList.
  const Record *ResDef = nullptr;
  for (const Record *RA : ProcModel.ReadAdvanceDefs) {
    if (!RA->isSubClassOf("ReadAdvance"))
      continue;
    const Record *RADef = RA->getValueAsDef("ReadType");
    if (AliasDef == RADef || SchedRead.TheDef == RADef) {
      if (ResDef) {
        PrintFatalError(RA->getLoc(), "Resources are defined for both "
                                      "SchedRead and its alias on processor " +
                                          ProcModel.ModelName);
      }
      ResDef = RA;
      // If there is no AliasDef and we find a match, we can early exit since
      // there is no need to verify whether there are resources defined for both
      // SchedRead and its alias.
      if (!AliasDef)
        break;
    }
  }

  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindReadAdvance recursively with that model here.
  if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                        SchedRead.TheDef->getName());
  }
  return ResDef;
}
// Expand an explicit list of processor resources into a full list of implied
// resource groups and super resources that cover them.
void SubtargetEmitter::expandProcResources(
    ConstRecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles,
    std::vector<int64_t> &AcquireAtCycles, const CodeGenProcModel &PM) {
  assert(PRVec.size() == ReleaseAtCycles.size() && "failed precondition");
  for (unsigned I = 0, E = PRVec.size(); I != E; ++I) {
    const Record *PRDef = PRVec[I];
    ConstRecVec SubResources;
    if (PRDef->isSubClassOf("ProcResGroup"))
      SubResources = PRDef->getValueAsListOfDefs("Resources");
    else {
      SubResources.push_back(PRDef);
      PRDef = SchedModels.findProcResUnits(PRDef, PM, PRDef->getLoc());
      for (const Record *SubDef = PRDef;
           SubDef->getValueInit("Super")->isComplete();) {
        if (SubDef->isSubClassOf("ProcResGroup")) {
          // Disallow this for simplicity.
          PrintFatalError(SubDef->getLoc(), "Processor resource group "
                                            " cannot be a super resource.");
        }
        const Record *SuperDef = SchedModels.findProcResUnits(
            SubDef->getValueAsDef("Super"), PM, SubDef->getLoc());
        PRVec.push_back(SuperDef);
        ReleaseAtCycles.push_back(ReleaseAtCycles[I]);
        AcquireAtCycles.push_back(AcquireAtCycles[I]);
        SubDef = SuperDef;
      }
    }
    for (const Record *PR : PM.ProcResourceDefs) {
      if (PR == PRDef || !PR->isSubClassOf("ProcResGroup"))
        continue;
      ConstRecVec SuperResources = PR->getValueAsListOfDefs("Resources");
      ConstRecIter SubI = SubResources.begin(), SubE = SubResources.end();
      for (; SubI != SubE; ++SubI) {
        if (!is_contained(SuperResources, *SubI))
          break;
      }
      if (SubI == SubE) {
        PRVec.push_back(PR);
        ReleaseAtCycles.push_back(ReleaseAtCycles[I]);
        AcquireAtCycles.push_back(AcquireAtCycles[I]);
      }
    }
  }
}
// Generate the SchedClass table for this processor and update global
// tables. Must be called for each processor in order.
void SubtargetEmitter::genSchedClassTables(const CodeGenProcModel &ProcModel,
                                           SchedClassTables &SchedTables) {
  std::vector<MCSchedClassDesc> &SCTab =
      SchedTables.ProcSchedClasses.emplace_back();
  if (!ProcModel.hasInstrSchedModel())
    return;

  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    LLVM_DEBUG(SC.dump(&SchedModels));

    MCSchedClassDesc &SCDesc = SCTab.emplace_back();
    // SCDesc.Name is guarded by NDEBUG
    SCDesc.NumMicroOps = 0;
    SCDesc.BeginGroup = false;
    SCDesc.EndGroup = false;
    SCDesc.RetireOOO = false;
    SCDesc.WriteProcResIdx = 0;
    SCDesc.WriteLatencyIdx = 0;
    SCDesc.ReadAdvanceIdx = 0;

    // A Variant SchedClass has no resources of its own.
    bool HasVariants = false;
    for (const CodeGenSchedTransition &CGT :
         make_range(SC.Transitions.begin(), SC.Transitions.end())) {
      if (CGT.ProcIndex == ProcModel.Index) {
        HasVariants = true;
        break;
      }
    }
    if (HasVariants) {
      SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
      continue;
    }

    // Determine if the SchedClass is actually reachable on this processor. If
    // not don't try to locate the processor resources, it will fail.
    // If ProcIndices contains 0, this class applies to all processors.
    assert(!SC.ProcIndices.empty() && "expect at least one procidx");
    if (SC.ProcIndices[0] != 0) {
      if (!is_contained(SC.ProcIndices, ProcModel.Index))
        continue;
    }

    IdxVec Writes = SC.Writes;
    IdxVec Reads = SC.Reads;
    if (!SC.InstRWs.empty()) {
      // This class has a default ReadWrite list which can be overridden by
      // InstRW definitions.
      const Record *RWDef = nullptr;
      for (const Record *RW : SC.InstRWs) {
        const Record *RWModelDef = RW->getValueAsDef("SchedModel");
        if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
          RWDef = RW;
          break;
        }
      }
      if (RWDef) {
        Writes.clear();
        Reads.clear();
        SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
                            Writes, Reads);
      }
    }
    if (Writes.empty()) {
      // Check this processor's itinerary class resources.
      for (const Record *I : ProcModel.ItinRWDefs) {
        ConstRecVec Matched = I->getValueAsListOfDefs("MatchedItinClasses");
        if (is_contained(Matched, SC.ItinClassDef)) {
          SchedModels.findRWs(I->getValueAsListOfDefs("OperandReadWrites"),
                              Writes, Reads);
          break;
        }
      }
      if (Writes.empty()) {
        LLVM_DEBUG(dbgs() << ProcModel.ModelName
                          << " does not have resources for class " << SC.Name
                          << '\n');
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        continue;
      }
    }
    // Sum resources across all operand writes.
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
    for (unsigned W : Writes) {
      IdxVec WriteSeq;
      SchedModels.expandRWSeqForProc(W, WriteSeq, /*IsRead=*/false, ProcModel);

      // For each operand, create a latency entry.
      MCWriteLatencyEntry WLEntry;
      WLEntry.Cycles = 0;
      unsigned WriteID = WriteSeq.back();
      WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
      // If this Write is not referenced by a ReadAdvance, don't distinguish it
      // from other WriteLatency entries.
      if (!ProcModel.hasReadOfWrite(SchedModels.getSchedWrite(WriteID).TheDef))
        WriteID = 0;
      WLEntry.WriteResourceID = WriteID;

      for (unsigned WS : WriteSeq) {

        const Record *WriteRes =
            findWriteResources(SchedModels.getSchedWrite(WS), ProcModel);

        // Mark the parent class as invalid for unsupported write types.
        if (WriteRes->getValueAsBit("Unsupported")) {
          SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
          break;
        }

        WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
        SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.RetireOOO |= WriteRes->getValueAsBit("RetireOOO");

        // Create an entry for each ProcResource listed in WriteRes.
        ConstRecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
        std::vector<int64_t> ReleaseAtCycles =
            WriteRes->getValueAsListOfInts("ReleaseAtCycles");

        std::vector<int64_t> AcquireAtCycles =
            WriteRes->getValueAsListOfInts("AcquireAtCycles");

        // Check consistency of the two vectors carrying the start and
        // stop cycles of the resources.
        if (!ReleaseAtCycles.empty() &&
            ReleaseAtCycles.size() != PRVec.size()) {
          // If ReleaseAtCycles is provided, check consistency.
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent release at cycles: size(ReleaseAtCycles) != "
                    "size(ProcResources): ")
                  .concat(Twine(PRVec.size()))
                  .concat(Twine(" vs "))
                  .concat(Twine(ReleaseAtCycles.size())));
        }

        if (!AcquireAtCycles.empty() &&
            AcquireAtCycles.size() != PRVec.size()) {
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent resource cycles: size(AcquireAtCycles) != "
                    "size(ProcResources): ")
                  .concat(Twine(AcquireAtCycles.size()))
                  .concat(Twine(" vs "))
                  .concat(Twine(PRVec.size())));
        }

        if (ReleaseAtCycles.empty()) {
          // If ReleaseAtCycles is not provided, default to one cycle
          // per resource.
          ReleaseAtCycles.resize(PRVec.size(), 1);
        }

        if (AcquireAtCycles.empty()) {
          // If AcquireAtCycles is not provided, reserve the resource
          // starting from cycle 0.
          AcquireAtCycles.resize(PRVec.size(), 0);
        }

        assert(AcquireAtCycles.size() == ReleaseAtCycles.size());

        expandProcResources(PRVec, ReleaseAtCycles, AcquireAtCycles, ProcModel);
        assert(AcquireAtCycles.size() == ReleaseAtCycles.size());

        for (unsigned PRIdx = 0, PREnd = PRVec.size(); PRIdx != PREnd;
             ++PRIdx) {
          MCWriteProcResEntry WPREntry;
          WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
          assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
          WPREntry.ReleaseAtCycle = ReleaseAtCycles[PRIdx];
          WPREntry.AcquireAtCycle = AcquireAtCycles[PRIdx];
          if (AcquireAtCycles[PRIdx] > ReleaseAtCycles[PRIdx]) {
            PrintFatalError(
                WriteRes->getLoc(),
                Twine("Inconsistent resource cycles: AcquireAtCycles "
                      "< ReleaseAtCycles must hold."));
          }
          if (AcquireAtCycles[PRIdx] < 0) {
            PrintFatalError(WriteRes->getLoc(),
                            Twine("Invalid value: AcquireAtCycle "
                                  "must be a non-negative value."));
          }
          // If this resource is already used in this sequence, add the current
          // entry's cycles so that the same resource appears to be used
          // serially, rather than multiple parallel uses. This is important for
          // in-order machine where the resource consumption is a hazard.
          unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
          for (; WPRIdx != WPREnd; ++WPRIdx) {
            if (WriteProcResources[WPRIdx].ProcResourceIdx ==
                WPREntry.ProcResourceIdx) {
              // TODO: multiple use of the same resources would
              // require either 1. thinking of how to handle multiple
              // intervals for the same resource in
              // `<Target>WriteProcResTable` (see
              // `SubtargetEmitter::EmitSchedClassTables`), or
              // 2. thinking how to merge multiple intervals into a
              // single interval.
              assert(WPREntry.AcquireAtCycle == 0 &&
                     "multiple use of the same resource is not yet handled");
              WriteProcResources[WPRIdx].ReleaseAtCycle +=
                  WPREntry.ReleaseAtCycle;
              break;
            }
          }
          if (WPRIdx == WPREnd)
            WriteProcResources.push_back(WPREntry);
        }
      }
      WriteLatencies.push_back(WLEntry);
    }
    // Create an entry for each operand Read in this SchedClass.
    // Entries must be sorted first by UseIdx then by WriteResourceID.
    for (unsigned UseIdx = 0, EndIdx = Reads.size(); UseIdx != EndIdx;
         ++UseIdx) {
      const Record *ReadAdvance =
          findReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
      if (!ReadAdvance)
        continue;

      // Mark the parent class as invalid for unsupported write types.
      if (ReadAdvance->getValueAsBit("Unsupported")) {
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        break;
      }
      ConstRecVec ValidWrites =
          ReadAdvance->getValueAsListOfDefs("ValidWrites");
      IdxVec WriteIDs;
      if (ValidWrites.empty())
        WriteIDs.push_back(0);
      else {
        for (const Record *VW : ValidWrites) {
          unsigned WriteID = SchedModels.getSchedRWIdx(VW, /*IsRead=*/false);
          assert(WriteID != 0 &&
                 "Expected a valid SchedRW in the list of ValidWrites");
          WriteIDs.push_back(WriteID);
        }
      }
      llvm::sort(WriteIDs);
      for (unsigned W : WriteIDs) {
        MCReadAdvanceEntry RAEntry;
        RAEntry.UseIdx = UseIdx;
        RAEntry.WriteResourceID = W;
        RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
        ReadAdvanceEntries.push_back(RAEntry);
      }
    }
    if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
      WriteProcResources.clear();
      WriteLatencies.clear();
      ReadAdvanceEntries.clear();
    }
    // Add the information for this SchedClass to the global tables using basic
    // sharing with previously emitted sequences.

    // WritePrecRes entries are sorted by ProcResIdx.
    llvm::sort(WriteProcResources, LessWriteProcResources());

    SCDesc.NumWriteProcResEntries = WriteProcResources.size();
    std::vector<MCWriteProcResEntry>::iterator WPRPos =
        std::search(SchedTables.WriteProcResources.begin(),
                    SchedTables.WriteProcResources.end(),
                    WriteProcResources.begin(), WriteProcResources.end());
    if (WPRPos != SchedTables.WriteProcResources.end())
      SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
    else {
      SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
      SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
                                            WriteProcResources.end());
    }
    // Latency entries must remain in operand order.
    SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
    std::vector<MCWriteLatencyEntry>::iterator WLPos = std::search(
        SchedTables.WriteLatencies.begin(), SchedTables.WriteLatencies.end(),
        WriteLatencies.begin(), WriteLatencies.end());
    if (WLPos != SchedTables.WriteLatencies.end()) {
      unsigned Idx = WLPos - SchedTables.WriteLatencies.begin();
      SCDesc.WriteLatencyIdx = Idx;
      for (unsigned I = 0, E = WriteLatencies.size(); I < E; ++I)
        if (SchedTables.WriterNames[Idx + I].find(WriterNames[I]) ==
            std::string::npos) {
          SchedTables.WriterNames[Idx + I] += std::string("_") + WriterNames[I];
        }
    } else {
      SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
      llvm::append_range(SchedTables.WriteLatencies, WriteLatencies);
      llvm::append_range(SchedTables.WriterNames, WriterNames);
    }
    // ReadAdvanceEntries must remain in operand order.
    SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
    std::vector<MCReadAdvanceEntry>::iterator RAPos =
        std::search(SchedTables.ReadAdvanceEntries.begin(),
                    SchedTables.ReadAdvanceEntries.end(),
                    ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
    if (RAPos != SchedTables.ReadAdvanceEntries.end())
      SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
    else {
      SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
      llvm::append_range(SchedTables.ReadAdvanceEntries, ReadAdvanceEntries);
    }
  }
}
// Emit SchedClass tables for all processors and associated global tables.
void SubtargetEmitter::emitSchedClassTables(SchedClassTables &SchedTables,
                                            raw_ostream &OS) {
  // Emit global WriteProcResTable.
  OS << "\n// {ProcResourceIdx, ReleaseAtCycle, AcquireAtCycle}\n"
     << "extern const llvm::MCWriteProcResEntry " << Target
     << "WriteProcResTable[] = {\n"
     << "  { 0,  0,  0 }, // Invalid\n";
  for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
       WPRIdx != WPREnd; ++WPRIdx) {
    MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
    OS << "  {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
       << format("%2d", WPREntry.ReleaseAtCycle) << ", "
       << format("%2d", WPREntry.AcquireAtCycle) << "}";
    if (WPRIdx + 1 < WPREnd)
      OS << ',';
    OS << " // #" << WPRIdx << '\n';
  }
  OS << "}; // " << Target << "WriteProcResTable\n";

  // Emit global WriteLatencyTable.
  OS << "\n// {Cycles, WriteResourceID}\n"
     << "extern const llvm::MCWriteLatencyEntry " << Target
     << "WriteLatencyTable[] = {\n"
     << "  { 0,  0}, // Invalid\n";
  for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
       WLIdx != WLEnd; ++WLIdx) {
    MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
    OS << "  {" << format("%2d", WLEntry.Cycles) << ", "
       << format("%2d", WLEntry.WriteResourceID) << "}";
    if (WLIdx + 1 < WLEnd)
      OS << ',';
    OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
  }
  OS << "}; // " << Target << "WriteLatencyTable\n";

  // Emit global ReadAdvanceTable.
  OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
     << "extern const llvm::MCReadAdvanceEntry " << Target
     << "ReadAdvanceTable[] = {\n"
     << "  {0,  0,  0}, // Invalid\n";
  for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
       RAIdx != RAEnd; ++RAIdx) {
    MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
    OS << "  {" << RAEntry.UseIdx << ", "
       << format("%2d", RAEntry.WriteResourceID) << ", "
       << format("%2d", RAEntry.Cycles) << "}";
    if (RAIdx + 1 < RAEnd)
      OS << ',';
    OS << " // #" << RAIdx << '\n';
  }
  OS << "}; // " << Target << "ReadAdvanceTable\n";

  // Emit a SchedClass table for each processor.
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI) {
    if (!PI->hasInstrSchedModel())
      continue;

    std::vector<MCSchedClassDesc> &SCTab =
        SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];

    OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
       << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
    OS << "static const llvm::MCSchedClassDesc " << PI->ModelName
       << "SchedClasses[] = {\n";

    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
    assert(SchedModels.getSchedClass(0).Name == "NoInstrModel" &&
           "invalid class not first");
    OS << "  {DBGFIELD(\"InvalidSchedClass\") "
       << MCSchedClassDesc::InvalidNumMicroOps
       << ", false, false, false, 0, 0, 0, 0, 0, 0},\n";

    for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
      MCSchedClassDesc &MCDesc = SCTab[SCIdx];
      const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
      OS << "  {DBGFIELD(\"" << SchedClass.Name << "\") ";
      if (SchedClass.Name.size() < 18)
        OS.indent(18 - SchedClass.Name.size());
      OS << MCDesc.NumMicroOps << ", " << (MCDesc.BeginGroup ? "true" : "false")
         << ", " << (MCDesc.EndGroup ? "true" : "false") << ", "
         << (MCDesc.RetireOOO ? "true" : "false") << ", "
         << format("%2d", MCDesc.WriteProcResIdx) << ", "
         << MCDesc.NumWriteProcResEntries << ", "
         << format("%2d", MCDesc.WriteLatencyIdx) << ", "
         << MCDesc.NumWriteLatencyEntries << ", "
         << format("%2d", MCDesc.ReadAdvanceIdx) << ", "
         << MCDesc.NumReadAdvanceEntries << "}, // #" << SCIdx << '\n';
    }
    OS << "}; // " << PI->ModelName << "SchedClasses\n";
  }
}
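// The global tables above are shared by all processor models of the target;
// each per-model MCSchedClassDesc row indexes into them. A row looks roughly
// like this (name and numbers illustrative only):
//
//   {DBGFIELD("WriteALU")  1, false, false, false,  1, 1,  1, 1,  0, 0}, // #1
//
// i.e. {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,
//       WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}.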
void SubtargetEmitter::emitProcessorModels(raw_ostream &OS) {
  // For each processor model.
  for (const CodeGenProcModel &PM : SchedModels.procModels()) {
    // Emit extra processor info if available.
    if (PM.hasExtraProcessorInfo())
      emitExtraProcessorInfo(PM, OS);
    // Emit processor resource table.
    if (PM.hasInstrSchedModel())
      emitProcessorResources(PM, OS);
    else if (!PM.ProcResourceDefs.empty())
      PrintFatalError(PM.ModelDef->getLoc(),
                      "SchedMachineModel defines "
                      "ProcResources without defining WriteRes SchedWriteRes");

    // Begin processor itinerary properties
    OS << "\n";
    OS << "static const llvm::MCSchedModel " << PM.ModelName << " = {\n";
    emitProcessorProp(OS, PM.ModelDef, "IssueWidth", ',');
    emitProcessorProp(OS, PM.ModelDef, "MicroOpBufferSize", ',');
    emitProcessorProp(OS, PM.ModelDef, "LoopMicroOpBufferSize", ',');
    emitProcessorProp(OS, PM.ModelDef, "LoadLatency", ',');
    emitProcessorProp(OS, PM.ModelDef, "HighLatency", ',');
    emitProcessorProp(OS, PM.ModelDef, "MispredictPenalty", ',');

    bool PostRAScheduler =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("PostRAScheduler") : false);

    OS << "  " << (PostRAScheduler ? "true" : "false") << ", // "
       << "PostRAScheduler\n";

    bool CompleteModel =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("CompleteModel") : false);

    OS << "  " << (CompleteModel ? "true" : "false") << ", // "
       << "CompleteModel\n";

    bool EnableIntervals =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("EnableIntervals") : false);

    OS << "  " << (EnableIntervals ? "true" : "false") << ", // "
       << "EnableIntervals\n";

    OS << "  " << PM.Index << ", // Processor ID\n";
    if (PM.hasInstrSchedModel())
      OS << "  " << PM.ModelName << "ProcResources"
         << ",\n"
         << "  " << PM.ModelName << "SchedClasses"
         << ",\n"
         << "  " << PM.ProcResourceDefs.size() + 1 << ",\n"
         << "  "
         << (SchedModels.schedClassEnd() - SchedModels.schedClassBegin())
         << ",\n";
    else
      OS << "  nullptr, nullptr, 0, 0,"
         << " // No instruction-level machine model.\n";
    if (PM.hasItineraries())
      OS << "  " << PM.ItinsDef->getName() << ",\n";
    else
      OS << "  nullptr, // No Itinerary\n";
    if (PM.hasExtraProcessorInfo())
      OS << "  &" << PM.ModelName << "ExtraInfo,\n";
    else
      OS << "  nullptr // No extra processor descriptor\n";
    OS << "};\n";
  }
}
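// Each MCSchedModel emitted above is a brace-initialized struct: the leading
// fields come from emitProcessorProp() (IssueWidth, MicroOpBufferSize, ...),
// followed by the PostRAScheduler/CompleteModel/EnableIntervals flags, the
// processor ID, and pointers to the per-model resource, sched-class,
// itinerary, and extra-info tables (or nullptr when absent). The field order
// here must match the member order of llvm::MCSchedModel.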
//
// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
//
void SubtargetEmitter::emitSchedModel(raw_ostream &OS) {
  OS << "#ifdef DBGFIELD\n"
     << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
     << "#endif\n"
     << "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)\n"
     << "#define DBGFIELD(x) x,\n"
     << "#else\n"
     << "#define DBGFIELD(x)\n"
     << "#endif\n";

  if (SchedModels.hasItineraries()) {
    std::vector<std::vector<InstrItinerary>> ProcItinLists;
    // Emit the stage data
    emitStageAndOperandCycleData(OS, ProcItinLists);
    emitItineraries(OS, ProcItinLists);
  }
  OS << "\n// ===============================================================\n"
     << "// Data tables for the new per-operand machine model.\n";

  SchedClassTables SchedTables;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    genSchedClassTables(ProcModel, SchedTables);
  }
  emitSchedClassTables(SchedTables, OS);

  OS << "\n#undef DBGFIELD\n";

  // Emit the processor machine model
  emitProcessorModels(OS);
}
static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream Stream(Buffer);

  // Print all PredicateProlog records to the output stream.
  for (const Record *P : Records.getAllDerivedDefinitions("PredicateProlog"))
    Stream << P->getValueAsString("Code") << '\n';

  OS << Buffer;
}
static bool isTruePredicate(const Record *Rec) {
  return Rec->isSubClassOf("MCSchedPredicate") &&
         Rec->getValueAsDef("Pred")->isSubClassOf("MCTrue");
}

static void emitPredicates(const CodeGenSchedTransition &T,
                           const CodeGenSchedClass &SC, PredicateExpander &PE,
                           raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream SS(Buffer);

  // If not all predicates are MCTrue, then we need an if-stmt.
  unsigned NumNonTruePreds =
      T.PredTerm.size() - count_if(T.PredTerm, isTruePredicate);

  SS << PE.getIndent();

  if (NumNonTruePreds) {
    bool FirstNonTruePredicate = true;
    SS << "if (";

    PE.getIndent() += 2;

    for (const Record *Rec : T.PredTerm) {
      // Skip predicates that evaluate to "true".
      if (isTruePredicate(Rec))
        continue;

      if (FirstNonTruePredicate) {
        FirstNonTruePredicate = false;
      } else {
        SS << "\n";
        SS << PE.getIndent();
        SS << "&& ";
      }

      if (Rec->isSubClassOf("MCSchedPredicate")) {
        PE.expandPredicate(SS, Rec->getValueAsDef("Pred"));
        continue;
      }

      // Expand this legacy predicate and wrap it around braces if there is more
      // than one predicate to expand.
      SS << ((NumNonTruePreds > 1) ? "(" : "")
         << Rec->getValueAsString("Predicate")
         << ((NumNonTruePreds > 1) ? ")" : "");
    }

    SS << ")\n"; // end of if-stmt

    SS << PE.getIndent();
    PE.getIndent() -= 2;
  }

  SS << "return " << T.ToClassIdx << "; // " << SC.Name << '\n';
  OS << Buffer;
}
// Used by method `SubtargetEmitter::emitSchedModelHelpersImpl()` to generate
// epilogue code for the auto-generated helper.
static void emitSchedModelHelperEpilogue(raw_ostream &OS,
                                         bool ShouldReturnZero) {
  if (ShouldReturnZero) {
    OS << "  // Don't know how to resolve this scheduling class.\n"
       << "  return 0;\n";
    return;
  }

  OS << "  report_fatal_error(\"Expected a variant SchedClass\");\n";
}

static bool hasMCSchedPredicates(const CodeGenSchedTransition &T) {
  return all_of(T.PredTerm, [](const Record *Rec) {
    return Rec->isSubClassOf("MCSchedPredicate");
  });
}

static void collectVariantClasses(const CodeGenSchedModels &SchedModels,
                                  IdxVec &VariantClasses,
                                  bool OnlyExpandMCInstPredicates) {
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    // Ignore non-variant scheduling classes.
    if (SC.Transitions.empty())
      continue;

    if (OnlyExpandMCInstPredicates) {
      // Ignore this variant scheduling class if no transitions use any
      // meaningful MCSchedPredicate definitions.
      if (llvm::none_of(SC.Transitions, hasMCSchedPredicates))
        continue;
    }

    VariantClasses.push_back(SC.Index);
  }
}

static void collectProcessorIndices(const CodeGenSchedClass &SC,
                                    IdxVec &ProcIndices) {
  // A variant scheduling class may define transitions for multiple
  // processors. This function identifies which processors are associated with
  // transition rules specified by variant class `SC`.
  for (const CodeGenSchedTransition &T : SC.Transitions) {
    IdxVec PI;
    std::set_union(&T.ProcIndex, &T.ProcIndex + 1, ProcIndices.begin(),
                   ProcIndices.end(), std::back_inserter(PI));
    ProcIndices = std::move(PI);
  }
}

static bool isAlwaysTrue(const CodeGenSchedTransition &T) {
  return llvm::all_of(T.PredTerm, isTruePredicate);
}
1674 void SubtargetEmitter::emitSchedModelHelpersImpl(
1675 raw_ostream
&OS
, bool OnlyExpandMCInstPredicates
) {
1676 IdxVec VariantClasses
;
1677 collectVariantClasses(SchedModels
, VariantClasses
,
1678 OnlyExpandMCInstPredicates
);
1680 if (VariantClasses
.empty()) {
1681 emitSchedModelHelperEpilogue(OS
, OnlyExpandMCInstPredicates
);
  // Construct a switch statement where the condition is a check on the
  // scheduling class identifier. There is a `case` for every variant class
  // defined by the processor models of this target.
  // Each `case` implements a number of rules to resolve (i.e. to transition
  // from) a variant scheduling class to another scheduling class. Rules are
  // described by instances of CodeGenSchedTransition. Note that transitions
  // may not be valid for all processors.
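  //
  // For illustration only, the emitted helper has roughly this shape (an
  // abridged sketch, not verbatim TableGen output; the class indices, model
  // name, and predicate are hypothetical):
  //
  //   switch (SchedClass) {
  //   case 42: // WriteFooVar
  //     if (SchedModel->getProcessorID() == 3) { // MyProcModel
  //       if (<expanded predicate>)
  //         return 17; // WriteFooFast
  //       return 11; // WriteFooDefault
  //     }
  //     break;
  //   };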
  OS << "  switch (SchedClass) {\n";
  for (unsigned VC : VariantClasses) {
    IdxVec ProcIndices;
    const CodeGenSchedClass &SC = SchedModels.getSchedClass(VC);
    collectProcessorIndices(SC, ProcIndices);

    OS << "  case " << VC << ": // " << SC.Name << '\n';

    PredicateExpander PE(Target);
    PE.setByRef(false);
    PE.setExpandForMC(OnlyExpandMCInstPredicates);
    for (unsigned PI : ProcIndices) {
      OS << "    ";

      // Emit a guard on the processor ID.
      if (PI != 0) {
        OS << (OnlyExpandMCInstPredicates
                   ? "if (CPUID == "
                   : "if (SchedModel->getProcessorID() == ");
        OS << PI << ") ";
        OS << "{ // " << (SchedModels.procModelBegin() + PI)->ModelName << '\n';
      }

      // Now emit transitions associated with processor PI.
      const CodeGenSchedTransition *FinalT = nullptr;
      for (const CodeGenSchedTransition &T : SC.Transitions) {
        if (PI != 0 && T.ProcIndex != PI)
          continue;

        // Emit only transitions based on MCSchedPredicate, if it's the case.
        // At least the transition specified by NoSchedPred is emitted,
        // which becomes the default transition for those variants otherwise
        // not based on MCSchedPredicate.
        // FIXME: preferably, llvm-mca should instead assume a reasonable
        // default when a variant transition is not based on MCSchedPredicate
        // for a given processor.
        if (OnlyExpandMCInstPredicates && !hasMCSchedPredicates(T))
          continue;

        // If the transition is folded to 'return X' it should be the last one.
        if (isAlwaysTrue(T)) {
          FinalT = &T;
          continue;
        }
        emitPredicates(T, SchedModels.getSchedClass(T.ToClassIdx), PE, OS);
      }
      if (FinalT)
        emitPredicates(*FinalT, SchedModels.getSchedClass(FinalT->ToClassIdx),
                       PE, OS);

      OS << "    }\n";

      if (PI == 0)
        break;
    }

    if (SC.isInferred())
      OS << "    return " << SC.Index << ";\n";
    OS << "    break;\n";
  }

  OS << "  };\n";

  emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
}
void SubtargetEmitter::emitSchedModelHelpers(const std::string &ClassName,
                                             raw_ostream &OS) {
  OS << "unsigned " << ClassName
     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
     << " const TargetSchedModel *SchedModel) const {\n";

  // Emit the predicate prolog code.
  emitPredicateProlog(Records, OS);

  // Emit target predicates.
  emitSchedModelHelpersImpl(OS);

  OS << "} // " << ClassName << "::resolveSchedClass\n\n";

  OS << "unsigned " << ClassName
     << "\n::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,"
     << " const MCInstrInfo *MCII, unsigned CPUID) const {\n"
     << "  return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n"
     << "} // " << ClassName << "::resolveVariantSchedClass\n\n";

  STIPredicateExpander PE(Target, /*Indent=*/0);
  PE.setClassPrefix(ClassName);
  PE.setExpandDefinition(true);

  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);
}
void SubtargetEmitter::emitHwModeCheck(const std::string &ClassName,
                                       raw_ostream &OS) {
  const CodeGenHwModes &CGH = TGT.getHwModes();
  assert(CGH.getNumModeIds() > 0);
  if (CGH.getNumModeIds() == 1)
    return;
  // Collect all HwModes and related features defined in the TD files,
  // and store them as a bit set.
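  // A non-default mode with ID M is recorded at bit (M - 1) of the relevant
  // mask; e.g. a ValueType override attached to the mode with ID 2 sets bit 1
  // of ValueTypeModes. The default mode (ID 0) is never recorded.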
  unsigned ValueTypeModes = 0;
  unsigned RegInfoModes = 0;
  unsigned EncodingInfoModes = 0;
  for (const auto &MS : CGH.getHwModeSelects()) {
    for (const HwModeSelect::PairType &P : MS.second.Items) {
      if (P.first == DefaultMode)
        continue;
      if (P.second->isSubClassOf("ValueType")) {
        ValueTypeModes |= (1 << (P.first - 1));
      } else if (P.second->isSubClassOf("RegInfo") ||
                 P.second->isSubClassOf("SubRegRange")) {
        RegInfoModes |= (1 << (P.first - 1));
      } else if (P.second->isSubClassOf("InstructionEncoding")) {
        EncodingInfoModes |= (1 << (P.first - 1));
      }
    }
  }
  // Start emitting for getHwModeSet().
  OS << "unsigned " << ClassName << "::getHwModeSet() const {\n";
  OS << "  // Collect HwModes and store them as a bit set.\n";
  OS << "  unsigned Modes = 0;\n";
  for (unsigned M = 1, NumModes = CGH.getNumModeIds(); M != NumModes; ++M) {
    const HwMode &HM = CGH.getMode(M);
    OS << "  if (checkFeatures(\"" << HM.Features << "\")) Modes |= (1 << "
       << (M - 1) << ");\n";
  }
  OS << "  return Modes;\n}\n";
  // End emitting for getHwModeSet().
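  //
  // Roughly, the generated method looks like this (illustrative sketch with
  // hypothetical target and feature names, not verbatim output):
  //
  //   unsigned FooGenSubtargetInfo::getHwModeSet() const {
  //     // Collect HwModes and store them as a bit set.
  //     unsigned Modes = 0;
  //     if (checkFeatures("+feature-a")) Modes |= (1 << 0);
  //     if (checkFeatures("+feature-b")) Modes |= (1 << 1);
  //     return Modes;
  //   }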
  auto HandlePerMode = [&](std::string ModeType, unsigned ModeInBitSet) {
    OS << "  case HwMode_" << ModeType << ":\n"
       << "    Modes &= " << ModeInBitSet << ";\n"
       << "    if (!Modes)\n      return Modes;\n"
       << "    if (!llvm::has_single_bit<unsigned>(Modes))\n"
       << "      llvm_unreachable(\"Two or more HwModes for " << ModeType
       << " were found!\");\n"
       << "    return llvm::countr_zero(Modes) + 1;\n";
  };
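  // Each emitted `case` narrows the mode set to the modes that carry an
  // override of the requested kind, returns 0 when no such mode is active,
  // asserts that at most one remains, and converts the surviving bit back to
  // a mode ID with countr_zero(Modes) + 1.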
  // Start emitting for getHwMode().
  OS << "unsigned " << ClassName
     << "::getHwMode(enum HwModeType type) const {\n";
  OS << "  unsigned Modes = getHwModeSet();\n\n";
  OS << "  if (!Modes)\n    return Modes;\n\n";
  OS << "  switch (type) {\n";
  OS << "  case HwMode_Default:\n    return llvm::countr_zero(Modes) + 1;\n";
  HandlePerMode("ValueType", ValueTypeModes);
  HandlePerMode("RegInfo", RegInfoModes);
  HandlePerMode("EncodingInfo", EncodingInfoModes);
  OS << "  }\n";
  OS << "  llvm_unreachable(\"unexpected HwModeType\");\n"
     << "  return 0; // should not get here\n}\n";
  // End emitting for getHwMode().
}
void SubtargetEmitter::emitGetMacroFusions(const std::string &ClassName,
                                           raw_ostream &OS) {
  if (!TGT.hasMacroFusion())
    return;

  OS << "std::vector<MacroFusionPredTy> " << ClassName
     << "::getMacroFusions() const {\n";
  OS.indent(2) << "std::vector<MacroFusionPredTy> Fusions;\n";
  for (auto *Fusion : TGT.getMacroFusions()) {
    std::string Name = Fusion->getNameInitAsString();
    OS.indent(2) << "if (hasFeature(" << Target << "::" << Name
                 << ")) Fusions.push_back(llvm::is" << Name << ");\n";
  }

  OS.indent(2) << "return Fusions;\n";
  OS << "}\n";
}
// Produces a subtarget specific function for parsing
// the subtarget features string.
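// For a boolean feature the emitted body tests the feature bit and assigns
// the field directly; for a numeric feature it only raises the field to the
// feature's value. An illustrative sketch of two emitted lines (hypothetical
// feature and field names, not verbatim output):
//
//   if (Bits[Foo::FeatureFast]) HasFast = true;
//   if (Bits[Foo::FeatureV3] && Version < 3) Version = 3;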
void SubtargetEmitter::parseFeaturesFunction(raw_ostream &OS) {
  ArrayRef<const Record *> Features =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
     << "// subtarget options.\n"
     << "void llvm::";
  OS << Target;
  OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, "
     << "StringRef FS) {\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nTuneCPU:\" << TuneCPU << \"\\n\\n\");\n";

  if (Features.empty()) {
    OS << "}\n";
    return;
  }

  if (Target == "AArch64")
    OS << "  CPU = AArch64::resolveCPUAlias(CPU);\n"
       << "  TuneCPU = AArch64::resolveCPUAlias(TuneCPU);\n";

  OS << "  InitMCProcessorInfo(CPU, TuneCPU, FS);\n"
     << "  const FeatureBitset &Bits = getFeatureBits();\n";

  for (const Record *R : Features) {
    StringRef Instance = R->getName();
    StringRef Value = R->getValueAsString("Value");
    StringRef FieldName = R->getValueAsString("FieldName");

    if (Value == "true" || Value == "false")
      OS << "  if (Bits[" << Target << "::" << Instance << "]) " << FieldName
         << " = " << Value << ";\n";
    else
      OS << "  if (Bits[" << Target << "::" << Instance << "] && " << FieldName
         << " < " << Value << ") " << FieldName << " = " << Value << ";\n";
  }

  OS << "}\n";
}
void SubtargetEmitter::emitGenMCSubtargetInfo(raw_ostream &OS) {
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,\n"
     << "    const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {\n";
  emitSchedModelHelpersImpl(OS, /* OnlyExpandMCPredicates */ true);
  OS << "}\n";
  OS << "} // end namespace " << Target << "_MC\n\n";

  OS << "struct " << Target
     << "GenMCSubtargetInfo : public MCSubtargetInfo {\n";
  OS << "  " << Target << "GenMCSubtargetInfo(const Triple &TT,\n"
     << "    StringRef CPU, StringRef TuneCPU, StringRef FS,\n"
     << "    ArrayRef<SubtargetFeatureKV> PF,\n"
     << "    ArrayRef<SubtargetSubTypeKV> PD,\n"
     << "    const MCWriteProcResEntry *WPR,\n"
     << "    const MCWriteLatencyEntry *WL,\n"
     << "    const MCReadAdvanceEntry *RA, const InstrStage *IS,\n"
     << "    const unsigned *OC, const unsigned *FP) :\n"
     << "      MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,\n"
     << "                      WPR, WL, RA, IS, OC, FP) { }\n\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,\n"
     << "      const MCInst *MI, const MCInstrInfo *MCII,\n"
     << "      unsigned CPUID) const override {\n"
     << "    return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n";
  OS << "  }\n";
  if (TGT.getHwModes().getNumModeIds() > 1) {
    OS << "  unsigned getHwModeSet() const override;\n";
    OS << "  unsigned getHwMode(enum HwModeType type = HwMode_Default) const "
          "override;\n";
  }
  if (Target == "AArch64")
    OS << "  bool isCPUStringValid(StringRef CPU) const override {\n"
       << "    CPU = AArch64::resolveCPUAlias(CPU);\n"
       << "    return MCSubtargetInfo::isCPUStringValid(CPU);\n"
       << "  }\n";
  OS << "};\n";

  emitHwModeCheck(Target + "GenMCSubtargetInfo", OS);
}
void SubtargetEmitter::emitMcInstrAnalysisPredicateFunctions(raw_ostream &OS) {
  OS << "\n#ifdef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  STIPredicateExpander PE(Target, /*Indent=*/0);
  PE.setExpandForMC(true);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  OS << "\n#ifdef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";

  std::string ClassPrefix = Target + "MCInstrAnalysis";
  PE.setExpandDefinition(true);
  PE.setClassPrefix(ClassPrefix);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
}
// SubtargetEmitter::run - Main subtarget enumeration emitter.
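//
// The output is split into the usual TableGen guard sections that clients
// enable with #define before including the generated file:
// GET_SUBTARGETINFO_ENUM, GET_SUBTARGETINFO_MC_DESC,
// GET_SUBTARGETINFO_TARGET_DESC, GET_SUBTARGETINFO_HEADER,
// GET_SUBTARGETINFO_CTOR, plus the STI predicate sections used by the
// MC instruction analysis.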
void SubtargetEmitter::run(raw_ostream &OS) {
  emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);

  OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
  OS << "#undef GET_SUBTARGETINFO_ENUM\n\n";

  OS << "namespace llvm {\n";
  auto FeatureMap = enumeration(OS);
  OS << "} // end namespace llvm\n\n";
  OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";

  emitSubtargetInfoMacroCalls(OS);

  OS << "namespace llvm {\n";
  unsigned NumFeatures = featureKeyValues(OS, FeatureMap);
  unsigned NumProcs = cpuKeyValues(OS, FeatureMap);

  // MCSubtargetInfo initialization routine.
  emitGenMCSubtargetInfo(OS);

  OS << "\nstatic inline MCSubtargetInfo *create" << Target
     << "MCSubtargetInfoImpl("
     << "const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS) {\n";
  if (Target == "AArch64")
    OS << "  CPU = AArch64::resolveCPUAlias(CPU);\n"
       << "  TuneCPU = AArch64::resolveCPUAlias(TuneCPU);\n";
  OS << "  return new " << Target
     << "GenMCSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  OS << Target << "FeatureKV, ";
  OS << Target << "SubTypeKV, ";
  OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, " << Target << "OperandCycles, " << Target
       << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ");\n}\n\n";

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n\n";

  OS << "#include \"llvm/Support/Debug.h\"\n";
  OS << "#include \"llvm/Support/raw_ostream.h\"\n\n";
  if (Target == "AArch64")
    OS << "#include \"llvm/TargetParser/AArch64TargetParser.h\"\n\n";
  parseFeaturesFunction(OS);

  OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";
  // Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
  OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
  OS << "#undef GET_SUBTARGETINFO_HEADER\n\n";

  std::string ClassName = Target + "GenSubtargetInfo";
  OS << "namespace llvm {\n";
  OS << "class DFAPacketizer;\n";
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID);\n"
     << "} // end namespace " << Target << "_MC\n\n";
  OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
     << "  explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS);\n"
     << "public:\n"
     << "  unsigned resolveSchedClass(unsigned SchedClass, "
     << " const MachineInstr *DefMI,"
     << " const TargetSchedModel *SchedModel) const override;\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII,"
     << " unsigned CPUID) const override;\n"
     << "  DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
     << " const;\n";
  if (TGT.getHwModes().getNumModeIds() > 1) {
    OS << "  unsigned getHwModeSet() const override;\n";
    OS << "  unsigned getHwMode(enum HwModeType type = HwMode_Default) const "
          "override;\n";
  }
  if (TGT.hasMacroFusion())
    OS << "  std::vector<MacroFusionPredTy> getMacroFusions() const "
          "override;\n";

  STIPredicateExpander PE(Target);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "};\n"
     << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";
  OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
  OS << "#undef GET_SUBTARGETINFO_CTOR\n\n";

  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n\n";
  OS << "namespace llvm {\n";
  OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
  OS << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[];\n";
  OS << "extern const llvm::MCWriteProcResEntry " << Target
     << "WriteProcResTable[];\n";
  OS << "extern const llvm::MCWriteLatencyEntry " << Target
     << "WriteLatencyTable[];\n";
  OS << "extern const llvm::MCReadAdvanceEntry " << Target
     << "ReadAdvanceTable[];\n";

  if (SchedModels.hasItineraries()) {
    OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
    OS << "extern const unsigned " << Target << "OperandCycles[];\n";
    OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
  }

  OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS)\n";

  if (Target == "AArch64")
    OS << "  : TargetSubtargetInfo(TT, AArch64::resolveCPUAlias(CPU),\n"
       << "                        AArch64::resolveCPUAlias(TuneCPU), FS, ";
  else
    OS << "  : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, ";

  OS << "ArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
  OS << "ArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
  OS << Target << "WriteProcResTable, " << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, " << Target << "OperandCycles, " << Target
       << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ") {}\n\n";

  emitSchedModelHelpers(ClassName, OS);
  emitHwModeCheck(ClassName, OS);
  emitGetMacroFusions(ClassName, OS);

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";

  emitMcInstrAnalysisPredicateFunctions(OS);
}
static TableGen::Emitter::OptClass<SubtargetEmitter>
    X("gen-subtarget", "Generate subtarget enumerations");