//===- SubtargetEmitter.cpp - Generate subtarget enumerations ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits subtarget enumerations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenHwModes.h"
#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "PredicateExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"
#include "llvm/TargetParser/SubtargetFeature.h"

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"

namespace {
/// Sorting predicate to sort record pointers by their FieldName field.
struct LessRecordFieldFieldName {
  bool operator()(const Record *Rec1, const Record *Rec2) const {
    return Rec1->getValueAsString("FieldName") <
           Rec2->getValueAsString("FieldName");
  }
};

class SubtargetEmitter {
  // Each processor has a SchedClassDesc table with an entry for each
  // SchedClass. The SchedClassDesc table indexes into a global write resource
  // table, write latency table, and read advance table.
  struct SchedClassTables {
    std::vector<std::vector<MCSchedClassDesc>> ProcSchedClasses;
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;

    // Reserve an invalid entry at index 0.
    SchedClassTables() {
      ProcSchedClasses.resize(1);
      WriteProcResources.resize(1);
      WriteLatencies.resize(1);
      WriterNames.push_back("InvalidWrite");
      ReadAdvanceEntries.resize(1);
    }
  };
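  // Illustrative note (names and numbers invented): a single MCSchedClassDesc
  // row built from these tables might look like
  //   {DBGFIELD("WriteALU")  1, false, false, false,  2, 1,  3, 1,  0, 0},
  // where the trailing index/count pairs are offsets into the shared
  // WriteProcResources, WriteLatencies and ReadAdvanceEntries vectors gathered
  // here; see EmitSchedClassTables() below for the field order actually
  // emitted.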
  struct LessWriteProcResources {
    bool operator()(const MCWriteProcResEntry &LHS,
                    const MCWriteProcResEntry &RHS) {
      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
    }
  };

  CodeGenTarget TGT;
  RecordKeeper &Records;
  CodeGenSchedModels &SchedModels;
  std::string Target;
  void Enumeration(raw_ostream &OS, DenseMap<Record *, unsigned> &FeatureMap);
  void EmitSubtargetInfoMacroCalls(raw_ostream &OS);
  unsigned FeatureKeyValues(raw_ostream &OS,
                            const DenseMap<Record *, unsigned> &FeatureMap);
  unsigned CPUKeyValues(raw_ostream &OS,
                        const DenseMap<Record *, unsigned> &FeatureMap);
  void FormItineraryStageString(const std::string &Names, Record *ItinData,
                                std::string &ItinString, unsigned &NStages);
  void FormItineraryOperandCycleString(Record *ItinData,
                                       std::string &ItinString,
                                       unsigned &NOperandCycles);
  void FormItineraryBypassString(const std::string &Names, Record *ItinData,
                                 std::string &ItinString,
                                 unsigned NOperandCycles);
  void EmitStageAndOperandCycleData(
      raw_ostream &OS,
      std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  void EmitItineraries(raw_ostream &OS,
                       std::vector<std::vector<InstrItinerary>> &ProcItinLists);
  unsigned EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                  raw_ostream &OS);
  void EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
                         char Separator);
  void EmitProcessorResourceSubUnits(const CodeGenProcModel &ProcModel,
                                     raw_ostream &OS);
  void EmitProcessorResources(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
                             const CodeGenProcModel &ProcModel);
  Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
                          const CodeGenProcModel &ProcModel);
  void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles,
                           std::vector<int64_t> &AcquireAtCycles,
                           const CodeGenProcModel &ProcModel);
  void GenSchedClassTables(const CodeGenProcModel &ProcModel,
                           SchedClassTables &SchedTables);
  void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
  void EmitProcessorModels(raw_ostream &OS);
  void EmitSchedModelHelpers(const std::string &ClassName, raw_ostream &OS);
  void emitSchedModelHelpersImpl(raw_ostream &OS,
                                 bool OnlyExpandMCInstPredicates = false);
  void emitGenMCSubtargetInfo(raw_ostream &OS);
  void EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS);

  void EmitSchedModel(raw_ostream &OS);
  void EmitHwModeCheck(const std::string &ClassName, raw_ostream &OS);
  void ParseFeaturesFunction(raw_ostream &OS);

public:
  SubtargetEmitter(RecordKeeper &R)
      : TGT(R), Records(R), SchedModels(TGT.getSchedModels()),
        Target(TGT.getName()) {}

  void run(raw_ostream &o);
};

} // end anonymous namespace
// Enumeration - Emit the specified class as an enumeration.
void SubtargetEmitter::Enumeration(raw_ostream &OS,
                                   DenseMap<Record *, unsigned> &FeatureMap) {
  // Get all records of class and sort.
  std::vector<Record *> DefList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(DefList, LessRecord());

  unsigned N = DefList.size();
  if (N + 1 > MAX_SUBTARGET_FEATURES)
    PrintFatalError(
        "Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");

  OS << "namespace " << Target << " {\n";
  OS << "enum {\n";

  for (unsigned i = 0; i < N; ++i) {
    Record *Def = DefList[i];

    OS << "  " << Def->getName() << " = " << i << ",\n";

    // Save the index for this feature.
    FeatureMap[Def] = i;
  }

  OS << "  "
     << "NumSubtargetFeatures = " << N << "\n";

  // Close enumeration and namespace.
  OS << "};\n";
  OS << "} // end namespace " << Target << "\n";
}
static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList,
                             const DenseMap<Record *, unsigned> &FeatureMap) {
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {};
  for (const Record *Feature : FeatureList) {
    unsigned Bit = FeatureMap.lookup(Feature);
    Mask[Bit / 64] |= 1ULL << (Bit % 64);
  }

  OS << "{ { { ";
  for (unsigned i = 0; i != Mask.size(); ++i) {
    OS << "0x";
    OS.write_hex(Mask[i]);
    OS << "ULL, ";
  }
  OS << "} } }";
}
/// Emit some information about the SubtargetFeature as calls to a macro so
/// that they can be used from C++.
void SubtargetEmitter::EmitSubtargetInfoMacroCalls(raw_ostream &OS) {
  OS << "\n#ifdef GET_SUBTARGETINFO_MACRO\n";

  std::vector<Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(FeatureList, LessRecordFieldFieldName());

  for (const Record *Feature : FeatureList) {
    const StringRef FieldName = Feature->getValueAsString("FieldName");
    const StringRef Value = Feature->getValueAsString("Value");

    // Only handle boolean features for now, excluding BitVectors and enums.
    const bool IsBool = (Value == "false" || Value == "true") &&
                        !StringRef(FieldName).contains('[');
    if (!IsBool)
      continue;

    // Some features default to true, with values set to false if enabled.
    const char *Default = Value == "false" ? "true" : "false";

    // Define the getter with lowercased first char: xxxYyy() { return XxxYyy; }
    const std::string Getter =
        FieldName.substr(0, 1).lower() + FieldName.substr(1).str();

    OS << "GET_SUBTARGETINFO_MACRO(" << FieldName << ", " << Default << ", "
       << Getter << ")\n";
  }
  OS << "#undef GET_SUBTARGETINFO_MACRO\n";
  OS << "#endif // GET_SUBTARGETINFO_MACRO\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_MC_DESC\n\n";
}
// FeatureKeyValues - Emit data of all the subtarget features. Used by the
// command line.
unsigned SubtargetEmitter::FeatureKeyValues(
    raw_ostream &OS, const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort all the features.
  std::vector<Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  if (FeatureList.empty())
    return 0;

  llvm::sort(FeatureList, LessRecordFieldName());

  // Begin feature table.
  OS << "// Sorted (by key) array of values for CPU features.\n"
     << "extern const llvm::SubtargetFeatureKV " << Target
     << "FeatureKV[] = {\n";

  unsigned NumFeatures = 0;
  for (const Record *Feature : FeatureList) {
    StringRef Name = Feature->getName();
    StringRef CommandLineName = Feature->getValueAsString("Name");
    StringRef Desc = Feature->getValueAsString("Desc");

    if (CommandLineName.empty())
      continue;

    // Emit as { "feature", "description", { featureEnum }, { i1 , i2 , ... , in } }
    OS << "  { "
       << "\"" << CommandLineName << "\", "
       << "\"" << Desc << "\", "
       << Target << "::" << Name << ", ";

    RecVec ImpliesList = Feature->getValueAsListOfDefs("Implies");

    printFeatureMask(OS, ImpliesList, FeatureMap);

    OS << " },\n";
    ++NumFeatures;
  }

  // End feature table.
  OS << "};\n";

  return NumFeatures;
}
// CPUKeyValues - Emit data of all the subtarget processors. Used by command
// line.
unsigned
SubtargetEmitter::CPUKeyValues(raw_ostream &OS,
                               const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort processor information.
  std::vector<Record *> ProcessorList =
      Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcessorList, LessRecordFieldName());

  // Begin processor table.
  OS << "// Sorted (by key) array of values for CPU subtype.\n"
     << "extern const llvm::SubtargetSubTypeKV " << Target
     << "SubTypeKV[] = {\n";

  // For each processor.
  for (Record *Processor : ProcessorList) {
    StringRef Name = Processor->getValueAsString("Name");
    RecVec FeatureList = Processor->getValueAsListOfDefs("Features");
    RecVec TuneFeatureList = Processor->getValueAsListOfDefs("TuneFeatures");

    // Emit as { "cpu", "description", 0, { f1 , f2 , ... fn } },
    OS << " { "
       << "\"" << Name << "\", ";

    printFeatureMask(OS, FeatureList, FeatureMap);
    OS << ", ";
    printFeatureMask(OS, TuneFeatureList, FeatureMap);

    // Emit the scheduler model pointer.
    const std::string &ProcModelName =
        SchedModels.getModelForProc(Processor).ModelName;
    OS << ", &" << ProcModelName << " },\n";
  }

  // End processor table.
  OS << "};\n";

  return ProcessorList.size();
}
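// Illustrative row (CPU name, model name and mask words are placeholders):
// each entry of the <Target>SubTypeKV table emitted above pairs a CPU name
// with its feature mask, its tune-feature mask, and a scheduling model, e.g.
//   { "generic", { { { 0x1ULL, ... } } }, { { { 0x0ULL, ... } } }, &GenericModel },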
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
                                                Record *ItinData,
                                                std::string &ItinString,
                                                unsigned &NStages) {
  RecVec StageList = ItinData->getValueAsListOfDefs("Stages");

  unsigned N = NStages = StageList.size();
  for (unsigned i = 0; i < N;) {
    const Record *Stage = StageList[i];

    // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
    int Cycles = Stage->getValueAsInt("Cycles");
    ItinString += "  { " + itostr(Cycles) + ", ";

    RecVec UnitList = Stage->getValueAsListOfDefs("Units");

    for (unsigned j = 0, M = UnitList.size(); j < M;) {
      // Add name and bitwise or.
      ItinString += Name + "FU::" + UnitList[j]->getName().str();
      if (++j < M)
        ItinString += " | ";
    }

    int TimeInc = Stage->getValueAsInt("TimeInc");
    ItinString += ", " + itostr(TimeInc);

    int Kind = Stage->getValueAsInt("Kind");
    ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);

    ItinString += " }";
    if (++i < N)
      ItinString += ", ";
  }
}
// FormItineraryOperandCycleString - Compose a string containing the
// operand cycle initialization for the specified itinerary. N is the
// number of operands that have cycles specified.
void SubtargetEmitter::FormItineraryOperandCycleString(
    Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) {
  // Get operand cycle list.
  std::vector<int64_t> OperandCycleList =
      ItinData->getValueAsListOfInts("OperandCycles");

  // For each operand cycle.
  NOperandCycles = OperandCycleList.size();
  for (int OCycle : OperandCycleList) {
    // Next operand cycle.
    ItinString += " " + itostr(OCycle);
  }
}
void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
                                                 Record *ItinData,
                                                 std::string &ItinString,
                                                 unsigned NOperandCycles) {
  RecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses");
  unsigned N = BypassList.size();
  unsigned i = 0;
  for (; i < N; ++i) {
    ItinString += Name + "Bypass::" + BypassList[i]->getName().str();
    if (i + 1 < NOperandCycles)
      ItinString += ", ";
  }
  // Pad the remaining operands with the default bypass value (0).
  for (; i < NOperandCycles; ++i) {
    ItinString += " 0";
    if (i + 1 < NOperandCycles)
      ItinString += ", ";
  }
}
// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
// cycle tables. Create a list of InstrItinerary objects (ProcItinLists)
// indexed by CodeGenSchedClass::Index.
void SubtargetEmitter::EmitStageAndOperandCycleData(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // Emit functional units for all the itineraries.
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {

    if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
      continue;

    RecVec FUs = ProcModel.ItinsDef->getValueAsListOfDefs("FU");

    StringRef Name = ProcModel.ItinsDef->getName();
    OS << "\n// Functional units for \"" << Name << "\"\n"
       << "namespace " << Name << "FU {\n";

    for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
      OS << "  const InstrStage::FuncUnits " << FUs[j]->getName()
         << " = 1ULL << " << j << ";\n";

    OS << "} // end namespace " << Name << "FU\n";

    RecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");
    if (!BPs.empty()) {
      OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
         << "\"\n"
         << "namespace " << Name << "Bypass {\n";

      OS << "  const unsigned NoBypass = 0;\n";
      for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
        OS << "  const unsigned " << BPs[j]->getName()
           << " = 1 << " << j << ";\n";

      OS << "} // end namespace " << Name << "Bypass\n";
    }
  }

  // Begin stages table.
  std::string StageTable =
      "\nextern const llvm::InstrStage " + Target + "Stages[] = {\n";
  StageTable += "  { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";

  // Begin operand cycle table.
  std::string OperandCycleTable =
      "extern const unsigned " + Target + "OperandCycles[] = {\n";
  OperandCycleTable += "  0, // No itinerary\n";

  // Begin pipeline bypass table.
  std::string BypassTable =
      "extern const unsigned " + Target + "ForwardingPaths[] = {\n";
  BypassTable += " 0, // No itinerary\n";
  // For each Itinerary across all processors, add a unique entry to the
  // stages, operand cycles, and pipeline bypass tables. Then add the new
  // Itinerary object with computed offsets to the ProcItinLists result.
  unsigned StageCount = 1, OperandCycleCount = 1;
  std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    // Add processor itinerary to the list.
    ProcItinLists.resize(ProcItinLists.size() + 1);

    // If this processor defines no itineraries, then leave the itinerary list
    // empty.
    std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
    if (!ProcModel.hasItineraries())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();

    ItinList.resize(SchedModels.numInstrSchedClasses());
    assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");

    for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
         SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {

      // Next itinerary data.
      Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];

      // Get string and stage count.
      std::string ItinStageString;
      unsigned NStages = 0;
      if (ItinData)
        FormItineraryStageString(std::string(Name), ItinData, ItinStageString,
                                 NStages);

      // Get string and operand cycle count.
      std::string ItinOperandCycleString;
      unsigned NOperandCycles = 0;
      std::string ItinBypassString;
      if (ItinData) {
        FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
                                        NOperandCycles);

        FormItineraryBypassString(std::string(Name), ItinData, ItinBypassString,
                                  NOperandCycles);
      }

      // Check to see if stage already exists and create if it doesn't.
      uint16_t FindStage = 0;
      if (NStages > 0) {
        FindStage = ItinStageMap[ItinStageString];
        if (FindStage == 0) {
          // Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
          StageTable += ItinStageString + ", // " + itostr(StageCount);
          if (NStages > 1)
            StageTable += "-" + itostr(StageCount + NStages - 1);
          StageTable += "\n";
          // Record Itin class number.
          ItinStageMap[ItinStageString] = FindStage = StageCount;
          StageCount += NStages;
        }
      }

      // Check to see if operand cycle already exists and create if it doesn't.
      uint16_t FindOperandCycle = 0;
      if (NOperandCycles > 0) {
        std::string ItinOperandString =
            ItinOperandCycleString + ItinBypassString;
        FindOperandCycle = ItinOperandMap[ItinOperandString];
        if (FindOperandCycle == 0) {
          // Emit as  cycle, // index
          OperandCycleTable += ItinOperandCycleString + ", // ";
          std::string OperandIdxComment = itostr(OperandCycleCount);
          if (NOperandCycles > 1)
            OperandIdxComment +=
                "-" + itostr(OperandCycleCount + NOperandCycles - 1);
          OperandCycleTable += OperandIdxComment + "\n";
          // Record Itin class number.
          ItinOperandMap[ItinOperandCycleString] =
              FindOperandCycle = OperandCycleCount;
          // Emit as bypass, // index
          BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
          OperandCycleCount += NOperandCycles;
        }
      }

      // Set up itinerary as location and location + stage count.
      int16_t NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
      InstrItinerary Intinerary = {
          NumUOps,
          FindStage,
          uint16_t(FindStage + NStages),
          FindOperandCycle,
          uint16_t(FindOperandCycle + NOperandCycles),
      };

      // Inject - empty slots will be 0, 0.
      ItinList[SchedClassIdx] = Intinerary;
    }
  }

  StageTable += "  { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
  StageTable += "};\n";

  // Closing operand cycles.
  OperandCycleTable += "  0 // End operand cycles\n";
  OperandCycleTable += "};\n";

  BypassTable += " 0 // End bypass tables\n";
  BypassTable += "};\n";

  // Emit the tables.
  OS << StageTable;
  OS << OperandCycleTable;
  OS << BypassTable;
}
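// Note on the deduplication above: ItinStageMap and ItinOperandMap key on the
// printed form of each stage or operand-cycle/bypass sequence, so itineraries
// that format to the same string share one slice of the emitted tables; only
// the first occurrence is appended, and later classes reuse the recorded
// FindStage/FindOperandCycle offsets.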
// EmitItineraries - Generate data for processor itineraries that were
// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
// Itineraries for each processor. The Itinerary lists are indexed on
// CodeGenSchedClass::Index.
void SubtargetEmitter::EmitItineraries(
    raw_ostream &OS, std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // For each processor's machine model.
  std::vector<std::vector<InstrItinerary>>::iterator
      ProcItinListsIter = ProcItinLists.begin();
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI, ++ProcItinListsIter) {

    Record *ItinsDef = PI->ItinsDef;
    if (!ItinsDefSet.insert(ItinsDef).second)
      continue;

    // Get the itinerary list for the processor.
    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;

    // Empty itineraries aren't referenced anywhere in the tablegen output
    // so don't emit them.
    if (ItinList.empty())
      continue;

    OS << "static const llvm::InstrItinerary ";

    // Begin processor itinerary table.
    OS << ItinsDef->getName() << "[] = {\n";

    // For each itinerary class in CodeGenSchedClass::Index order.
    for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
      InstrItinerary &Intinerary = ItinList[j];

      // Emit Itinerary in the form of
      //  { numMicroOps, firstStage, lastStage, firstCycle, lastCycle } // index
      OS << "  { " << Intinerary.NumMicroOps << ", " << Intinerary.FirstStage
         << ", " << Intinerary.LastStage << ", " << Intinerary.FirstOperandCycle
         << ", " << Intinerary.LastOperandCycle << " }"
         << ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
    }
    // End processor itinerary table.
    OS << "  { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
       << "};\n";
  }
}
// Emit either the value defined in the TableGen Record, or the default value
// defined in the C++ header. The Record is null if the processor does not
// define the property.
void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
                                         StringRef Name, char Separator) {
  OS << "  ";
  int V = R ? R->getValueAsInt(Name) : -1;
  if (V >= 0)
    OS << V << Separator << " // " << Name;
  else
    OS << "MCSchedModel::Default" << Name << Separator;
  OS << '\n';
}
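// Illustrative output (values invented): for a model that sets IssueWidth but
// leaves HighLatency unset, the two branches above emit lines such as
//   4, // IssueWidth
//   MCSchedModel::DefaultHighLatency,
// i.e. either a literal with a trailing comment or a reference to the default
// constant from MCSchedModel.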
void SubtargetEmitter::EmitProcessorResourceSubUnits(
    const CodeGenProcModel &ProcModel, raw_ostream &OS) {
  OS << "\nstatic const unsigned " << ProcModel.ModelName
     << "ProcResourceSubUnits[] = {\n"
     << "  0, // Invalid\n";

  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
    Record *PRDef = ProcModel.ProcResourceDefs[i];
    if (!PRDef->isSubClassOf("ProcResGroup"))
      continue;
    RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
    for (Record *RUDef : ResUnits) {
      Record *const RU =
          SchedModels.findProcResUnits(RUDef, ProcModel, PRDef->getLoc());
      for (unsigned J = 0; J < RU->getValueAsInt("NumUnits"); ++J) {
        OS << "  " << ProcModel.getProcResourceIdx(RU) << ", ";
      }
    }
    OS << " // " << PRDef->getName() << "\n";
  }
  OS << "};\n";
}
static void EmitRetireControlUnitInfo(const CodeGenProcModel &ProcModel,
                                      raw_ostream &OS) {
  int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0;
  if (Record *RCU = ProcModel.RetireControlUnit) {
    ReorderBufferSize =
        std::max(ReorderBufferSize, RCU->getValueAsInt("ReorderBufferSize"));
    MaxRetirePerCycle =
        std::max(MaxRetirePerCycle, RCU->getValueAsInt("MaxRetirePerCycle"));
  }

  OS << ReorderBufferSize << ", // ReorderBufferSize\n  ";
  OS << MaxRetirePerCycle << ", // MaxRetirePerCycle\n  ";
}
static void EmitRegisterFileInfo(const CodeGenProcModel &ProcModel,
                                 unsigned NumRegisterFiles,
                                 unsigned NumCostEntries, raw_ostream &OS) {
  if (NumRegisterFiles)
    OS << ProcModel.ModelName << "RegisterFiles,\n  " << (1 + NumRegisterFiles);
  else
    OS << "nullptr,\n  0";

  OS << ", // Number of register files.\n  ";
  if (NumCostEntries)
    OS << ProcModel.ModelName << "RegisterCosts,\n  ";
  else
    OS << "nullptr,\n  ";
  OS << NumCostEntries << ", // Number of register cost entries.\n";
}
unsigned
SubtargetEmitter::EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                         raw_ostream &OS) {
  if (llvm::all_of(ProcModel.RegisterFiles, [](const CodeGenRegisterFile &RF) {
        return RF.hasDefaultCosts();
      }))
    return 0;

  // Print the RegisterCost table first.
  OS << "\n// {RegisterClassID, Register Cost, AllowMoveElimination }\n";
  OS << "static const llvm::MCRegisterCostEntry " << ProcModel.ModelName
     << "RegisterCosts"
     << "[] = {\n";

  for (const CodeGenRegisterFile &RF : ProcModel.RegisterFiles) {
    // Skip register files with a default cost table.
    if (RF.hasDefaultCosts())
      continue;
    // Add entries to the cost table.
    for (const CodeGenRegisterCost &RC : RF.Costs) {
      OS << "  { ";
      Record *Rec = RC.RCDef;
      if (Rec->getValue("Namespace"))
        OS << Rec->getValueAsString("Namespace") << "::";
      OS << Rec->getName() << "RegClassID, " << RC.Cost << ", "
         << RC.AllowMoveElimination << "},\n";
    }
  }
  OS << "};\n";

  // Now generate a table with register file info.
  OS << "\n // {Name, #PhysRegs, #CostEntries, IndexToCostTbl, "
     << "MaxMovesEliminatedPerCycle, AllowZeroMoveEliminationOnly }\n";
  OS << "static const llvm::MCRegisterFileDesc " << ProcModel.ModelName
     << "RegisterFiles"
     << "[] = {\n"
     << "  { \"InvalidRegisterFile\", 0, 0, 0, 0, 0 },\n";
  unsigned CostTblIndex = 0;

  for (const CodeGenRegisterFile &RD : ProcModel.RegisterFiles) {
    OS << "  { ";
    OS << '"' << RD.Name << '"' << ", " << RD.NumPhysRegs << ", ";
    unsigned NumCostEntries = RD.Costs.size();
    OS << NumCostEntries << ", " << CostTblIndex << ", "
       << RD.MaxMovesEliminatedPerCycle << ", "
       << RD.AllowZeroMoveEliminationOnly << "},\n";
    CostTblIndex += NumCostEntries;
  }
  OS << "};\n";

  return CostTblIndex;
}
void SubtargetEmitter::EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  unsigned QueueID = 0;
  if (ProcModel.LoadQueue) {
    const Record *Queue = ProcModel.LoadQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << "  " << QueueID << ", // Resource Descriptor for the Load Queue\n";

  QueueID = 0;
  if (ProcModel.StoreQueue) {
    const Record *Queue =
        ProcModel.StoreQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << "  " << QueueID << ", // Resource Descriptor for the Store Queue\n";
}
void SubtargetEmitter::EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  // Generate a table of register file descriptors (one entry per each user
  // defined register file), and a table of register costs.
  unsigned NumCostEntries = EmitRegisterFileTables(ProcModel, OS);

  // Now generate a table for the extra processor info.
  OS << "\nstatic const llvm::MCExtraProcessorInfo " << ProcModel.ModelName
     << "ExtraInfo = {\n  ";

  // Add information related to the retire control unit.
  EmitRetireControlUnitInfo(ProcModel, OS);

  // Add information related to the register files (i.e. where to find register
  // file descriptors and register costs).
  EmitRegisterFileInfo(ProcModel, ProcModel.RegisterFiles.size(),
                       NumCostEntries, OS);

  // Add information about load/store queues.
  EmitLoadStoreQueueInfo(ProcModel, OS);

  OS << "};\n";
}
void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  EmitProcessorResourceSubUnits(ProcModel, OS);

  OS << "\n// {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}\n";
  OS << "static const llvm::MCProcResourceDesc " << ProcModel.ModelName
     << "ProcResources"
     << "[] = {\n"
     << "  {\"InvalidUnit\", 0, 0, 0, 0},\n";

  unsigned SubUnitsOffset = 1;
  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
    Record *PRDef = ProcModel.ProcResourceDefs[i];

    Record *SuperDef = nullptr;
    unsigned SuperIdx = 0;
    unsigned NumUnits = 0;
    const unsigned SubUnitsBeginOffset = SubUnitsOffset;
    int BufferSize = PRDef->getValueAsInt("BufferSize");
    if (PRDef->isSubClassOf("ProcResGroup")) {
      RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
      for (Record *RU : ResUnits) {
        NumUnits += RU->getValueAsInt("NumUnits");
        SubUnitsOffset += RU->getValueAsInt("NumUnits");
      }
    } else {
      if (PRDef->getValueInit("Super")->isComplete()) {
        SuperDef =
            SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"),
                                         ProcModel, PRDef->getLoc());
        SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
      }
      NumUnits = PRDef->getValueAsInt("NumUnits");
    }
    // Emit the ProcResourceDesc.
    OS << "  {\"" << PRDef->getName() << "\", ";
    if (PRDef->getName().size() < 15)
      OS.indent(15 - PRDef->getName().size());
    OS << NumUnits << ", " << SuperIdx << ", " << BufferSize << ", ";
    if (SubUnitsBeginOffset != SubUnitsOffset) {
      OS << ProcModel.ModelName << "ProcResourceSubUnits + "
         << SubUnitsBeginOffset;
    } else {
      OS << "nullptr";
    }
    OS << "}, // #" << i + 1;
    if (SuperDef)
      OS << ", Super=" << SuperDef->getName();
    OS << "\n";
  }
  OS << "};\n";
}
// Find the WriteRes Record that defines processor resources for this
// SchedWrite.
Record *SubtargetEmitter::FindWriteResources(
    const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {

  // Check if the SchedWrite is already subtarget-specific and directly
  // specifies a set of processor resources.
  if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
    return SchedWrite.TheDef;

  Record *AliasDef = nullptr;
  for (Record *A : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases defined for processor " +
                          ProcModel.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
    return AliasDef;

  // Check this processor's list of write resources.
  Record *ResDef = nullptr;
  for (Record *WR : ProcModel.WriteResDefs) {
    if (!WR->isSubClassOf("WriteRes"))
      continue;
    if (AliasDef == WR->getValueAsDef("WriteType") ||
        SchedWrite.TheDef == WR->getValueAsDef("WriteType")) {
      if (ResDef)
        PrintFatalError(WR->getLoc(),
                        "Resources are defined for both SchedWrite and its "
                        "alias on processor " +
                            ProcModel.ModelName);
      ResDef = WR;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindWriteResources recursively with that model here.
  if (!ResDef) {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                        SchedWrite.TheDef->getName());
  }
  return ResDef;
}
/// Find the ReadAdvance record for the given SchedRead on this processor or
/// return null.
Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
                                          const CodeGenProcModel &ProcModel) {
  // Check for SchedReads that directly specify a ReadAdvance.
  if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
    return SchedRead.TheDef;

  // Check this processor's list of aliases for SchedRead.
  Record *AliasDef = nullptr;
  for (Record *A : SchedRead.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases defined for processor " +
                          ProcModel.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
    return AliasDef;

  // Check this processor's ReadAdvanceList.
  Record *ResDef = nullptr;
  for (Record *RA : ProcModel.ReadAdvanceDefs) {
    if (!RA->isSubClassOf("ReadAdvance"))
      continue;
    if (AliasDef == RA->getValueAsDef("ReadType") ||
        SchedRead.TheDef == RA->getValueAsDef("ReadType")) {
      if (ResDef)
        PrintFatalError(RA->getLoc(),
                        "Resources are defined for both SchedRead and its "
                        "alias on processor " +
                            ProcModel.ModelName);
      ResDef = RA;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindReadAdvance recursively with that model here.
  if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                        SchedRead.TheDef->getName());
  }
  return ResDef;
}
// Expand an explicit list of processor resources into a full list of implied
// resource groups and super resources that cover them.
void SubtargetEmitter::ExpandProcResources(
    RecVec &PRVec, std::vector<int64_t> &ReleaseAtCycles,
    std::vector<int64_t> &AcquireAtCycles, const CodeGenProcModel &PM) {
  assert(PRVec.size() == ReleaseAtCycles.size() && "failed precondition");
  for (unsigned i = 0, e = PRVec.size(); i != e; ++i) {
    Record *PRDef = PRVec[i];
    RecVec SubResources;
    if (PRDef->isSubClassOf("ProcResGroup"))
      SubResources = PRDef->getValueAsListOfDefs("Resources");
    else {
      SubResources.push_back(PRDef);
      PRDef = SchedModels.findProcResUnits(PRDef, PM, PRDef->getLoc());
      for (Record *SubDef = PRDef;
           SubDef->getValueInit("Super")->isComplete();) {
        if (SubDef->isSubClassOf("ProcResGroup")) {
          // Disallow this for simplicity.
          PrintFatalError(SubDef->getLoc(),
                          "Processor resource group cannot be a super "
                          "resource.");
        }
        Record *SuperDef =
            SchedModels.findProcResUnits(SubDef->getValueAsDef("Super"), PM,
                                         SubDef->getLoc());
        PRVec.push_back(SuperDef);
        ReleaseAtCycles.push_back(ReleaseAtCycles[i]);
        AcquireAtCycles.push_back(AcquireAtCycles[i]);
        SubDef = SuperDef;
      }
    }
    for (Record *PR : PM.ProcResourceDefs) {
      if (PR == PRDef || !PR->isSubClassOf("ProcResGroup"))
        continue;
      RecVec SuperResources = PR->getValueAsListOfDefs("Resources");
      RecIter SubI = SubResources.begin(), SubE = SubResources.end();
      for (; SubI != SubE; ++SubI) {
        if (!is_contained(SuperResources, *SubI))
          break;
      }
      if (SubI == SubE) {
        PRVec.push_back(PR);
        ReleaseAtCycles.push_back(ReleaseAtCycles[i]);
        AcquireAtCycles.push_back(AcquireAtCycles[i]);
      }
    }
  }
}
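// Illustrative example (resource names invented): if PRVec initially holds the
// single unit P0 with ReleaseAtCycles = {2}, and the model defines a group
// P01 = {P0, P1}, the loops above append P01 (and any super resource of P0)
// with the same release/acquire cycles, so every covering resource is charged
// consistently.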
// Generate the SchedClass table for this processor and update global
// tables. Must be called for each processor in order.
void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
                                           SchedClassTables &SchedTables) {
  SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
  if (!ProcModel.hasInstrSchedModel())
    return;

  std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    LLVM_DEBUG(SC.dump(&SchedModels));

    SCTab.resize(SCTab.size() + 1);
    MCSchedClassDesc &SCDesc = SCTab.back();
    // SCDesc.Name is guarded by NDEBUG
    SCDesc.NumMicroOps = 0;
    SCDesc.BeginGroup = false;
    SCDesc.EndGroup = false;
    SCDesc.RetireOOO = false;
    SCDesc.WriteProcResIdx = 0;
    SCDesc.WriteLatencyIdx = 0;
    SCDesc.ReadAdvanceIdx = 0;

    // A Variant SchedClass has no resources of its own.
    bool HasVariants = false;
    for (const CodeGenSchedTransition &CGT :
         make_range(SC.Transitions.begin(), SC.Transitions.end())) {
      if (CGT.ProcIndex == ProcModel.Index) {
        HasVariants = true;
        break;
      }
    }
    if (HasVariants) {
      SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
      continue;
    }

    // Determine if the SchedClass is actually reachable on this processor. If
    // not, don't try to locate the processor resources; it will fail.
    // If ProcIndices contains 0, this class applies to all processors.
    assert(!SC.ProcIndices.empty() && "expect at least one procidx");
    if (SC.ProcIndices[0] != 0) {
      if (!is_contained(SC.ProcIndices, ProcModel.Index))
        continue;
    }
    IdxVec Writes = SC.Writes;
    IdxVec Reads = SC.Reads;
    if (!SC.InstRWs.empty()) {
      // This class has a default ReadWrite list which can be overridden by
      // InstRW definitions.
      Record *RWDef = nullptr;
      for (Record *RW : SC.InstRWs) {
        Record *RWModelDef = RW->getValueAsDef("SchedModel");
        if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
          RWDef = RW;
          break;
        }
      }
      if (RWDef) {
        Writes.clear();
        Reads.clear();
        SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
                            Writes, Reads);
      }
    }
    if (Writes.empty()) {
      // Check this processor's itinerary class resources.
      for (Record *I : ProcModel.ItinRWDefs) {
        RecVec Matched = I->getValueAsListOfDefs("MatchedItinClasses");
        if (is_contained(Matched, SC.ItinClassDef)) {
          SchedModels.findRWs(I->getValueAsListOfDefs("OperandReadWrites"),
                              Writes, Reads);
          break;
        }
      }
      if (Writes.empty()) {
        LLVM_DEBUG(dbgs() << ProcModel.ModelName
                          << " does not have resources for class " << SC.Name
                          << '\n');
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
      }
    }
    // Sum resources across all operand writes.
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
    for (unsigned W : Writes) {
      IdxVec WriteSeq;
      SchedModels.expandRWSeqForProc(W, WriteSeq, /*IsRead=*/false, ProcModel);

      // For each operand, create a latency entry.
      MCWriteLatencyEntry WLEntry;
      WLEntry.Cycles = 0;
      unsigned WriteID = WriteSeq.back();
      WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
      // If this Write is not referenced by a ReadAdvance, don't distinguish it
      // from other WriteLatency entries.
      if (!SchedModels.hasReadOfWrite(
              SchedModels.getSchedWrite(WriteID).TheDef)) {
        WriteID = 0;
      }
      WLEntry.WriteResourceID = WriteID;

      for (unsigned WS : WriteSeq) {

        Record *WriteRes =
            FindWriteResources(SchedModels.getSchedWrite(WS), ProcModel);

        // Mark the parent class as invalid for unsupported write types.
        if (WriteRes->getValueAsBit("Unsupported")) {
          SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
          break;
        }

        WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
        SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.RetireOOO |= WriteRes->getValueAsBit("RetireOOO");

        // Create an entry for each ProcResource listed in WriteRes.
        RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
        std::vector<int64_t> ReleaseAtCycles =
            WriteRes->getValueAsListOfInts("ReleaseAtCycles");

        std::vector<int64_t> AcquireAtCycles =
            WriteRes->getValueAsListOfInts("AcquireAtCycles");

        // Check consistency of the two vectors carrying the start and
        // stop cycles of the resources.
        if (!ReleaseAtCycles.empty() &&
            ReleaseAtCycles.size() != PRVec.size()) {
          // If ReleaseAtCycles is provided, check consistency.
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent release at cycles: size(ReleaseAtCycles) != "
                    "size(ProcResources): ")
                  .concat(Twine(PRVec.size()))
                  .concat(" vs ")
                  .concat(Twine(ReleaseAtCycles.size())));
        }

        if (!AcquireAtCycles.empty() &&
            AcquireAtCycles.size() != PRVec.size()) {
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent resource cycles: size(AcquireAtCycles) != "
                    "size(ProcResources): ")
                  .concat(Twine(AcquireAtCycles.size()))
                  .concat(" vs ")
                  .concat(Twine(PRVec.size())));
        }

        if (ReleaseAtCycles.empty()) {
          // If ReleaseAtCycles is not provided, default to one cycle
          // per resource.
          ReleaseAtCycles.resize(PRVec.size(), 1);
        }

        if (AcquireAtCycles.empty()) {
          // If AcquireAtCycles is not provided, reserve the resource
          // starting from cycle 0.
          AcquireAtCycles.resize(PRVec.size(), 0);
        }

        assert(AcquireAtCycles.size() == ReleaseAtCycles.size());

        ExpandProcResources(PRVec, ReleaseAtCycles, AcquireAtCycles, ProcModel);
        assert(AcquireAtCycles.size() == ReleaseAtCycles.size());

        for (unsigned PRIdx = 0, PREnd = PRVec.size(); PRIdx != PREnd;
             ++PRIdx) {
          MCWriteProcResEntry WPREntry;
          WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
          assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
          WPREntry.ReleaseAtCycle = ReleaseAtCycles[PRIdx];
          WPREntry.AcquireAtCycle = AcquireAtCycles[PRIdx];
          if (AcquireAtCycles[PRIdx] > ReleaseAtCycles[PRIdx]) {
            PrintFatalError(
                WriteRes->getLoc(),
                Twine("Inconsistent resource cycles: AcquireAtCycle "
                      "<= ReleaseAtCycle must hold."));
          }
          if (AcquireAtCycles[PRIdx] < 0) {
            PrintFatalError(WriteRes->getLoc(),
                            Twine("Invalid value: AcquireAtCycle "
                                  "must be a non-negative value."));
          }
          // If this resource is already used in this sequence, add the current
          // entry's cycles so that the same resource appears to be used
          // serially, rather than multiple parallel uses. This is important
          // for in-order machines where the resource consumption is a hazard.
          unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
          for (; WPRIdx != WPREnd; ++WPRIdx) {
            if (WriteProcResources[WPRIdx].ProcResourceIdx ==
                WPREntry.ProcResourceIdx) {
              // TODO: multiple uses of the same resource would require either
              // 1. handling multiple intervals for the same resource in
              //    `<Target>WriteProcResTable` (see
              //    `SubtargetEmitter::EmitSchedClassTables`), or
              // 2. merging multiple intervals into a single interval.
              assert(WPREntry.AcquireAtCycle == 0 &&
                     "multiple use of the same resource is not yet handled");
              WriteProcResources[WPRIdx].ReleaseAtCycle +=
                  WPREntry.ReleaseAtCycle;
              break;
            }
          }
          if (WPRIdx == WPREnd)
            WriteProcResources.push_back(WPREntry);
        }
      }
      WriteLatencies.push_back(WLEntry);
    }
    // Create an entry for each operand Read in this SchedClass.
    // Entries must be sorted first by UseIdx then by WriteResourceID.
    for (unsigned UseIdx = 0, EndIdx = Reads.size(); UseIdx != EndIdx;
         ++UseIdx) {
      Record *ReadAdvance =
          FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);
      if (!ReadAdvance)
        continue;

      // Mark the parent class as invalid for unsupported write types.
      if (ReadAdvance->getValueAsBit("Unsupported")) {
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        break;
      }
      RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
      IdxVec WriteIDs;
      if (ValidWrites.empty())
        WriteIDs.push_back(0);
      else {
        for (Record *VW : ValidWrites) {
          WriteIDs.push_back(SchedModels.getSchedRWIdx(VW, /*IsRead=*/false));
        }
      }
      llvm::sort(WriteIDs);
      for (unsigned W : WriteIDs) {
        MCReadAdvanceEntry RAEntry;
        RAEntry.UseIdx = UseIdx;
        RAEntry.WriteResourceID = W;
        RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
        ReadAdvanceEntries.push_back(RAEntry);
      }
    }
    if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
      WriteProcResources.clear();
      WriteLatencies.clear();
      ReadAdvanceEntries.clear();
    }
    // Add the information for this SchedClass to the global tables using
    // basic compression.
    //
    // WriteProcRes entries are sorted by ProcResIdx.
    llvm::sort(WriteProcResources, LessWriteProcResources());

    SCDesc.NumWriteProcResEntries = WriteProcResources.size();
    std::vector<MCWriteProcResEntry>::iterator WPRPos =
        std::search(SchedTables.WriteProcResources.begin(),
                    SchedTables.WriteProcResources.end(),
                    WriteProcResources.begin(), WriteProcResources.end());
    if (WPRPos != SchedTables.WriteProcResources.end())
      SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
    else {
      SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
      SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
                                            WriteProcResources.end());
    }
    // Latency entries must remain in operand order.
    SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
    std::vector<MCWriteLatencyEntry>::iterator WLPos =
        std::search(SchedTables.WriteLatencies.begin(),
                    SchedTables.WriteLatencies.end(),
                    WriteLatencies.begin(), WriteLatencies.end());
    if (WLPos != SchedTables.WriteLatencies.end()) {
      unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
      SCDesc.WriteLatencyIdx = idx;
      for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
        if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
            std::string::npos) {
          SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
        }
    } else {
      SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
      llvm::append_range(SchedTables.WriteLatencies, WriteLatencies);
      llvm::append_range(SchedTables.WriterNames, WriterNames);
    }
    // ReadAdvanceEntries must remain in operand order.
    SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
    std::vector<MCReadAdvanceEntry>::iterator RAPos =
        std::search(SchedTables.ReadAdvanceEntries.begin(),
                    SchedTables.ReadAdvanceEntries.end(),
                    ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
    if (RAPos != SchedTables.ReadAdvanceEntries.end())
      SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
    else {
      SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
      llvm::append_range(SchedTables.ReadAdvanceEntries, ReadAdvanceEntries);
    }
  }
}
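// Note on the std::search calls above: they implement a simple reuse scheme -
// if a class's run of write/latency/read-advance entries already appears as a
// subsequence of the corresponding global table, the class records the offset
// of that existing run instead of appending a duplicate copy.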
// Emit SchedClass tables for all processors and associated global tables.
void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
                                            raw_ostream &OS) {
  // Emit global WriteProcResTable.
  OS << "\n// {ProcResourceIdx, ReleaseAtCycle, AcquireAtCycle}\n"
     << "extern const llvm::MCWriteProcResEntry " << Target
     << "WriteProcResTable[] = {\n"
     << "  { 0,  0,  0 }, // Invalid\n";
  for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
       WPRIdx != WPREnd; ++WPRIdx) {
    MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
    OS << "  {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
       << format("%2d", WPREntry.ReleaseAtCycle) << ", "
       << format("%2d", WPREntry.AcquireAtCycle) << "}";
    if (WPRIdx + 1 < WPREnd)
      OS << ',';
    OS << " // #" << WPRIdx << '\n';
  }
  OS << "}; // " << Target << "WriteProcResTable\n";

  // Emit global WriteLatencyTable.
  OS << "\n// {Cycles, WriteResourceID}\n"
     << "extern const llvm::MCWriteLatencyEntry "
     << Target << "WriteLatencyTable[] = {\n"
     << "  { 0,  0}, // Invalid\n";
  for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
       WLIdx != WLEnd; ++WLIdx) {
    MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
    OS << "  {" << format("%2d", WLEntry.Cycles) << ", "
       << format("%2d", WLEntry.WriteResourceID) << "}";
    if (WLIdx + 1 < WLEnd)
      OS << ',';
    OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
  }
  OS << "}; // " << Target << "WriteLatencyTable\n";

  // Emit global ReadAdvanceTable.
  OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
     << "extern const llvm::MCReadAdvanceEntry "
     << Target << "ReadAdvanceTable[] = {\n"
     << "  {0,  0,  0}, // Invalid\n";
  for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
       RAIdx != RAEnd; ++RAIdx) {
    MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
    OS << "  {" << RAEntry.UseIdx << ", "
       << format("%2d", RAEntry.WriteResourceID) << ", "
       << format("%2d", RAEntry.Cycles) << "}";
    if (RAIdx + 1 < RAEnd)
      OS << ',';
    OS << " // #" << RAIdx << '\n';
  }
  OS << "}; // " << Target << "ReadAdvanceTable\n";

  // Emit a SchedClass table for each processor.
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI) {
    if (!PI->hasInstrSchedModel())
      continue;

    std::vector<MCSchedClassDesc> &SCTab =
        SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];

    OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
       << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
    OS << "static const llvm::MCSchedClassDesc "
       << PI->ModelName << "SchedClasses[] = {\n";

    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
    assert(SchedModels.getSchedClass(0).Name == "NoInstrModel" &&
           "invalid class not first");
    OS << "  {DBGFIELD(\"InvalidSchedClass\")  "
       << MCSchedClassDesc::InvalidNumMicroOps
       << ", false, false, false, 0, 0,  0, 0,  0, 0},\n";

    for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
      MCSchedClassDesc &MCDesc = SCTab[SCIdx];
      const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
      OS << "  {DBGFIELD(\"" << SchedClass.Name << "\") ";
      if (SchedClass.Name.size() < 18)
        OS.indent(18 - SchedClass.Name.size());
      OS << MCDesc.NumMicroOps
         << ", " << (MCDesc.BeginGroup ? "true" : "false")
         << ", " << (MCDesc.EndGroup ? "true" : "false")
         << ", " << (MCDesc.RetireOOO ? "true" : "false")
         << ", " << format("%2d", MCDesc.WriteProcResIdx)
         << ", " << MCDesc.NumWriteProcResEntries
         << ", " << format("%2d", MCDesc.WriteLatencyIdx)
         << ", " << MCDesc.NumWriteLatencyEntries
         << ", " << format("%2d", MCDesc.ReadAdvanceIdx)
         << ", " << MCDesc.NumReadAdvanceEntries
         << "}, // #" << SCIdx << '\n';
    }
    OS << "}; // " << PI->ModelName << "SchedClasses\n";
  }
}
void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
  // For each processor model.
  for (const CodeGenProcModel &PM : SchedModels.procModels()) {
    // Emit extra processor info if available.
    if (PM.hasExtraProcessorInfo())
      EmitExtraProcessorInfo(PM, OS);
    // Emit processor resource table.
    if (PM.hasInstrSchedModel())
      EmitProcessorResources(PM, OS);
    else if (!PM.ProcResourceDefs.empty())
      PrintFatalError(PM.ModelDef->getLoc(),
                      "SchedMachineModel defines ProcResources without "
                      "defining WriteRes/SchedWriteRes");

    // Begin processor itinerary properties.
    OS << "\n";
    OS << "static const llvm::MCSchedModel " << PM.ModelName << " = {\n";
    EmitProcessorProp(OS, PM.ModelDef, "IssueWidth", ',');
    EmitProcessorProp(OS, PM.ModelDef, "MicroOpBufferSize", ',');
    EmitProcessorProp(OS, PM.ModelDef, "LoopMicroOpBufferSize", ',');
    EmitProcessorProp(OS, PM.ModelDef, "LoadLatency", ',');
    EmitProcessorProp(OS, PM.ModelDef, "HighLatency", ',');
    EmitProcessorProp(OS, PM.ModelDef, "MispredictPenalty", ',');

    bool PostRAScheduler =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("PostRAScheduler") : false);

    OS << "  " << (PostRAScheduler ? "true" : "false") << ", // "
       << "PostRAScheduler\n";

    bool CompleteModel =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("CompleteModel") : false);

    OS << "  " << (CompleteModel ? "true" : "false") << ", // "
       << "CompleteModel\n";

    bool EnableIntervals =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("EnableIntervals") : false);

    OS << "  " << (EnableIntervals ? "true" : "false") << ", // "
       << "EnableIntervals\n";

    OS << "  " << PM.Index << ", // Processor ID\n";
    if (PM.hasInstrSchedModel())
      OS << "  " << PM.ModelName << "ProcResources" << ",\n"
         << "  " << PM.ModelName << "SchedClasses" << ",\n"
         << "  " << PM.ProcResourceDefs.size() + 1 << ",\n"
         << "  " << (SchedModels.schedClassEnd()
                     - SchedModels.schedClassBegin()) << ",\n";
    else
      OS << "  nullptr, nullptr, 0, 0,"
         << " // No instruction-level machine model.\n";
    if (PM.hasItineraries())
      OS << "  " << PM.ItinsDef->getName() << ",\n";
    else
      OS << "  nullptr, // No Itinerary\n";
    if (PM.hasExtraProcessorInfo())
      OS << "  &" << PM.ModelName << "ExtraInfo,\n";
    else
      OS << "  nullptr // No extra processor descriptor\n";
    OS << "};\n";
  }
}
// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
  OS << "#ifdef DBGFIELD\n"
     << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
     << "#endif\n"
     << "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)\n"
     << "#define DBGFIELD(x) x,\n"
     << "#else\n"
     << "#define DBGFIELD(x)\n"
     << "#endif\n";

  if (SchedModels.hasItineraries()) {
    std::vector<std::vector<InstrItinerary>> ProcItinLists;
    // Emit the stage data.
    EmitStageAndOperandCycleData(OS, ProcItinLists);
    EmitItineraries(OS, ProcItinLists);
  }
  OS << "\n// ===============================================================\n"
     << "// Data tables for the new per-operand machine model.\n";

  SchedClassTables SchedTables;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    GenSchedClassTables(ProcModel, SchedTables);
  }
  EmitSchedClassTables(SchedTables, OS);

  OS << "\n#undef DBGFIELD\n";

  // Emit the processor machine model.
  EmitProcessorModels(OS);
}
static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream Stream(Buffer);

  // Collect all the PredicateProlog records and print them to the output
  // stream.
  std::vector<Record *> Prologs =
      Records.getAllDerivedDefinitions("PredicateProlog");
  llvm::sort(Prologs, LessRecord());
  for (Record *P : Prologs)
    Stream << P->getValueAsString("Code") << '\n';

  OS << Buffer;
}
static bool isTruePredicate(const Record *Rec) {
  return Rec->isSubClassOf("MCSchedPredicate") &&
         Rec->getValueAsDef("Pred")->isSubClassOf("MCTrue");
}
static void emitPredicates(const CodeGenSchedTransition &T,
                           const CodeGenSchedClass &SC, PredicateExpander &PE,
                           raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream SS(Buffer);

  // If not all predicates are MCTrue, then we need an if-stmt.
  unsigned NumNonTruePreds =
      T.PredTerm.size() - count_if(T.PredTerm, isTruePredicate);

  SS.indent(PE.getIndentLevel() * 2);

  if (NumNonTruePreds) {
    bool FirstNonTruePredicate = true;
    SS << "if (";

    PE.setIndentLevel(PE.getIndentLevel() + 2);

    for (const Record *Rec : T.PredTerm) {
      // Skip predicates that evaluate to "true".
      if (isTruePredicate(Rec))
        continue;

      if (FirstNonTruePredicate) {
        FirstNonTruePredicate = false;
      } else {
        SS << "\n";
        SS.indent(PE.getIndentLevel() * 2);
        SS << "&& ";
      }

      if (Rec->isSubClassOf("MCSchedPredicate")) {
        PE.expandPredicate(SS, Rec->getValueAsDef("Pred"));
        continue;
      }

      // Expand this legacy predicate and wrap it in parentheses if there is
      // more than one predicate to expand.
      SS << ((NumNonTruePreds > 1) ? "(" : "")
         << Rec->getValueAsString("Predicate")
         << ((NumNonTruePreds > 1) ? ")" : "");
    }

    SS << ")\n"; // end of if-stmt
    PE.decreaseIndentLevel();
    SS.indent(PE.getIndentLevel() * 2);
    PE.decreaseIndentLevel();
  }

  SS << "return " << T.ToClassIdx << "; // " << SC.Name << '\n';
  OS << Buffer;
}
// Used by method `SubtargetEmitter::emitSchedModelHelpersImpl()` to generate
// epilogue code for the auto-generated helper.
static void emitSchedModelHelperEpilogue(raw_ostream &OS,
                                         bool ShouldReturnZero) {
  if (ShouldReturnZero) {
    OS << "  // Don't know how to resolve this scheduling class.\n"
       << "  return 0;\n";
    return;
  }

  OS << "  report_fatal_error(\"Expected a variant SchedClass\");\n";
}
static bool hasMCSchedPredicates(const CodeGenSchedTransition &T) {
  return all_of(T.PredTerm, [](const Record *Rec) {
    return Rec->isSubClassOf("MCSchedPredicate");
  });
}
static void collectVariantClasses(const CodeGenSchedModels &SchedModels,
                                  IdxVec &VariantClasses,
                                  bool OnlyExpandMCInstPredicates) {
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    // Ignore non-variant scheduling classes.
    if (SC.Transitions.empty())
      continue;

    if (OnlyExpandMCInstPredicates) {
      // Ignore this variant scheduling class if no transitions use any
      // meaningful MCSchedPredicate definitions.
      if (llvm::none_of(SC.Transitions, hasMCSchedPredicates))
        continue;
    }

    VariantClasses.push_back(SC.Index);
  }
}
static void collectProcessorIndices(const CodeGenSchedClass &SC,
                                    IdxVec &ProcIndices) {
  // A variant scheduling class may define transitions for multiple
  // processors. This function identifies which processors are associated with
  // transition rules specified by variant class `SC`.
  for (const CodeGenSchedTransition &T : SC.Transitions) {
    IdxVec PI;
    std::set_union(&T.ProcIndex, &T.ProcIndex + 1, ProcIndices.begin(),
                   ProcIndices.end(), std::back_inserter(PI));
    ProcIndices.swap(PI);
  }
}
static bool isAlwaysTrue(const CodeGenSchedTransition &T) {
  return llvm::all_of(T.PredTerm, isTruePredicate);
}
1657 void SubtargetEmitter::emitSchedModelHelpersImpl(
1658 raw_ostream
&OS
, bool OnlyExpandMCInstPredicates
) {
1659 IdxVec VariantClasses
;
1660 collectVariantClasses(SchedModels
, VariantClasses
,
1661 OnlyExpandMCInstPredicates
);
1663 if (VariantClasses
.empty()) {
1664 emitSchedModelHelperEpilogue(OS
, OnlyExpandMCInstPredicates
);
1668 // Construct a switch statement where the condition is a check on the
1669 // scheduling class identifier. There is a `case` for every variant class
1670 // defined by the processor models of this target.
1671 // Each `case` implements a number of rules to resolve (i.e. to transition from)
1672 // a variant scheduling class to another scheduling class. Rules are
1673 // described by instances of CodeGenSchedTransition. Note that transitions may
1674 // not be valid for all processors.
1675 OS
<< " switch (SchedClass) {\n";
1676 for (unsigned VC
: VariantClasses
) {
1678 const CodeGenSchedClass
&SC
= SchedModels
.getSchedClass(VC
);
1679 collectProcessorIndices(SC
, ProcIndices
);
1681 OS
<< " case " << VC
<< ": // " << SC
.Name
<< '\n';
1683 PredicateExpander
PE(Target
);
1685 PE
.setExpandForMC(OnlyExpandMCInstPredicates
);
1686 for (unsigned PI
: ProcIndices
) {
1689 // Emit a guard on the processor ID.
1691 OS
<< (OnlyExpandMCInstPredicates
1693 : "if (SchedModel->getProcessorID() == ");
1695 OS
<< "{ // " << (SchedModels
.procModelBegin() + PI
)->ModelName
<< '\n';
1698 // Now emit transitions associated with processor PI.
1699 const CodeGenSchedTransition
*FinalT
= nullptr;
      for (const CodeGenSchedTransition &T : SC.Transitions) {
        if (PI != 0 && T.ProcIndex != PI)
          continue;

        // When only MCInst predicates are expanded, emit just the transitions
        // based on MCSchedPredicate. At least the transition specified by
        // NoSchedPred is emitted, which becomes the default transition for
        // those variants otherwise not based on MCSchedPredicate.
        // FIXME: preferably, llvm-mca should instead assume a reasonable
        // default when a variant transition is not based on MCSchedPredicate
        // for a given processor.
        if (OnlyExpandMCInstPredicates && !hasMCSchedPredicates(T))
          continue;

        // If a transition folds to a plain 'return X', it must be emitted
        // last.
        if (isAlwaysTrue(T)) {
          FinalT = &T;
          continue;
        }
        PE.setIndentLevel(3);
        emitPredicates(T, SchedModels.getSchedClass(T.ToClassIdx), PE, OS);
      }
      if (FinalT)
        emitPredicates(*FinalT, SchedModels.getSchedClass(FinalT->ToClassIdx),
                       PE, OS);

      OS << "    }\n";

      if (PI == 0)
        break;
    }

    if (SC.isInferred())
      OS << "    return " << SC.Index << ";\n";
    OS << "    break;\n";
  }

  OS << "  };\n";

  emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
}
void SubtargetEmitter::EmitSchedModelHelpers(const std::string &ClassName,
                                             raw_ostream &OS) {
  OS << "unsigned " << ClassName
     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
     << " const TargetSchedModel *SchedModel) const {\n";

  // Emit the predicate prolog code.
  emitPredicateProlog(Records, OS);

  // Emit target predicates.
  emitSchedModelHelpersImpl(OS);

  OS << "} // " << ClassName << "::resolveSchedClass\n\n";

  OS << "unsigned " << ClassName
     << "\n::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,"
     << " const MCInstrInfo *MCII, unsigned CPUID) const {\n"
     << "  return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n"
     << "} // " << ClassName << "::resolveVariantSchedClass\n\n";

  STIPredicateExpander PE(Target);
  PE.setClassPrefix(ClassName);
  PE.setExpandDefinition(true);
  PE.setByRef(false);
  PE.setIndentLevel(0);

  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);
}
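// For a hypothetical target named Foo, the code emitted by
// EmitSchedModelHelpers() looks roughly like:
//
//   unsigned FooGenSubtargetInfo
//   ::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,
//                       const TargetSchedModel *SchedModel) const {
//     <predicate prolog>
//     switch (SchedClass) { ... }
//     report_fatal_error("Expected a variant SchedClass");
//   } // FooGenSubtargetInfo::resolveSchedClass
//
//   unsigned FooGenSubtargetInfo
//   ::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,
//                              const MCInstrInfo *MCII, unsigned CPUID) const {
//     return Foo_MC::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);
//   } // FooGenSubtargetInfo::resolveVariantSchedClass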
void SubtargetEmitter::EmitHwModeCheck(const std::string &ClassName,
                                       raw_ostream &OS) {
  const CodeGenHwModes &CGH = TGT.getHwModes();
  assert(CGH.getNumModeIds() > 0);
  if (CGH.getNumModeIds() == 1)
    return;

  OS << "unsigned " << ClassName << "::getHwMode() const {\n";
  for (unsigned M = 1, NumModes = CGH.getNumModeIds(); M != NumModes; ++M) {
    const HwMode &HM = CGH.getMode(M);
    OS << "  if (checkFeatures(\"" << HM.Features
       << "\")) return " << M << ";\n";
  }
  OS << "  return 0;\n}\n";
}
// Produces a subtarget-specific function for parsing
// the subtarget features string.
void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS) {
  std::vector<Record *> Features =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(Features, LessRecord());

  OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
     << "// subtarget options.\n"
     << "void llvm::";
  OS << Target;
  OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, "
     << "StringRef FS) {\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nTuneCPU:\" << TuneCPU << \"\\n\\n\");\n";
  if (Features.empty()) {
    OS << "}\n";
    return;
  }

  OS << "  InitMCProcessorInfo(CPU, TuneCPU, FS);\n"
     << "  const FeatureBitset &Bits = getFeatureBits();\n";

  for (Record *R : Features) {
    StringRef Instance = R->getName();
    StringRef Value = R->getValueAsString("Value");
    StringRef FieldName = R->getValueAsString("FieldName");

    if (Value == "true" || Value == "false")
      OS << "  if (Bits[" << Target << "::"
         << Instance << "]) "
         << FieldName << " = " << Value << ";\n";
    else
      OS << "  if (Bits[" << Target << "::"
         << Instance << "] && "
         << FieldName << " < " << Value << ") "
         << FieldName << " = " << Value << ";\n";
  }

  OS << "}\n";
}
void SubtargetEmitter::emitGenMCSubtargetInfo(raw_ostream &OS) {
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,\n"
     << "    const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {\n";
  emitSchedModelHelpersImpl(OS, /* OnlyExpandMCPredicates */ true);
  OS << "}\n";
  OS << "} // end namespace " << Target << "_MC\n\n";

  OS << "struct " << Target
     << "GenMCSubtargetInfo : public MCSubtargetInfo {\n";
  OS << "  " << Target << "GenMCSubtargetInfo(const Triple &TT,\n"
     << "    StringRef CPU, StringRef TuneCPU, StringRef FS,\n"
     << "    ArrayRef<SubtargetFeatureKV> PF,\n"
     << "    ArrayRef<SubtargetSubTypeKV> PD,\n"
     << "    const MCWriteProcResEntry *WPR,\n"
     << "    const MCWriteLatencyEntry *WL,\n"
     << "    const MCReadAdvanceEntry *RA, const InstrStage *IS,\n"
     << "    const unsigned *OC, const unsigned *FP) :\n"
     << "      MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,\n"
     << "                      WPR, WL, RA, IS, OC, FP) { }\n\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,\n"
     << "      const MCInst *MI, const MCInstrInfo *MCII,\n"
     << "      unsigned CPUID) const override {\n"
     << "    return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n";
  OS << "  }\n";
  if (TGT.getHwModes().getNumModeIds() > 1)
    OS << "  unsigned getHwMode() const override;\n";
  OS << "};\n";

  EmitHwModeCheck(Target + "GenMCSubtargetInfo", OS);
}
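// For a hypothetical target Foo, this emits an MC-layer subtarget class of
// roughly this shape:
//
//   struct FooGenMCSubtargetInfo : public MCSubtargetInfo {
//     FooGenMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef TuneCPU,
//                           StringRef FS, ...)
//         : MCSubtargetInfo(TT, CPU, TuneCPU, FS, ...) {}
//     unsigned resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,
//                                       const MCInstrInfo *MCII,
//                                       unsigned CPUID) const override {
//       return Foo_MC::resolveVariantSchedClassImpl(SchedClass, MI, MCII,
//                                                   CPUID);
//     }
//   };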
void SubtargetEmitter::EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS) {
  OS << "\n#ifdef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  STIPredicateExpander PE(Target);
  PE.setExpandForMC(true);
  PE.setByRef(true);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  OS << "\n#ifdef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";

  std::string ClassPrefix = Target + "MCInstrAnalysis";
  PE.setExpandDefinition(true);
  PE.setClassPrefix(ClassPrefix);
  PE.setIndentLevel(0);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
}
// SubtargetEmitter::run - Main subtarget enumeration emitter.
void SubtargetEmitter::run(raw_ostream &OS) {
  emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);

  OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
  OS << "#undef GET_SUBTARGETINFO_ENUM\n\n";

  DenseMap<Record *, unsigned> FeatureMap;

  OS << "namespace llvm {\n";
  Enumeration(OS, FeatureMap);
  OS << "} // end namespace llvm\n\n";
  OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";

  EmitSubtargetInfoMacroCalls(OS);

  OS << "namespace llvm {\n";
  OS << "namespace {\n";
  unsigned NumFeatures = FeatureKeyValues(OS, FeatureMap);
  OS << "\n";
  EmitSchedModel(OS);
  OS << "\n";
  unsigned NumProcs = CPUKeyValues(OS, FeatureMap);
  OS << "\n";
  OS << "} // end anonymous namespace\n\n";
  // MCSubtargetInfo initialization routine.
  emitGenMCSubtargetInfo(OS);

  OS << "\nstatic inline MCSubtargetInfo *create" << Target
     << "MCSubtargetInfoImpl("
     << "const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS) {\n";
  OS << "  return new " << Target
     << "GenMCSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << Target << "FeatureKV, ";
  else
    OS << "std::nullopt, ";
  if (NumProcs)
    OS << Target << "SubTypeKV, ";
  else
    OS << "std::nullopt, ";
  OS << '\n'; OS.indent(22);
  OS << Target << "WriteProcResTable, "
     << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n'; OS.indent(22);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, "
       << Target << "OperandCycles, "
       << Target << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ");\n}\n\n";
  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";
  OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n\n";

  OS << "#include \"llvm/Support/Debug.h\"\n";
  OS << "#include \"llvm/Support/raw_ostream.h\"\n\n";
  ParseFeaturesFunction(OS);

  OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";
  // Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
  OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
  OS << "#undef GET_SUBTARGETINFO_HEADER\n\n";

  std::string ClassName = Target + "GenSubtargetInfo";
  OS << "namespace llvm {\n";
  OS << "class DFAPacketizer;\n";
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID);\n"
     << "} // end namespace " << Target << "_MC\n\n";
  OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
     << "  explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS);\n"
     << "public:\n"
     << "  unsigned resolveSchedClass(unsigned SchedClass, "
     << " const MachineInstr *DefMI,"
     << " const TargetSchedModel *SchedModel) const override;\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII,"
     << " unsigned CPUID) const override;\n"
     << "  DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
     << " const;\n";
  if (TGT.getHwModes().getNumModeIds() > 1)
    OS << "  unsigned getHwMode() const override;\n";
  STIPredicateExpander PE(Target);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "};\n"
     << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";
  OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
  OS << "#undef GET_SUBTARGETINFO_CTOR\n\n";

  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n\n";
  OS << "namespace llvm {\n";
  OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
  OS << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[];\n";
  OS << "extern const llvm::MCWriteProcResEntry "
     << Target << "WriteProcResTable[];\n";
  OS << "extern const llvm::MCWriteLatencyEntry "
     << Target << "WriteLatencyTable[];\n";
  OS << "extern const llvm::MCReadAdvanceEntry "
     << Target << "ReadAdvanceTable[];\n";

  if (SchedModels.hasItineraries()) {
    OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
    OS << "extern const unsigned " << Target << "OperandCycles[];\n";
    OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
  }

  OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS)\n"
     << "  : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << "ArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
  else
    OS << "std::nullopt, ";
  if (NumProcs)
    OS << "ArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
  else
    OS << "std::nullopt, ";
  OS << '\n'; OS.indent(24);
  OS << Target << "WriteProcResTable, "
     << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n'; OS.indent(24);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, "
       << Target << "OperandCycles, "
       << Target << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ") {}\n\n";
  EmitSchedModelHelpers(ClassName, OS);
  EmitHwModeCheck(ClassName, OS);

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";

  EmitMCInstrAnalysisPredicateFunctions(OS);
}
static TableGen::Emitter::OptClass<SubtargetEmitter>
    X("gen-subtarget", "Generate subtarget enumerations");