//===- SubtargetEmitter.cpp - Generate subtarget enumerations -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend emits subtarget enumerations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTarget.h"
#include "CodeGenSchedule.h"
#include "PredicateExpander.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"

#define DEBUG_TYPE "subtarget-emitter"
namespace {

class SubtargetEmitter {
  // Each processor has a SchedClassDesc table with an entry for each
  // SchedClass. The SchedClassDesc table indexes into a global write resource
  // table, write latency table, and read advance table.
  struct SchedClassTables {
    std::vector<std::vector<MCSchedClassDesc>> ProcSchedClasses;
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;

    // Reserve an invalid entry at index 0
    SchedClassTables() {
      ProcSchedClasses.resize(1);
      WriteProcResources.resize(1);
      WriteLatencies.resize(1);
      WriterNames.push_back("InvalidWrite");
      ReadAdvanceEntries.resize(1);
    }
  };
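  // Note (explanatory, not part of the original emitter): each
  // MCSchedClassDesc generated for a processor stores {index, count} pairs
  // (WriteProcResIdx/#, WriteLatencyIdx/#, ReadAdvanceIdx/#) that point into
  // the three shared vectors above, so identical write-resource, latency and
  // read-advance sequences can be reused across scheduling classes and across
  // processors.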
  struct LessWriteProcResources {
    bool operator()(const MCWriteProcResEntry &LHS,
                    const MCWriteProcResEntry &RHS) {
      return LHS.ProcResourceIdx < RHS.ProcResourceIdx;
    }
  };

  const CodeGenTarget &TGT;
  RecordKeeper &Records;
  CodeGenSchedModels &SchedModels;
  std::string Target;

  void Enumeration(raw_ostream &OS, DenseMap<Record *, unsigned> &FeatureMap);
  unsigned FeatureKeyValues(raw_ostream &OS,
                            const DenseMap<Record *, unsigned> &FeatureMap);
  unsigned CPUKeyValues(raw_ostream &OS,
                        const DenseMap<Record *, unsigned> &FeatureMap);
  void FormItineraryStageString(const std::string &Names,
                                Record *ItinData, std::string &ItinString,
                                unsigned &NStages);
  void FormItineraryOperandCycleString(Record *ItinData, std::string &ItinString,
                                       unsigned &NOperandCycles);
  void FormItineraryBypassString(const std::string &Names,
                                 Record *ItinData,
                                 std::string &ItinString, unsigned NOperandCycles);
  void EmitStageAndOperandCycleData(raw_ostream &OS,
                                    std::vector<std::vector<InstrItinerary>>
                                        &ProcItinLists);
  void EmitItineraries(raw_ostream &OS,
                       std::vector<std::vector<InstrItinerary>>
                           &ProcItinLists);
  unsigned EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                  raw_ostream &OS);
  void EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  void EmitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
                         char Separator);
  void EmitProcessorResourceSubUnits(const CodeGenProcModel &ProcModel,
                                     raw_ostream &OS);
  void EmitProcessorResources(const CodeGenProcModel &ProcModel,
                              raw_ostream &OS);
  Record *FindWriteResources(const CodeGenSchedRW &SchedWrite,
                             const CodeGenProcModel &ProcModel);
  Record *FindReadAdvance(const CodeGenSchedRW &SchedRead,
                          const CodeGenProcModel &ProcModel);
  void ExpandProcResources(RecVec &PRVec, std::vector<int64_t> &Cycles,
                           const CodeGenProcModel &ProcModel);
  void GenSchedClassTables(const CodeGenProcModel &ProcModel,
                           SchedClassTables &SchedTables);
  void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS);
  void EmitProcessorModels(raw_ostream &OS);
  void EmitSchedModelHelpers(const std::string &ClassName, raw_ostream &OS);
  void emitSchedModelHelpersImpl(raw_ostream &OS,
                                 bool OnlyExpandMCInstPredicates = false);
  void emitGenMCSubtargetInfo(raw_ostream &OS);
  void EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS);

  void EmitSchedModel(raw_ostream &OS);
  void EmitHwModeCheck(const std::string &ClassName, raw_ostream &OS);
  void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures,
                             unsigned NumProcs);

public:
  SubtargetEmitter(RecordKeeper &R, CodeGenTarget &TGT)
      : TGT(TGT), Records(R), SchedModels(TGT.getSchedModels()),
        Target(TGT.getName()) {}

  void run(raw_ostream &o);
};

} // end anonymous namespace
//
// Enumeration - Emit the specified class as an enumeration.
//
void SubtargetEmitter::Enumeration(raw_ostream &OS,
                                   DenseMap<Record *, unsigned> &FeatureMap) {
  // Get all records of class and sort
  std::vector<Record *> DefList =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(DefList, LessRecord());

  unsigned N = DefList.size();
  if (N + 1 > MAX_SUBTARGET_FEATURES)
    PrintFatalError(
        "Too many subtarget features! Bump MAX_SUBTARGET_FEATURES.");

  OS << "namespace " << Target << " {\n";

  for (unsigned i = 0; i < N; ++i) {
    Record *Def = DefList[i];

    OS << "  " << Def->getName() << " = " << i << ",\n";

    // Save the index for this feature.
    FeatureMap[Def] = i;
  }

  OS << "  "
     << "NumSubtargetFeatures = " << N << "\n";

  // Close enumeration and namespace
  OS << "};\n";
  OS << "} // end namespace " << Target << "\n";
}
static void printFeatureMask(raw_ostream &OS, RecVec &FeatureList,
                             const DenseMap<Record *, unsigned> &FeatureMap) {
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Mask = {};
  for (const Record *Feature : FeatureList) {
    unsigned Bit = FeatureMap.lookup(Feature);
    Mask[Bit / 64] |= 1ULL << (Bit % 64);
  }

  for (unsigned i = 0; i != Mask.size(); ++i) {
    OS.write_hex(Mask[i]);
  }
}
//
// FeatureKeyValues - Emit data of all the subtarget features.  Used by the
// command line.
//
unsigned SubtargetEmitter::FeatureKeyValues(
    raw_ostream &OS, const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort all the features
  std::vector<Record *> FeatureList =
      Records.getAllDerivedDefinitions("SubtargetFeature");

  if (FeatureList.empty())
    return 0;

  llvm::sort(FeatureList, LessRecordFieldName());

  // Begin feature table
  OS << "// Sorted (by key) array of values for CPU features.\n"
     << "extern const llvm::SubtargetFeatureKV " << Target
     << "FeatureKV[] = {\n";

  unsigned NumFeatures = 0;
  for (const Record *Feature : FeatureList) {

    StringRef Name = Feature->getName();
    StringRef CommandLineName = Feature->getValueAsString("Name");
    StringRef Desc = Feature->getValueAsString("Desc");

    if (CommandLineName.empty()) continue;

    // Emit as { "feature", "description", { featureEnum }, { i1 , i2 , ... , in } }
    OS << "\"" << CommandLineName << "\", "
       << "\"" << Desc << "\", "
       << Target << "::" << Name << ", ";

    RecVec ImpliesList = Feature->getValueAsListOfDefs("Implies");

    printFeatureMask(OS, ImpliesList, FeatureMap);

    ++NumFeatures;
  }

  return NumFeatures;
}

//
// CPUKeyValues - Emit data of all the subtarget processors.  Used by command
// line.
//
unsigned
SubtargetEmitter::CPUKeyValues(raw_ostream &OS,
                               const DenseMap<Record *, unsigned> &FeatureMap) {
  // Gather and sort processor information
  std::vector<Record *> ProcessorList =
      Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcessorList, LessRecordFieldName());

  // Begin processor table
  OS << "// Sorted (by key) array of values for CPU subtype.\n"
     << "extern const llvm::SubtargetSubTypeKV " << Target
     << "SubTypeKV[] = {\n";

  // For each processor
  for (Record *Processor : ProcessorList) {
    StringRef Name = Processor->getValueAsString("Name");
    RecVec FeatureList = Processor->getValueAsListOfDefs("Features");
    RecVec TuneFeatureList = Processor->getValueAsListOfDefs("TuneFeatures");

    // Emit as { "cpu", "description", 0, { f1 , f2 , ... fn } },
    OS << "\"" << Name << "\", ";

    printFeatureMask(OS, FeatureList, FeatureMap);
    printFeatureMask(OS, TuneFeatureList, FeatureMap);

    // Emit the scheduler model pointer.
    const std::string &ProcModelName =
        SchedModels.getModelForProc(Processor).ModelName;
    OS << ", &" << ProcModelName << " },\n";
  }

  // End processor table

  return ProcessorList.size();
}
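// Illustrative only (CPU and model names are hypothetical): each emitted
// SubTypeKV row pairs a CPU name with its feature mask, its tune-feature mask,
// and a pointer to that CPU's scheduling model, roughly:
//   { "mycpu", <feature bits>, <tune feature bits>, &MyCPUModel },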
//
// FormItineraryStageString - Compose a string containing the stage
// data initialization for the specified itinerary. N is the number
// of stages.
//
void SubtargetEmitter::FormItineraryStageString(const std::string &Name,
                                                Record *ItinData,
                                                std::string &ItinString,
                                                unsigned &NStages) {
  RecVec StageList = ItinData->getValueAsListOfDefs("Stages");

  unsigned N = NStages = StageList.size();
  for (unsigned i = 0; i < N;) {
    const Record *Stage = StageList[i];

    // Form string as ,{ cycles, u1 | u2 | ... | un, timeinc, kind }
    int Cycles = Stage->getValueAsInt("Cycles");
    ItinString += " { " + itostr(Cycles) + ", ";

    RecVec UnitList = Stage->getValueAsListOfDefs("Units");

    for (unsigned j = 0, M = UnitList.size(); j < M;) {
      // Add name and bitwise or
      ItinString += Name + "FU::" + UnitList[j]->getName().str();
      if (++j < M) ItinString += " | ";
    }

    int TimeInc = Stage->getValueAsInt("TimeInc");
    ItinString += ", " + itostr(TimeInc);

    int Kind = Stage->getValueAsInt("Kind");
    ItinString += ", (llvm::InstrStage::ReservationKinds)" + itostr(Kind);

    if (++i < N) ItinString += ", ";
  }
}
//
// FormItineraryOperandCycleString - Compose a string containing the
// operand cycle initialization for the specified itinerary. N is the
// number of operands that has cycles specified.
//
void SubtargetEmitter::FormItineraryOperandCycleString(
    Record *ItinData, std::string &ItinString, unsigned &NOperandCycles) {
  // Get operand cycle list
  std::vector<int64_t> OperandCycleList =
      ItinData->getValueAsListOfInts("OperandCycles");

  // For each operand cycle
  NOperandCycles = OperandCycleList.size();
  for (int OCycle : OperandCycleList) {
    // Next operand cycle
    ItinString += " " + itostr(OCycle);
  }
}

void SubtargetEmitter::FormItineraryBypassString(const std::string &Name,
                                                 Record *ItinData,
                                                 std::string &ItinString,
                                                 unsigned NOperandCycles) {
  RecVec BypassList = ItinData->getValueAsListOfDefs("Bypasses");
  unsigned N = BypassList.size();
  unsigned i = 0;
  for (; i < N; ++i) {
    ItinString += Name + "Bypass::" + BypassList[i]->getName().str();
  }
  for (; i < NOperandCycles; ++i) {
    ItinString += " 0";
  }
}
//
// EmitStageAndOperandCycleData - Generate unique itinerary stages and operand
// cycle tables. Create a list of InstrItinerary objects (ProcItinLists) indexed
// by CodeGenSchedClass::Index.
//
void SubtargetEmitter::
EmitStageAndOperandCycleData(raw_ostream &OS,
                             std::vector<std::vector<InstrItinerary>>
                                 &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // Emit functional units for all the itineraries.
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {

    if (!ItinsDefSet.insert(ProcModel.ItinsDef).second)
      continue;

    RecVec FUs = ProcModel.ItinsDef->getValueAsListOfDefs("FU");

    StringRef Name = ProcModel.ItinsDef->getName();
    OS << "\n// Functional units for \"" << Name << "\"\n"
       << "namespace " << Name << "FU {\n";

    for (unsigned j = 0, FUN = FUs.size(); j < FUN; ++j)
      OS << " const InstrStage::FuncUnits " << FUs[j]->getName()
         << " = 1ULL << " << j << ";\n";

    OS << "} // end namespace " << Name << "FU\n";

    RecVec BPs = ProcModel.ItinsDef->getValueAsListOfDefs("BP");

    OS << "\n// Pipeline forwarding paths for itineraries \"" << Name
       << "\"\n" << "namespace " << Name << "Bypass {\n";

    OS << " const unsigned NoBypass = 0;\n";
    for (unsigned j = 0, BPN = BPs.size(); j < BPN; ++j)
      OS << " const unsigned " << BPs[j]->getName()
         << " = 1 << " << j << ";\n";

    OS << "} // end namespace " << Name << "Bypass\n";
  }

  // Begin stages table
  std::string StageTable = "\nextern const llvm::InstrStage " + Target +
                           "Stages[] = {\n";
  StageTable += " { 0, 0, 0, llvm::InstrStage::Required }, // No itinerary\n";

  // Begin operand cycle table
  std::string OperandCycleTable = "extern const unsigned " + Target +
                                  "OperandCycles[] = {\n";
  OperandCycleTable += " 0, // No itinerary\n";

  // Begin pipeline bypass table
  std::string BypassTable = "extern const unsigned " + Target +
                            "ForwardingPaths[] = {\n";
  BypassTable += " 0, // No itinerary\n";

  // For each Itinerary across all processors, add a unique entry to the stages,
  // operand cycles, and pipeline bypass tables. Then add the new Itinerary
  // object with computed offsets to the ProcItinLists result.
  unsigned StageCount = 1, OperandCycleCount = 1;
  std::map<std::string, unsigned> ItinStageMap, ItinOperandMap;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    // Add process itinerary to the list.
    ProcItinLists.resize(ProcItinLists.size() + 1);

    // If this processor defines no itineraries, then leave the itinerary list
    // empty.
    std::vector<InstrItinerary> &ItinList = ProcItinLists.back();
    if (!ProcModel.hasItineraries())
      continue;

    StringRef Name = ProcModel.ItinsDef->getName();

    ItinList.resize(SchedModels.numInstrSchedClasses());
    assert(ProcModel.ItinDefList.size() == ItinList.size() && "bad Itins");

    for (unsigned SchedClassIdx = 0, SchedClassEnd = ItinList.size();
         SchedClassIdx < SchedClassEnd; ++SchedClassIdx) {

      // Next itinerary data
      Record *ItinData = ProcModel.ItinDefList[SchedClassIdx];

      // Get string and stage count
      std::string ItinStageString;
      unsigned NStages = 0;
      if (ItinData)
        FormItineraryStageString(std::string(Name), ItinData, ItinStageString,
                                 NStages);

      // Get string and operand cycle count
      std::string ItinOperandCycleString;
      unsigned NOperandCycles = 0;
      std::string ItinBypassString;
      if (ItinData) {
        FormItineraryOperandCycleString(ItinData, ItinOperandCycleString,
                                        NOperandCycles);
        FormItineraryBypassString(std::string(Name), ItinData, ItinBypassString,
                                  NOperandCycles);
      }

      // Check to see if stage already exists and create if it doesn't
      uint16_t FindStage = 0;
      if (NStages > 0) {
        FindStage = ItinStageMap[ItinStageString];
        if (FindStage == 0) {
          // Emit as { cycles, u1 | u2 | ... | un, timeinc }, // indices
          StageTable += ItinStageString + ", // " + itostr(StageCount);
          if (NStages > 1)
            StageTable += "-" + itostr(StageCount + NStages - 1);
          StageTable += "\n";
          // Record Itin class number.
          ItinStageMap[ItinStageString] = FindStage = StageCount;
          StageCount += NStages;
        }
      }

      // Check to see if operand cycle already exists and create if it doesn't
      uint16_t FindOperandCycle = 0;
      if (NOperandCycles > 0) {
        std::string ItinOperandString = ItinOperandCycleString + ItinBypassString;
        FindOperandCycle = ItinOperandMap[ItinOperandString];
        if (FindOperandCycle == 0) {
          // Emit as  cycle, // index
          OperandCycleTable += ItinOperandCycleString + ", // ";
          std::string OperandIdxComment = itostr(OperandCycleCount);
          if (NOperandCycles > 1)
            OperandIdxComment += "-"
              + itostr(OperandCycleCount + NOperandCycles - 1);
          OperandCycleTable += OperandIdxComment + "\n";
          // Record Itin class number.
          ItinOperandMap[ItinOperandCycleString] =
            FindOperandCycle = OperandCycleCount;
          // Emit as bypass, // index
          BypassTable += ItinBypassString + ", // " + OperandIdxComment + "\n";
          OperandCycleCount += NOperandCycles;
        }
      }

      // Set up itinerary as location and location + stage count
      int16_t NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
      InstrItinerary Intinerary = {
          NumUOps,
          FindStage,
          uint16_t(FindStage + NStages),
          FindOperandCycle,
          uint16_t(FindOperandCycle + NOperandCycles),
      };

      // Inject - empty slots will be 0, 0
      ItinList[SchedClassIdx] = Intinerary;
    }
  }

  // Closing stage
  StageTable += " { 0, 0, 0, llvm::InstrStage::Required } // End stages\n";
  StageTable += "};\n";

  // Closing operand cycles
  OperandCycleTable += " 0 // End operand cycles\n";
  OperandCycleTable += "};\n";

  BypassTable += " 0 // End bypass tables\n";
  BypassTable += "};\n";

  // Emit the composed tables.
  OS << StageTable;
  OS << OperandCycleTable;
  OS << BypassTable;
}
//
// EmitProcessorData - Generate data for processor itineraries that were
// computed during EmitStageAndOperandCycleData(). ProcItinLists lists all
// Itineraries for each processor. The Itinerary lists are indexed on
// CodeGenSchedClass::Index.
//
void SubtargetEmitter::
EmitItineraries(raw_ostream &OS,
                std::vector<std::vector<InstrItinerary>> &ProcItinLists) {
  // Multiple processor models may share an itinerary record. Emit it once.
  SmallPtrSet<Record *, 8> ItinsDefSet;

  // For each processor's machine model
  std::vector<std::vector<InstrItinerary>>::iterator
      ProcItinListsIter = ProcItinLists.begin();
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI, ++ProcItinListsIter) {

    Record *ItinsDef = PI->ItinsDef;
    if (!ItinsDefSet.insert(ItinsDef).second)
      continue;

    // Get the itinerary list for the processor.
    assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator");
    std::vector<InstrItinerary> &ItinList = *ProcItinListsIter;

    // Empty itineraries aren't referenced anywhere in the tablegen output
    // so don't emit them.
    if (ItinList.empty())
      continue;

    OS << "static const llvm::InstrItinerary ";

    // Begin processor itinerary table
    OS << ItinsDef->getName() << "[] = {\n";

    // For each itinerary class in CodeGenSchedClass::Index order.
    for (unsigned j = 0, M = ItinList.size(); j < M; ++j) {
      InstrItinerary &Intinerary = ItinList[j];

      // Emit Itinerary in the form of
      // { firstStage, lastStage, firstCycle, lastCycle } // index
      OS << "  { " <<
         Intinerary.NumMicroOps << ", " <<
         Intinerary.FirstStage << ", " <<
         Intinerary.LastStage << ", " <<
         Intinerary.FirstOperandCycle << ", " <<
         Intinerary.LastOperandCycle << " }" <<
         ", // " << j << " " << SchedModels.getSchedClass(j).Name << "\n";
    }
    // End processor itinerary table
    OS << "  { 0, uint16_t(~0U), uint16_t(~0U), uint16_t(~0U), uint16_t(~0U) }"
          "};\n";
  }
}
// Emit either the value defined in the TableGen Record, or the default
// value defined in the C++ header. The Record is null if the processor does not
// define a model.
void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R,
                                         StringRef Name, char Separator) {
  int V = R ? R->getValueAsInt(Name) : -1;
  if (V >= 0)
    OS << V << Separator << " // " << Name;
  else
    OS << "MCSchedModel::Default" << Name << Separator;
}
void SubtargetEmitter::EmitProcessorResourceSubUnits(
    const CodeGenProcModel &ProcModel, raw_ostream &OS) {
  OS << "\nstatic const unsigned " << ProcModel.ModelName
     << "ProcResourceSubUnits[] = {\n"
     << " 0, // Invalid\n";

  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
    Record *PRDef = ProcModel.ProcResourceDefs[i];
    if (!PRDef->isSubClassOf("ProcResGroup"))
      continue;
    RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
    for (Record *RUDef : ResUnits) {
      Record *const RU =
          SchedModels.findProcResUnits(RUDef, ProcModel, PRDef->getLoc());
      for (unsigned J = 0; J < RU->getValueAsInt("NumUnits"); ++J) {
        OS << " " << ProcModel.getProcResourceIdx(RU) << ", ";
      }
    }
    OS << " // " << PRDef->getName() << "\n";
  }
}
static void EmitRetireControlUnitInfo(const CodeGenProcModel &ProcModel,
                                      raw_ostream &OS) {
  int64_t ReorderBufferSize = 0, MaxRetirePerCycle = 0;
  if (Record *RCU = ProcModel.RetireControlUnit) {
    ReorderBufferSize =
        std::max(ReorderBufferSize, RCU->getValueAsInt("ReorderBufferSize"));
    MaxRetirePerCycle =
        std::max(MaxRetirePerCycle, RCU->getValueAsInt("MaxRetirePerCycle"));
  }

  OS << ReorderBufferSize << ", // ReorderBufferSize\n ";
  OS << MaxRetirePerCycle << ", // MaxRetirePerCycle\n ";
}
static void EmitRegisterFileInfo(const CodeGenProcModel &ProcModel,
                                 unsigned NumRegisterFiles,
                                 unsigned NumCostEntries, raw_ostream &OS) {
  if (NumRegisterFiles)
    OS << ProcModel.ModelName << "RegisterFiles,\n " << (1 + NumRegisterFiles);
  else
    OS << "nullptr,\n 0";

  OS << ", // Number of register files.\n ";
  OS << ProcModel.ModelName << "RegisterCosts,\n ";
  OS << NumCostEntries << ", // Number of register cost entries.\n";
}
unsigned
SubtargetEmitter::EmitRegisterFileTables(const CodeGenProcModel &ProcModel,
                                         raw_ostream &OS) {
  if (llvm::all_of(ProcModel.RegisterFiles, [](const CodeGenRegisterFile &RF) {
        return RF.hasDefaultCosts();
      }))
    return 0;

  // Print the RegisterCost table first.
  OS << "\n// {RegisterClassID, Register Cost, AllowMoveElimination }\n";
  OS << "static const llvm::MCRegisterCostEntry " << ProcModel.ModelName
     << "RegisterCosts[] = {\n";

  for (const CodeGenRegisterFile &RF : ProcModel.RegisterFiles) {
    // Skip register files with a default cost table.
    if (RF.hasDefaultCosts())
      continue;
    // Add entries to the cost table.
    for (const CodeGenRegisterCost &RC : RF.Costs) {
      Record *Rec = RC.RCDef;
      if (Rec->getValue("Namespace"))
        OS << Rec->getValueAsString("Namespace") << "::";
      OS << Rec->getName() << "RegClassID, " << RC.Cost << ", "
         << RC.AllowMoveElimination << "},\n";
    }
  }

  // Now generate a table with register file info.
  OS << "\n // {Name, #PhysRegs, #CostEntries, IndexToCostTbl, "
     << "MaxMovesEliminatedPerCycle, AllowZeroMoveEliminationOnly }\n";
  OS << "static const llvm::MCRegisterFileDesc " << ProcModel.ModelName
     << "RegisterFiles[] = {\n"
     << " { \"InvalidRegisterFile\", 0, 0, 0, 0, 0 },\n";
  unsigned CostTblIndex = 0;

  for (const CodeGenRegisterFile &RD : ProcModel.RegisterFiles) {
    OS << '"' << RD.Name << '"' << ", " << RD.NumPhysRegs << ", ";
    unsigned NumCostEntries = RD.Costs.size();
    OS << NumCostEntries << ", " << CostTblIndex << ", "
       << RD.MaxMovesEliminatedPerCycle << ", "
       << RD.AllowZeroMoveEliminationOnly << "},\n";
    CostTblIndex += NumCostEntries;
  }

  return CostTblIndex;
}
void SubtargetEmitter::EmitLoadStoreQueueInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  unsigned QueueID = 0;
  if (ProcModel.LoadQueue) {
    const Record *Queue = ProcModel.LoadQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << " " << QueueID << ", // Resource Descriptor for the Load Queue\n";

  QueueID = 0;
  if (ProcModel.StoreQueue) {
    const Record *Queue =
        ProcModel.StoreQueue->getValueAsDef("QueueDescriptor");
    QueueID = 1 + std::distance(ProcModel.ProcResourceDefs.begin(),
                                find(ProcModel.ProcResourceDefs, Queue));
  }
  OS << " " << QueueID << ", // Resource Descriptor for the Store Queue\n";
}
void SubtargetEmitter::EmitExtraProcessorInfo(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  // Generate a table of register file descriptors (one entry per each user
  // defined register file), and a table of register costs.
  unsigned NumCostEntries = EmitRegisterFileTables(ProcModel, OS);

  // Now generate a table for the extra processor info.
  OS << "\nstatic const llvm::MCExtraProcessorInfo " << ProcModel.ModelName
     << "ExtraInfo = {\n ";

  // Add information related to the retire control unit.
  EmitRetireControlUnitInfo(ProcModel, OS);

  // Add information related to the register files (i.e. where to find register
  // file descriptors and register costs).
  EmitRegisterFileInfo(ProcModel, ProcModel.RegisterFiles.size(),
                       NumCostEntries, OS);

  // Add information about load/store queues.
  EmitLoadStoreQueueInfo(ProcModel, OS);
}
void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel,
                                              raw_ostream &OS) {
  EmitProcessorResourceSubUnits(ProcModel, OS);

  OS << "\n// {Name, NumUnits, SuperIdx, BufferSize, SubUnitsIdxBegin}\n";
  OS << "static const llvm::MCProcResourceDesc " << ProcModel.ModelName
     << "ProcResources[] = {\n"
     << " {\"InvalidUnit\", 0, 0, 0, 0},\n";

  unsigned SubUnitsOffset = 1;
  for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) {
    Record *PRDef = ProcModel.ProcResourceDefs[i];

    Record *SuperDef = nullptr;
    unsigned SuperIdx = 0;
    unsigned NumUnits = 0;
    const unsigned SubUnitsBeginOffset = SubUnitsOffset;
    int BufferSize = PRDef->getValueAsInt("BufferSize");
    if (PRDef->isSubClassOf("ProcResGroup")) {
      RecVec ResUnits = PRDef->getValueAsListOfDefs("Resources");
      for (Record *RU : ResUnits) {
        NumUnits += RU->getValueAsInt("NumUnits");
        SubUnitsOffset += RU->getValueAsInt("NumUnits");
      }
    } else {
      if (PRDef->getValueInit("Super")->isComplete()) {
        SuperDef =
            SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"),
                                         ProcModel, PRDef->getLoc());
        SuperIdx = ProcModel.getProcResourceIdx(SuperDef);
      }
      NumUnits = PRDef->getValueAsInt("NumUnits");
    }
    // Emit the ProcResourceDesc
    OS << " {\"" << PRDef->getName() << "\", ";
    if (PRDef->getName().size() < 15)
      OS.indent(15 - PRDef->getName().size());
    OS << NumUnits << ", " << SuperIdx << ", " << BufferSize << ", ";
    if (SubUnitsBeginOffset != SubUnitsOffset) {
      OS << ProcModel.ModelName << "ProcResourceSubUnits + "
         << SubUnitsBeginOffset;
    }
    OS << "}, // #" << i + 1;
    if (SuperDef)
      OS << ", Super=" << SuperDef->getName();
  }
}
// Find the WriteRes Record that defines processor resources for this
// SchedWrite.
Record *SubtargetEmitter::FindWriteResources(
    const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) {

  // Check if the SchedWrite is already subtarget-specific and directly
  // specifies a set of processor resources.
  if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes"))
    return SchedWrite.TheDef;

  Record *AliasDef = nullptr;
  for (Record *A : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes"))
    return AliasDef;

  // Check this processor's list of write resources.
  Record *ResDef = nullptr;
  for (Record *WR : ProcModel.WriteResDefs) {
    if (!WR->isSubClassOf("WriteRes"))
      continue;
    if (AliasDef == WR->getValueAsDef("WriteType")
        || SchedWrite.TheDef == WR->getValueAsDef("WriteType")) {
      if (ResDef)
        PrintFatalError(WR->getLoc(), "Resources are defined for both "
                        "SchedWrite and its alias on processor " +
                        ProcModel.ModelName);
      ResDef = WR;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindWriteResources recursively with that model here.
  if (!ResDef)
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                    SchedWrite.TheDef->getName());
  return ResDef;
}
/// Find the ReadAdvance record for the given SchedRead on this processor or
/// return nullptr.
Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead,
                                          const CodeGenProcModel &ProcModel) {
  // Check for SchedReads that directly specify a ReadAdvance.
  if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance"))
    return SchedRead.TheDef;

  // Check this processor's list of aliases for SchedRead.
  Record *AliasDef = nullptr;
  for (Record *A : SchedRead.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(A->getValueAsDef("AliasRW"));
    if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel");
      if (&SchedModels.getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance"))
    return AliasDef;

  // Check this processor's ReadAdvanceList.
  Record *ResDef = nullptr;
  for (Record *RA : ProcModel.ReadAdvanceDefs) {
    if (!RA->isSubClassOf("ReadAdvance"))
      continue;
    if (AliasDef == RA->getValueAsDef("ReadType")
        || SchedRead.TheDef == RA->getValueAsDef("ReadType")) {
      if (ResDef)
        PrintFatalError(RA->getLoc(), "Resources are defined for both "
                        "SchedRead and its alias on processor " +
                        ProcModel.ModelName);
      ResDef = RA;
    }
  }
  // TODO: If ProcModel has a base model (previous generation processor),
  // then call FindReadAdvance recursively with that model here.
  if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") {
    PrintFatalError(ProcModel.ModelDef->getLoc(),
                    Twine("Processor does not define resources for ") +
                    SchedRead.TheDef->getName());
  }
  return ResDef;
}
// Expand an explicit list of processor resources into a full list of implied
// resource groups and super resources that cover them.
void SubtargetEmitter::ExpandProcResources(RecVec &PRVec,
                                           std::vector<int64_t> &Cycles,
                                           const CodeGenProcModel &PM) {
  assert(PRVec.size() == Cycles.size() && "failed precondition");
  for (unsigned i = 0, e = PRVec.size(); i != e; ++i) {
    Record *PRDef = PRVec[i];
    RecVec SubResources;
    if (PRDef->isSubClassOf("ProcResGroup"))
      SubResources = PRDef->getValueAsListOfDefs("Resources");
    else {
      SubResources.push_back(PRDef);
      PRDef = SchedModels.findProcResUnits(PRDef, PM, PRDef->getLoc());
      for (Record *SubDef = PRDef;
           SubDef->getValueInit("Super")->isComplete();) {
        if (SubDef->isSubClassOf("ProcResGroup")) {
          // Disallow this for simplicity.
          PrintFatalError(SubDef->getLoc(), "Processor resource group "
                          " cannot be a super resources.");
        }
        Record *SuperDef =
            SchedModels.findProcResUnits(SubDef->getValueAsDef("Super"), PM,
                                         SubDef->getLoc());
        PRVec.push_back(SuperDef);
        Cycles.push_back(Cycles[i]);
        SubDef = SuperDef;
      }
    }
    for (Record *PR : PM.ProcResourceDefs) {
      if (PR == PRDef || !PR->isSubClassOf("ProcResGroup"))
        continue;
      RecVec SuperResources = PR->getValueAsListOfDefs("Resources");
      RecIter SubI = SubResources.begin(), SubE = SubResources.end();
      for( ; SubI != SubE; ++SubI) {
        if (!is_contained(SuperResources, *SubI))
          break;
      }
      if (SubI == SubE) {
        PRVec.push_back(PR);
        Cycles.push_back(Cycles[i]);
      }
    }
  }
}
// Generate the SchedClass table for this processor and update global
// tables. Must be called for each processor in order.
void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel,
                                           SchedClassTables &SchedTables) {
  SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1);
  if (!ProcModel.hasInstrSchedModel())
    return;

  std::vector<MCSchedClassDesc> &SCTab = SchedTables.ProcSchedClasses.back();
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (GenSchedClassTables) +++\n");
  for (const CodeGenSchedClass &SC : SchedModels.schedClasses()) {
    LLVM_DEBUG(SC.dump(&SchedModels));

    SCTab.resize(SCTab.size() + 1);
    MCSchedClassDesc &SCDesc = SCTab.back();
    // SCDesc.Name is guarded by NDEBUG
    SCDesc.NumMicroOps = 0;
    SCDesc.BeginGroup = false;
    SCDesc.EndGroup = false;
    SCDesc.RetireOOO = false;
    SCDesc.WriteProcResIdx = 0;
    SCDesc.WriteLatencyIdx = 0;
    SCDesc.ReadAdvanceIdx = 0;

    // A Variant SchedClass has no resources of its own.
    bool HasVariants = false;
    for (const CodeGenSchedTransition &CGT :
         make_range(SC.Transitions.begin(), SC.Transitions.end())) {
      if (CGT.ProcIndex == ProcModel.Index) {
        HasVariants = true;
        break;
      }
    }
    if (HasVariants) {
      SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps;
      continue;
    }

    // Determine if the SchedClass is actually reachable on this processor. If
    // not, don't try to locate the processor resources, it will fail.
    // If ProcIndices contains 0, this class applies to all processors.
    assert(!SC.ProcIndices.empty() && "expect at least one procidx");
    if (SC.ProcIndices[0] != 0) {
      if (!is_contained(SC.ProcIndices, ProcModel.Index))
        continue;
    }
    IdxVec Writes = SC.Writes;
    IdxVec Reads = SC.Reads;
    if (!SC.InstRWs.empty()) {
      // This class has a default ReadWrite list which can be overridden by
      // InstRW definitions.
      Record *RWDef = nullptr;
      for (Record *RW : SC.InstRWs) {
        Record *RWModelDef = RW->getValueAsDef("SchedModel");
        if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) {
          RWDef = RW;
          break;
        }
      }
      if (RWDef) {
        Writes.clear();
        Reads.clear();
        SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
                            Writes, Reads);
      }
    }
    if (Writes.empty()) {
      // Check this processor's itinerary class resources.
      for (Record *I : ProcModel.ItinRWDefs) {
        RecVec Matched = I->getValueAsListOfDefs("MatchedItinClasses");
        if (is_contained(Matched, SC.ItinClassDef)) {
          SchedModels.findRWs(I->getValueAsListOfDefs("OperandReadWrites"),
                              Writes, Reads);
          break;
        }
      }
      if (Writes.empty()) {
        LLVM_DEBUG(dbgs() << ProcModel.ModelName
                          << " does not have resources for class " << SC.Name
                          << '\n');
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        continue;
      }
    }
    // Sum resources across all operand writes.
    std::vector<MCWriteProcResEntry> WriteProcResources;
    std::vector<MCWriteLatencyEntry> WriteLatencies;
    std::vector<std::string> WriterNames;
    std::vector<MCReadAdvanceEntry> ReadAdvanceEntries;
    for (unsigned W : Writes) {
      IdxVec WriteSeq;
      SchedModels.expandRWSeqForProc(W, WriteSeq, /*IsRead=*/false,
                                     ProcModel);

      // For each operand, create a latency entry.
      MCWriteLatencyEntry WLEntry;
      WLEntry.Cycles = 0;
      unsigned WriteID = WriteSeq.back();
      WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name);
      // If this Write is not referenced by a ReadAdvance, don't distinguish it
      // from other WriteLatency entries.
      if (!SchedModels.hasReadOfWrite(
              SchedModels.getSchedWrite(WriteID).TheDef)) {
        WriteID = 0;
      }
      WLEntry.WriteResourceID = WriteID;

      for (unsigned WS : WriteSeq) {

        Record *WriteRes =
            FindWriteResources(SchedModels.getSchedWrite(WS), ProcModel);

        // Mark the parent class as invalid for unsupported write types.
        if (WriteRes->getValueAsBit("Unsupported")) {
          SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
          break;
        }

        WLEntry.Cycles += WriteRes->getValueAsInt("Latency");
        SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup");
        SCDesc.BeginGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.EndGroup |= WriteRes->getValueAsBit("SingleIssue");
        SCDesc.RetireOOO |= WriteRes->getValueAsBit("RetireOOO");

        // Create an entry for each ProcResource listed in WriteRes.
        RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources");
        std::vector<int64_t> Cycles =
            WriteRes->getValueAsListOfInts("ResourceCycles");

        if (Cycles.empty()) {
          // If ResourceCycles is not provided, default to one cycle per
          // resource.
          Cycles.resize(PRVec.size(), 1);
        } else if (Cycles.size() != PRVec.size()) {
          // If ResourceCycles is provided, check consistency.
          PrintFatalError(
              WriteRes->getLoc(),
              Twine("Inconsistent resource cycles: !size(ResourceCycles) != "
                    "!size(ProcResources): ")
                  .concat(Twine(PRVec.size()))
                  .concat(Twine(" vs "))
                  .concat(Twine(Cycles.size())));
        }

        ExpandProcResources(PRVec, Cycles, ProcModel);

        for (unsigned PRIdx = 0, PREnd = PRVec.size();
             PRIdx != PREnd; ++PRIdx) {
          MCWriteProcResEntry WPREntry;
          WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]);
          assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx");
          WPREntry.Cycles = Cycles[PRIdx];
          // If this resource is already used in this sequence, add the current
          // entry's cycles so that the same resource appears to be used
          // serially, rather than multiple parallel uses. This is important for
          // in-order machines where the resource consumption is a hazard.
          unsigned WPRIdx = 0, WPREnd = WriteProcResources.size();
          for( ; WPRIdx != WPREnd; ++WPRIdx) {
            if (WriteProcResources[WPRIdx].ProcResourceIdx
                == WPREntry.ProcResourceIdx) {
              WriteProcResources[WPRIdx].Cycles += WPREntry.Cycles;
              break;
            }
          }
          if (WPRIdx == WPREnd)
            WriteProcResources.push_back(WPREntry);
        }
      }
      WriteLatencies.push_back(WLEntry);
    }

    // Create an entry for each operand Read in this SchedClass.
    // Entries must be sorted first by UseIdx then by WriteResourceID.
    for (unsigned UseIdx = 0, EndIdx = Reads.size();
         UseIdx != EndIdx; ++UseIdx) {
      Record *ReadAdvance =
          FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel);

      // Mark the parent class as invalid for unsupported write types.
      if (ReadAdvance->getValueAsBit("Unsupported")) {
        SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps;
        break;
      }
      RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites");
      IdxVec WriteIDs;
      if (ValidWrites.empty())
        WriteIDs.push_back(0);
      else {
        for (Record *VW : ValidWrites) {
          WriteIDs.push_back(SchedModels.getSchedRWIdx(VW, /*IsRead=*/false));
        }
      }
      llvm::sort(WriteIDs);
      for(unsigned W : WriteIDs) {
        MCReadAdvanceEntry RAEntry;
        RAEntry.UseIdx = UseIdx;
        RAEntry.WriteResourceID = W;
        RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles");
        ReadAdvanceEntries.push_back(RAEntry);
      }
    }
    if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
      WriteProcResources.clear();
      WriteLatencies.clear();
      ReadAdvanceEntries.clear();
      continue;
    }
    // Add the information for this SchedClass to the global tables using basic
    // linear merging of vectors.

    // WritePrecRes entries are sorted by ProcResIdx.
    llvm::sort(WriteProcResources, LessWriteProcResources());

    SCDesc.NumWriteProcResEntries = WriteProcResources.size();
    std::vector<MCWriteProcResEntry>::iterator WPRPos =
        std::search(SchedTables.WriteProcResources.begin(),
                    SchedTables.WriteProcResources.end(),
                    WriteProcResources.begin(), WriteProcResources.end());
    if (WPRPos != SchedTables.WriteProcResources.end())
      SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin();
    else {
      SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size();
      SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(),
                                            WriteProcResources.end());
    }
    // Latency entries must remain in operand order.
    SCDesc.NumWriteLatencyEntries = WriteLatencies.size();
    std::vector<MCWriteLatencyEntry>::iterator WLPos =
        std::search(SchedTables.WriteLatencies.begin(),
                    SchedTables.WriteLatencies.end(),
                    WriteLatencies.begin(), WriteLatencies.end());
    if (WLPos != SchedTables.WriteLatencies.end()) {
      unsigned idx = WLPos - SchedTables.WriteLatencies.begin();
      SCDesc.WriteLatencyIdx = idx;
      for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i)
        if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) ==
            std::string::npos) {
          SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i];
        }
    } else {
      SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size();
      llvm::append_range(SchedTables.WriteLatencies, WriteLatencies);
      llvm::append_range(SchedTables.WriterNames, WriterNames);
    }
    // ReadAdvanceEntries must remain in operand order.
    SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size();
    std::vector<MCReadAdvanceEntry>::iterator RAPos =
        std::search(SchedTables.ReadAdvanceEntries.begin(),
                    SchedTables.ReadAdvanceEntries.end(),
                    ReadAdvanceEntries.begin(), ReadAdvanceEntries.end());
    if (RAPos != SchedTables.ReadAdvanceEntries.end())
      SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin();
    else {
      SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size();
      llvm::append_range(SchedTables.ReadAdvanceEntries, ReadAdvanceEntries);
    }
  }
}
// Emit SchedClass tables for all processors and associated global tables.
void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables,
                                            raw_ostream &OS) {
  // Emit global WriteProcResTable.
  OS << "\n// {ProcResourceIdx, Cycles}\n"
     << "extern const llvm::MCWriteProcResEntry "
     << Target << "WriteProcResTable[] = {\n"
     << " { 0, 0}, // Invalid\n";
  for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size();
       WPRIdx != WPREnd; ++WPRIdx) {
    MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx];
    OS << " {" << format("%2d", WPREntry.ProcResourceIdx) << ", "
       << format("%2d", WPREntry.Cycles) << "}";
    if (WPRIdx + 1 < WPREnd)
      OS << ',';
    OS << " // #" << WPRIdx << '\n';
  }
  OS << "}; // " << Target << "WriteProcResTable\n";

  // Emit global WriteLatencyTable.
  OS << "\n// {Cycles, WriteResourceID}\n"
     << "extern const llvm::MCWriteLatencyEntry "
     << Target << "WriteLatencyTable[] = {\n"
     << " { 0, 0}, // Invalid\n";
  for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size();
       WLIdx != WLEnd; ++WLIdx) {
    MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx];
    OS << " {" << format("%2d", WLEntry.Cycles) << ", "
       << format("%2d", WLEntry.WriteResourceID) << "}";
    if (WLIdx + 1 < WLEnd)
      OS << ',';
    OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n';
  }
  OS << "}; // " << Target << "WriteLatencyTable\n";

  // Emit global ReadAdvanceTable.
  OS << "\n// {UseIdx, WriteResourceID, Cycles}\n"
     << "extern const llvm::MCReadAdvanceEntry "
     << Target << "ReadAdvanceTable[] = {\n"
     << " {0, 0, 0}, // Invalid\n";
  for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size();
       RAIdx != RAEnd; ++RAIdx) {
    MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx];
    OS << " {" << RAEntry.UseIdx << ", "
       << format("%2d", RAEntry.WriteResourceID) << ", "
       << format("%2d", RAEntry.Cycles) << "}";
    if (RAIdx + 1 < RAEnd)
      OS << ',';
    OS << " // #" << RAIdx << '\n';
  }
  OS << "}; // " << Target << "ReadAdvanceTable\n";

  // Emit a SchedClass table for each processor.
  for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
                                    PE = SchedModels.procModelEnd();
       PI != PE; ++PI) {
    if (!PI->hasInstrSchedModel())
      continue;

    std::vector<MCSchedClassDesc> &SCTab =
        SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())];

    OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup, RetireOOO,"
       << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n";
    OS << "static const llvm::MCSchedClassDesc "
       << PI->ModelName << "SchedClasses[] = {\n";

    // The first class is always invalid. We have no way to distinguish it
    // except by name and position.
    assert(SchedModels.getSchedClass(0).Name == "NoInstrModel"
           && "invalid class not first");
    OS << " {DBGFIELD(\"InvalidSchedClass\") "
       << MCSchedClassDesc::InvalidNumMicroOps
       << ", false, false, false, 0, 0, 0, 0, 0, 0},\n";

    for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) {
      MCSchedClassDesc &MCDesc = SCTab[SCIdx];
      const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx);
      OS << " {DBGFIELD(\"" << SchedClass.Name << "\") ";
      if (SchedClass.Name.size() < 18)
        OS.indent(18 - SchedClass.Name.size());
      OS << MCDesc.NumMicroOps
         << ", " << ( MCDesc.BeginGroup ? "true" : "false" )
         << ", " << ( MCDesc.EndGroup ? "true" : "false" )
         << ", " << ( MCDesc.RetireOOO ? "true" : "false" )
         << ", " << format("%2d", MCDesc.WriteProcResIdx)
         << ", " << MCDesc.NumWriteProcResEntries
         << ", " << format("%2d", MCDesc.WriteLatencyIdx)
         << ", " << MCDesc.NumWriteLatencyEntries
         << ", " << format("%2d", MCDesc.ReadAdvanceIdx)
         << ", " << MCDesc.NumReadAdvanceEntries
         << "}, // #" << SCIdx << '\n';
    }
    OS << "}; // " << PI->ModelName << "SchedClasses\n";
  }
}
void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) {
  // For each processor model.
  for (const CodeGenProcModel &PM : SchedModels.procModels()) {
    // Emit extra processor info if available.
    if (PM.hasExtraProcessorInfo())
      EmitExtraProcessorInfo(PM, OS);
    // Emit processor resource table.
    if (PM.hasInstrSchedModel())
      EmitProcessorResources(PM, OS);
    else if(!PM.ProcResourceDefs.empty())
      PrintFatalError(PM.ModelDef->getLoc(), "SchedMachineModel defines "
                      "ProcResources without defining WriteRes SchedWriteRes");

    // Begin processor itinerary properties
    OS << "static const llvm::MCSchedModel " << PM.ModelName << " = {\n";
    EmitProcessorProp(OS, PM.ModelDef, "IssueWidth", ',');
    EmitProcessorProp(OS, PM.ModelDef, "MicroOpBufferSize", ',');
    EmitProcessorProp(OS, PM.ModelDef, "LoopMicroOpBufferSize", ',');
    EmitProcessorProp(OS, PM.ModelDef, "LoadLatency", ',');
    EmitProcessorProp(OS, PM.ModelDef, "HighLatency", ',');
    EmitProcessorProp(OS, PM.ModelDef, "MispredictPenalty", ',');

    bool PostRAScheduler =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("PostRAScheduler") : false);

    OS << " " << (PostRAScheduler ? "true" : "false") << ", // "
       << "PostRAScheduler\n";

    bool CompleteModel =
        (PM.ModelDef ? PM.ModelDef->getValueAsBit("CompleteModel") : false);

    OS << " " << (CompleteModel ? "true" : "false") << ", // "
       << "CompleteModel\n";

    OS << " " << PM.Index << ", // Processor ID\n";
    if (PM.hasInstrSchedModel())
      OS << " " << PM.ModelName << "ProcResources" << ",\n"
         << " " << PM.ModelName << "SchedClasses" << ",\n"
         << " " << PM.ProcResourceDefs.size()+1 << ",\n"
         << " " << (SchedModels.schedClassEnd()
                    - SchedModels.schedClassBegin()) << ",\n";
    else
      OS << " nullptr, nullptr, 0, 0,"
         << " // No instruction-level machine model.\n";
    if (PM.hasItineraries())
      OS << " " << PM.ItinsDef->getName() << ",\n";
    else
      OS << " nullptr, // No Itinerary\n";
    if (PM.hasExtraProcessorInfo())
      OS << " &" << PM.ModelName << "ExtraInfo,\n";
    else
      OS << " nullptr // No extra processor descriptor\n";
  }
}
//
// EmitSchedModel - Emits all scheduling model tables, folding common patterns.
//
void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) {
  OS << "#ifdef DBGFIELD\n"
     << "#error \"<target>GenSubtargetInfo.inc requires a DBGFIELD macro\"\n"
     << "#endif\n"
     << "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)\n"
     << "#define DBGFIELD(x) x,\n"
     << "#else\n"
     << "#define DBGFIELD(x)\n"
     << "#endif\n";

  if (SchedModels.hasItineraries()) {
    std::vector<std::vector<InstrItinerary>> ProcItinLists;
    // Emit the stage data
    EmitStageAndOperandCycleData(OS, ProcItinLists);
    EmitItineraries(OS, ProcItinLists);
  }
  OS << "\n// ===============================================================\n"
     << "// Data tables for the new per-operand machine model.\n";

  SchedClassTables SchedTables;
  for (const CodeGenProcModel &ProcModel : SchedModels.procModels()) {
    GenSchedClassTables(ProcModel, SchedTables);
  }
  EmitSchedClassTables(SchedTables, OS);

  OS << "\n#undef DBGFIELD\n";

  // Emit the processor machine model
  EmitProcessorModels(OS);
}
static void emitPredicateProlog(const RecordKeeper &Records, raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream Stream(Buffer);

  // Collect all the PredicateProlog records and print them to the output
  // stream.
  std::vector<Record *> Prologs =
      Records.getAllDerivedDefinitions("PredicateProlog");
  llvm::sort(Prologs, LessRecord());
  for (Record *P : Prologs)
    Stream << P->getValueAsString("Code") << '\n';

  OS << Buffer;
}
static bool isTruePredicate(const Record *Rec) {
  return Rec->isSubClassOf("MCSchedPredicate") &&
         Rec->getValueAsDef("Pred")->isSubClassOf("MCTrue");
}

static void emitPredicates(const CodeGenSchedTransition &T,
                           const CodeGenSchedClass &SC, PredicateExpander &PE,
                           raw_ostream &OS) {
  std::string Buffer;
  raw_string_ostream SS(Buffer);

  // If not all predicates are MCTrue, then we need an if-stmt.
  unsigned NumNonTruePreds =
      T.PredTerm.size() - count_if(T.PredTerm, isTruePredicate);

  SS.indent(PE.getIndentLevel() * 2);

  if (NumNonTruePreds) {
    bool FirstNonTruePredicate = true;
    SS << "if (";

    PE.setIndentLevel(PE.getIndentLevel() + 2);

    for (const Record *Rec : T.PredTerm) {
      // Skip predicates that evaluate to "true".
      if (isTruePredicate(Rec))
        continue;

      if (FirstNonTruePredicate) {
        FirstNonTruePredicate = false;
      } else {
        SS << "\n";
        SS.indent(PE.getIndentLevel() * 2);
        SS << "&& ";
      }

      if (Rec->isSubClassOf("MCSchedPredicate")) {
        PE.expandPredicate(SS, Rec->getValueAsDef("Pred"));
        continue;
      }

      // Expand this legacy predicate and wrap it around braces if there is more
      // than one predicate to expand.
      SS << ((NumNonTruePreds > 1) ? "(" : "")
         << Rec->getValueAsString("Predicate")
         << ((NumNonTruePreds > 1) ? ")" : "");
    }

    SS << ")\n"; // end of if-stmt
    PE.decreaseIndentLevel();
    SS.indent(PE.getIndentLevel() * 2);
    PE.decreaseIndentLevel();
  }

  SS << "return " << T.ToClassIdx << "; // " << SC.Name << '\n';
  OS << Buffer;
}
// Used by method `SubtargetEmitter::emitSchedModelHelpersImpl()` to generate
// epilogue code for the auto-generated helper.
static void emitSchedModelHelperEpilogue(raw_ostream &OS,
                                         bool ShouldReturnZero) {
  if (ShouldReturnZero) {
    OS << "  // Don't know how to resolve this scheduling class.\n"
       << "  return 0;\n";
    return;
  }

  OS << "  report_fatal_error(\"Expected a variant SchedClass\");\n";
}

static bool hasMCSchedPredicates(const CodeGenSchedTransition &T) {
  return all_of(T.PredTerm, [](const Record *Rec) {
    return Rec->isSubClassOf("MCSchedPredicate");
  });
}
&SchedModels
,
1519 IdxVec
&VariantClasses
,
1520 bool OnlyExpandMCInstPredicates
) {
1521 for (const CodeGenSchedClass
&SC
: SchedModels
.schedClasses()) {
1522 // Ignore non-variant scheduling classes.
1523 if (SC
.Transitions
.empty())
1526 if (OnlyExpandMCInstPredicates
) {
1527 // Ignore this variant scheduling class no transitions use any meaningful
1528 // MCSchedPredicate definitions.
1529 if (!any_of(SC
.Transitions
, [](const CodeGenSchedTransition
&T
) {
1530 return hasMCSchedPredicates(T
);
1535 VariantClasses
.push_back(SC
.Index
);
1539 static void collectProcessorIndices(const CodeGenSchedClass
&SC
,
1540 IdxVec
&ProcIndices
) {
1541 // A variant scheduling class may define transitions for multiple
1542 // processors. This function identifies wich processors are associated with
1543 // transition rules specified by variant class `SC`.
1544 for (const CodeGenSchedTransition
&T
: SC
.Transitions
) {
1546 std::set_union(&T
.ProcIndex
, &T
.ProcIndex
+ 1, ProcIndices
.begin(),
1547 ProcIndices
.end(), std::back_inserter(PI
));
1548 ProcIndices
.swap(PI
);
1552 static bool isAlwaysTrue(const CodeGenSchedTransition
&T
) {
1553 return llvm::all_of(T
.PredTerm
,
1554 [](const Record
*R
) { return isTruePredicate(R
); });
void SubtargetEmitter::emitSchedModelHelpersImpl(
    raw_ostream &OS, bool OnlyExpandMCInstPredicates) {
  IdxVec VariantClasses;
  collectVariantClasses(SchedModels, VariantClasses,
                        OnlyExpandMCInstPredicates);

  if (VariantClasses.empty()) {
    emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
    return;
  }

  // Construct a switch statement where the condition is a check on the
  // scheduling class identifier. There is a `case` for every variant class
  // defined by the processor models of this target.
  // Each `case` implements a number of rules to resolve (i.e. to transition from)
  // a variant scheduling class to another scheduling class. Rules are
  // described by instances of CodeGenSchedTransition. Note that transitions may
  // not be valid for all processors.
  OS << "  switch (SchedClass) {\n";
  for (unsigned VC : VariantClasses) {
    IdxVec ProcIndices;
    const CodeGenSchedClass &SC = SchedModels.getSchedClass(VC);
    collectProcessorIndices(SC, ProcIndices);

    OS << "  case " << VC << ": // " << SC.Name << '\n';

    PredicateExpander PE(Target);
    PE.setExpandForMC(OnlyExpandMCInstPredicates);
    for (unsigned PI : ProcIndices) {

      // Emit a guard on the processor ID.
      if (PI != 0) {
        OS << (OnlyExpandMCInstPredicates
                   ? "if (CPUID == "
                   : "if (SchedModel->getProcessorID() == ");
        OS << PI << ") ";
        OS << "{ // " << (SchedModels.procModelBegin() + PI)->ModelName << '\n';
      }

      // Now emit transitions associated with processor PI.
      const CodeGenSchedTransition *FinalT = nullptr;
      for (const CodeGenSchedTransition &T : SC.Transitions) {
        if (PI != 0 && T.ProcIndex != PI)
          continue;

        // Emit only transitions based on MCSchedPredicate, if it's the case.
        // At least the transition specified by NoSchedPred is emitted,
        // which becomes the default transition for those variants otherwise
        // not based on MCSchedPredicate.
        // FIXME: preferably, llvm-mca should instead assume a reasonable
        // default when a variant transition is not based on MCSchedPredicate
        // for a given processor.
        if (OnlyExpandMCInstPredicates && !hasMCSchedPredicates(T))
          continue;

        // If transition is folded to 'return X' it should be the last one.
        if (isAlwaysTrue(T)) {
          FinalT = &T;
          continue;
        }
        PE.setIndentLevel(3);
        emitPredicates(T, SchedModels.getSchedClass(T.ToClassIdx), PE, OS);
      }
      if (FinalT)
        emitPredicates(*FinalT, SchedModels.getSchedClass(FinalT->ToClassIdx),
                       PE, OS);
    }

    if (SC.isInferred())
      OS << "    return " << SC.Index << ";\n";
  }

  emitSchedModelHelperEpilogue(OS, OnlyExpandMCInstPredicates);
}
void SubtargetEmitter::EmitSchedModelHelpers(const std::string &ClassName,
                                             raw_ostream &OS) {
  OS << "unsigned " << ClassName
     << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI,"
     << " const TargetSchedModel *SchedModel) const {\n";

  // Emit the predicate prolog code.
  emitPredicateProlog(Records, OS);

  // Emit target predicates.
  emitSchedModelHelpersImpl(OS);

  OS << "} // " << ClassName << "::resolveSchedClass\n\n";

  OS << "unsigned " << ClassName
     << "\n::resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,"
     << " const MCInstrInfo *MCII, unsigned CPUID) const {\n"
     << " return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n"
     << "} // " << ClassName << "::resolveVariantSchedClass\n\n";

  STIPredicateExpander PE(Target);
  PE.setClassPrefix(ClassName);
  PE.setExpandDefinition(true);
  PE.setIndentLevel(0);

  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);
}
void SubtargetEmitter::EmitHwModeCheck(const std::string &ClassName,
                                       raw_ostream &OS) {
  const CodeGenHwModes &CGH = TGT.getHwModes();
  assert(CGH.getNumModeIds() > 0);
  if (CGH.getNumModeIds() == 1)
    return;

  OS << "unsigned " << ClassName << "::getHwMode() const {\n";
  for (unsigned M = 1, NumModes = CGH.getNumModeIds(); M != NumModes; ++M) {
    const HwMode &HM = CGH.getMode(M);
    OS << "  if (checkFeatures(\"" << HM.Features
       << "\")) return " << M << ";\n";
  }
  OS << "  return 0;\n}\n";
}
//
// ParseFeaturesFunction - Produces a subtarget specific function for parsing
// the subtarget features string.
//
void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS,
                                             unsigned NumFeatures,
                                             unsigned NumProcs) {
  std::vector<Record *> Features =
      Records.getAllDerivedDefinitions("SubtargetFeature");
  llvm::sort(Features, LessRecord());

  OS << "// ParseSubtargetFeatures - Parses features string setting specified\n"
     << "// subtarget options.\n";
  OS << "Subtarget::ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, "
     << "StringRef FS) {\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nFeatures:\" << FS);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nCPU:\" << CPU);\n"
     << "  LLVM_DEBUG(dbgs() << \"\\nTuneCPU:\" << TuneCPU << \"\\n\\n\");\n";

  if (Features.empty()) {
    OS << "}\n";
    return;
  }

  OS << "  InitMCProcessorInfo(CPU, TuneCPU, FS);\n"
     << "  const FeatureBitset &Bits = getFeatureBits();\n";

  for (Record *R : Features) {
    // Next feature record.
    StringRef Instance = R->getName();
    StringRef Value = R->getValueAsString("Value");
    StringRef Attribute = R->getValueAsString("Attribute");

    if (Value=="true" || Value=="false")
      OS << "  if (Bits[" << Target << "::"
         << Instance << "]) "
         << Attribute << " = " << Value << ";\n";
    else
      OS << "  if (Bits[" << Target << "::"
         << Instance << "] && "
         << Attribute << " < " << Value << ") "
         << Attribute << " = " << Value << ";\n";
  }
}
void SubtargetEmitter::emitGenMCSubtargetInfo(raw_ostream &OS) {
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,\n"
     << "    const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {\n";
  emitSchedModelHelpersImpl(OS, /* OnlyExpandMCPredicates */ true);
  OS << "}\n";
  OS << "} // end namespace " << Target << "_MC\n\n";

  OS << "struct " << Target
     << "GenMCSubtargetInfo : public MCSubtargetInfo {\n";
  OS << "  " << Target << "GenMCSubtargetInfo(const Triple &TT,\n"
     << "    StringRef CPU, StringRef TuneCPU, StringRef FS,\n"
     << "    ArrayRef<SubtargetFeatureKV> PF,\n"
     << "    ArrayRef<SubtargetSubTypeKV> PD,\n"
     << "    const MCWriteProcResEntry *WPR,\n"
     << "    const MCWriteLatencyEntry *WL,\n"
     << "    const MCReadAdvanceEntry *RA, const InstrStage *IS,\n"
     << "    const unsigned *OC, const unsigned *FP) :\n"
     << "      MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,\n"
     << "                      WPR, WL, RA, IS, OC, FP) { }\n\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,\n"
     << "      const MCInst *MI, const MCInstrInfo *MCII,\n"
     << "      unsigned CPUID) const override {\n"
     << "    return " << Target << "_MC"
     << "::resolveVariantSchedClassImpl(SchedClass, MI, MCII, CPUID);\n";
  OS << "  }\n";
  if (TGT.getHwModes().getNumModeIds() > 1)
    OS << "  unsigned getHwMode() const override;\n";
  OS << "};\n";
  EmitHwModeCheck(Target + "GenMCSubtargetInfo", OS);
}
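
// Illustrative sketch of what the code above emits for a hypothetical target
// "Foo" (abridged):
//
//   namespace Foo_MC {
//   unsigned resolveVariantSchedClassImpl(unsigned SchedClass,
//       const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID) {
//     ...
//   }
//   } // end namespace Foo_MC
//
//   struct FooGenMCSubtargetInfo : public MCSubtargetInfo {
//     FooGenMCSubtargetInfo(const Triple &TT, ...)   // forwards its arguments
//         : MCSubtargetInfo(TT, CPU, TuneCPU, FS, PF, PD,
//                           WPR, WL, RA, IS, OC, FP) {}
//     unsigned resolveVariantSchedClass(unsigned SchedClass, const MCInst *MI,
//         const MCInstrInfo *MCII, unsigned CPUID) const override {
//       return Foo_MC::resolveVariantSchedClassImpl(SchedClass, MI, MCII,
//                                                   CPUID);
//     }
//   };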
void SubtargetEmitter::EmitMCInstrAnalysisPredicateFunctions(raw_ostream &OS) {
  OS << "\n#ifdef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  STIPredicateExpander PE(Target);
  PE.setExpandForMC(true);
  PE.setByRef(true);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS\n\n";

  OS << "\n#ifdef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n";
  OS << "#undef GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";

  std::string ClassPrefix = Target + "MCInstrAnalysis";
  PE.setExpandDefinition(true);
  PE.setClassPrefix(ClassPrefix);
  PE.setIndentLevel(0);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "#endif // GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS\n\n";
}
// SubtargetEmitter::run - Main subtarget enumeration emitter.
void SubtargetEmitter::run(raw_ostream &OS) {
  emitSourceFileHeader("Subtarget Enumeration Source Fragment", OS);

  OS << "\n#ifdef GET_SUBTARGETINFO_ENUM\n";
  OS << "#undef GET_SUBTARGETINFO_ENUM\n\n";

  DenseMap<Record *, unsigned> FeatureMap;

  OS << "namespace llvm {\n";
  Enumeration(OS, FeatureMap);
  OS << "} // end namespace llvm\n\n";
  OS << "#endif // GET_SUBTARGETINFO_ENUM\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_MC_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_MC_DESC\n\n";

  OS << "namespace llvm {\n";
#if 0
  OS << "namespace {\n";
#endif
  unsigned NumFeatures = FeatureKeyValues(OS, FeatureMap);
  OS << "\n";
  EmitSchedModel(OS);
  OS << "\n";
  unsigned NumProcs = CPUKeyValues(OS, FeatureMap);
  OS << "\n";
#if 0
  OS << "} // end anonymous namespace\n\n";
#endif

  // MCInstrInfo initialization routine.
  emitGenMCSubtargetInfo(OS);

  OS << "\nstatic inline MCSubtargetInfo *create" << Target
     << "MCSubtargetInfoImpl("
     << "const Triple &TT, StringRef CPU, StringRef TuneCPU, StringRef FS) {\n";
  OS << "  return new " << Target
     << "GenMCSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << Target << "FeatureKV, ";
  else
    OS << "None, ";
  if (NumProcs)
    OS << Target << "SubTypeKV, ";
  else
    OS << "None, ";
  OS << '\n'; OS.indent(22);
  OS << Target << "WriteProcResTable, "
     << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n'; OS.indent(22);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, "
       << Target << "OperandCycles, "
       << Target << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";
  OS << ");\n}\n\n";

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_MC_DESC\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_TARGET_DESC\n";
  OS << "#undef GET_SUBTARGETINFO_TARGET_DESC\n\n";

  OS << "#include \"llvm/Support/Debug.h\"\n";
  OS << "#include \"llvm/Support/raw_ostream.h\"\n\n";
  ParseFeaturesFunction(OS, NumFeatures, NumProcs);

  OS << "#endif // GET_SUBTARGETINFO_TARGET_DESC\n\n";
  // Create a TargetSubtargetInfo subclass to hide the MC layer initialization.
  OS << "\n#ifdef GET_SUBTARGETINFO_HEADER\n";
  OS << "#undef GET_SUBTARGETINFO_HEADER\n\n";

  std::string ClassName = Target + "GenSubtargetInfo";
  OS << "namespace llvm {\n";
  OS << "class DFAPacketizer;\n";
  OS << "namespace " << Target << "_MC {\n"
     << "unsigned resolveVariantSchedClassImpl(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII, unsigned CPUID);\n"
     << "} // end namespace " << Target << "_MC\n\n";
  OS << "struct " << ClassName << " : public TargetSubtargetInfo {\n"
     << "  explicit " << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS);\n"
     << "public:\n"
     << "  unsigned resolveSchedClass(unsigned SchedClass, "
     << " const MachineInstr *DefMI,"
     << " const TargetSchedModel *SchedModel) const override;\n"
     << "  unsigned resolveVariantSchedClass(unsigned SchedClass,"
     << " const MCInst *MI, const MCInstrInfo *MCII,"
     << " unsigned CPUID) const override;\n"
     << "  DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
     << " const;\n";
  if (TGT.getHwModes().getNumModeIds() > 1)
    OS << "  unsigned getHwMode() const override;\n";

  STIPredicateExpander PE(Target);
  PE.setByRef(false);
  for (const STIPredicateFunction &Fn : SchedModels.getSTIPredicates())
    PE.expandSTIPredicate(OS, Fn);

  OS << "};\n"
     << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_HEADER\n\n";

  OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n";
  OS << "#undef GET_SUBTARGETINFO_CTOR\n\n";

  OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n\n";
  OS << "namespace llvm {\n";
  OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n";
  OS << "extern const llvm::SubtargetSubTypeKV " << Target << "SubTypeKV[];\n";
  OS << "extern const llvm::MCWriteProcResEntry "
     << Target << "WriteProcResTable[];\n";
  OS << "extern const llvm::MCWriteLatencyEntry "
     << Target << "WriteLatencyTable[];\n";
  OS << "extern const llvm::MCReadAdvanceEntry "
     << Target << "ReadAdvanceTable[];\n";

  if (SchedModels.hasItineraries()) {
    OS << "extern const llvm::InstrStage " << Target << "Stages[];\n";
    OS << "extern const unsigned " << Target << "OperandCycles[];\n";
    OS << "extern const unsigned " << Target << "ForwardingPaths[];\n";
  }

  OS << ClassName << "::" << ClassName << "(const Triple &TT, StringRef CPU, "
     << "StringRef TuneCPU, StringRef FS)\n"
     << "  : TargetSubtargetInfo(TT, CPU, TuneCPU, FS, ";
  if (NumFeatures)
    OS << "makeArrayRef(" << Target << "FeatureKV, " << NumFeatures << "), ";
  else
    OS << "None, ";
  if (NumProcs)
    OS << "makeArrayRef(" << Target << "SubTypeKV, " << NumProcs << "), ";
  else
    OS << "None, ";
  OS << '\n'; OS.indent(24);
  OS << Target << "WriteProcResTable, "
     << Target << "WriteLatencyTable, "
     << Target << "ReadAdvanceTable, ";
  OS << '\n'; OS.indent(24);
  if (SchedModels.hasItineraries()) {
    OS << Target << "Stages, "
       << Target << "OperandCycles, "
       << Target << "ForwardingPaths";
  } else
    OS << "nullptr, nullptr, nullptr";

  OS << ") {}\n\n";

  EmitSchedModelHelpers(ClassName, OS);
  EmitHwModeCheck(ClassName, OS);

  OS << "} // end namespace llvm\n\n";

  OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n";

  EmitMCInstrAnalysisPredicateFunctions(OS);
}
namespace llvm {

void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS) {
  CodeGenTarget CGTarget(RK);
  SubtargetEmitter(RK, CGTarget).run(OS);
}

} // end namespace llvm