//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//

#include "CodeGenSchedule.h"
#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"

#define DEBUG_TYPE "subtarget-emitter"
static void dumpIdxVec(ArrayRef<unsigned> V) {
  for (unsigned Idx : V)
    dbgs() << Idx << ", ";
}
namespace {

// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
struct InstrsOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
  }
};
// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
struct InstRegexOp : public SetTheory::Operator {
  const CodeGenTarget &Target;
  InstRegexOp(const CodeGenTarget &t) : Target(t) {}

  /// Remove any text inside of parentheses from S.
  static std::string removeParens(llvm::StringRef S) {
    std::string Result;
    unsigned Depth = 0;
    // NB: We don't care about escaped parens here.
    for (char C : S) {
      if (C == '(')
        ++Depth;
      else if (C == ')')
        --Depth;
      else if (Depth == 0)
        Result += C;
    }
    return Result;
  }
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ArrayRef<const CodeGenInstruction *> Instructions =
        Target.getInstructionsByEnumValue();

    unsigned NumGeneric = Target.getNumFixedInstructions();
    unsigned NumPseudos = Target.getNumPseudoInstructions();
    auto Generics = Instructions.slice(0, NumGeneric);
    auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
    auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);

    for (Init *Arg : Expr->getArgs()) {
      StringInit *SI = dyn_cast<StringInit>(Arg);
      if (!SI)
        PrintFatalError(Loc, "instregex requires pattern string: " +
                                 Expr->getAsString());
      StringRef Original = SI->getValue();
      // Drop an explicit ^ anchor to not interfere with prefix search.
      bool HadAnchor = Original.consume_front("^");

      // Extract a prefix that we can binary search on.
      static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
      auto FirstMeta = Original.find_first_of(RegexMetachars);
      if (FirstMeta != StringRef::npos && FirstMeta > 0) {
        // If we have a regex like ABC* we can only use AB as the prefix, as
        // the quantifier also applies to the character right before it.
        switch (Original[FirstMeta]) {
        case '+':
        case '*':
        case '?':
          --FirstMeta;
          break;
        default:
          break;
        }
      }
      // Look for top-level | or ?. We cannot optimize them to binary search.
      if (removeParens(Original).find_first_of("|?") != std::string::npos)
        FirstMeta = 0;

      std::optional<Regex> Regexpr;
      StringRef Prefix = Original.substr(0, FirstMeta);
      StringRef PatStr = Original.substr(FirstMeta);
      if (!PatStr.empty()) {
        // For the rest use a python-style prefix match.
        std::string pat = std::string(PatStr);
        // Add ^ anchor. If we had one originally, don't need the group.
        if (HadAnchor) {
          pat.insert(0, "^");
        } else {
          pat.insert(0, "^(");
          pat.insert(pat.end(), ')');
        }
        Regexpr = Regex(pat);
      }
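
      // Illustrative example (hypothetical pattern, not from any target): for
      // (instregex "ADD.*rr"), Prefix becomes "ADD" and Regexpr matches ".*rr"
      // against the remainder of each name, so the literal prefix narrows the
      // binary search below while the regex filters the surviving candidates.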
      int NumMatches = 0;

      // The generic opcodes are unsorted, handle them manually.
      for (auto *Inst : Generics) {
        StringRef InstName = Inst->TheDef->getName();
        if (InstName.starts_with(Prefix) &&
            (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      }

      // Target instructions are split into two ranges: pseudo instructions
      // first, then non-pseudos. Each range is in lexicographical order
      // sorted by name. Find the sub-ranges that start with our prefix.
      struct Comp {
        bool operator()(const CodeGenInstruction *LHS, StringRef RHS) {
          return LHS->TheDef->getName() < RHS;
        }
        bool operator()(StringRef LHS, const CodeGenInstruction *RHS) {
          return LHS < RHS->TheDef->getName() &&
                 !RHS->TheDef->getName().starts_with(LHS);
        }
      };
      auto Range1 =
          std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp());
      auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(),
                                     Prefix, Comp());

      // For these ranges we know that instruction names start with the prefix.
      // Check if there's a regex that needs to be checked.
      const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) {
        StringRef InstName = Inst->TheDef->getName();
        if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      };
      std::for_each(Range1.first, Range1.second, HandleNonGeneric);
      std::for_each(Range2.first, Range2.second, HandleNonGeneric);

      if (NumMatches == 0)
        PrintFatalError(Loc, "instregex has no matches: " + Original);
    }
  }
};

} // end anonymous namespace
/// CodeGenModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
                                       const CodeGenTarget &TGT)
    : Records(RK), Target(TGT) {
  Sets.addFieldExpander("InstRW", "Instrs");

  // Allow Set evaluation to recognize the dags used in InstRW records:
  // (instrs Op1, Op1...)
  Sets.addOperator("instrs", std::make_unique<InstrsOp>());
  Sets.addOperator("instregex", std::make_unique<InstRegexOp>(Target));

  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
  // that are explicitly referenced in tablegen records. Resources associated
  // with each processor will be derived later. Populate ProcModelMap with the
  // CodeGenProcModel instances.
  collectProcModels();

  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
  // defined, and populate the SchedReads and SchedWrites vectors. Implicit
  // SchedReadWrites that represent sequences derived from expanded variants
  // will be inferred later.
  collectSchedRW();

  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
  // required by an instruction definition, and populate SchedClassIdxMap. Set
  // NumItineraryClasses to the number of explicit itinerary classes referenced
  // by instructions. Set NumInstrSchedClasses to the number of itinerary
  // classes plus any classes implied by instructions that derive from class
  // Sched and provide a SchedRW list. This does not infer any new classes from
  // SchedVariant.
  collectSchedClasses();

  // Find instruction itineraries for each processor. Sort and populate
  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
  // all itinerary classes to be discovered.
  collectProcItins();

  // Find ItinRW records for each processor and itinerary class.
  // (For per-operand resources mapped to itinerary classes).
  collectProcItinRW();

  // Find UnsupportedFeatures records for each processor.
  // (For per-operand resources mapped to itinerary classes).
  collectProcUnsupportedFeatures();

  // Infer new SchedClasses from SchedVariant.
  inferSchedClasses();

  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
  // ProcResourceDefs.
  LLVM_DEBUG(
      dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
  collectProcResources();

  // Collect optional processor description.
  collectOptionalProcessorInfo();

  // Check MCInstPredicate definitions.
  checkMCInstPredicates();

  // Check STIPredicate definitions.
  checkSTIPredicates();

  // Find STIPredicate definitions for each processor model, and construct
  // STIPredicateFunction objects.
  collectSTIPredicates();
}
void CodeGenSchedModels::checkSTIPredicates() const {
  DenseMap<StringRef, const Record *> Declarations;

  // There cannot be multiple declarations with the same name.
  const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl");
  for (const Record *R : Decls) {
    StringRef Name = R->getValueAsString("Name");
    const auto It = Declarations.find(Name);
    if (It == Declarations.end()) {
      Declarations[Name] = R;
      continue;
    }

    PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared.");
    PrintFatalNote(It->second->getLoc(), "Previous declaration was here.");
  }

  // Disallow InstructionEquivalenceClasses with an empty instruction list.
  const RecVec Defs =
      Records.getAllDerivedDefinitions("InstructionEquivalenceClass");
  for (const Record *R : Defs) {
    RecVec Opcodes = R->getValueAsListOfDefs("Opcodes");
    if (Opcodes.empty()) {
      PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass "
                                   "defined with an empty opcode list.");
    }
  }
}
// Used by function `processSTIPredicate` to construct a mask of machine
// instruction operands.
static APInt constructOperandMask(ArrayRef<int64_t> Indices) {
  APInt OperandMask;
  if (Indices.empty())
    return OperandMask;

  int64_t MaxIndex = *std::max_element(Indices.begin(), Indices.end());
  assert(MaxIndex >= 0 && "Invalid negative indices in input!");
  OperandMask = OperandMask.zext(MaxIndex + 1);
  for (const int64_t Index : Indices) {
    assert(Index >= 0 && "Invalid negative indices!");
    OperandMask.setBit(Index);
  }

  return OperandMask;
}
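
// Illustrative example (indices chosen arbitrarily): for Indices = {0, 2, 5}
// the mask is zero-extended to 6 bits and bits 0, 2 and 5 are set, yielding
// the value 0b100101.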
static void
processSTIPredicate(STIPredicateFunction &Fn,
                    const ProcModelMapTy &ProcModelMap) {
  DenseMap<const Record *, unsigned> Opcode2Index;
  using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>;
  std::vector<OpcodeMapPair> OpcodeMappings;
  std::vector<std::pair<APInt, APInt>> OpcodeMasks;

  DenseMap<const Record *, unsigned> Predicate2Index;
  unsigned NumUniquePredicates = 0;

  // Number unique predicates and opcodes used by InstructionEquivalenceClass
  // definitions. Each unique opcode will be associated with an OpcodeInfo
  // object.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    for (const Record *EC : Classes) {
      const Record *Pred = EC->getValueAsDef("Predicate");
      if (!Predicate2Index.contains(Pred))
        Predicate2Index[Pred] = NumUniquePredicates++;

      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
      for (const Record *Opcode : Opcodes) {
        if (!Opcode2Index.contains(Opcode)) {
          Opcode2Index[Opcode] = OpcodeMappings.size();
          OpcodeMappings.emplace_back(Opcode, OpcodeInfo());
        }
      }
    }
  }

  // Initialize vector `OpcodeMasks` with default values. We want to keep track
  // of which processors "use" which opcodes. We also want to be able to
  // identify predicates that are used by different processors for the same
  // opcode.
  // This information is used later on by this algorithm to sort OpcodeMapping
  // elements based on their processor and predicate sets.
  OpcodeMasks.resize(OpcodeMappings.size());
  APInt DefaultProcMask(ProcModelMap.size(), 0);
  APInt DefaultPredMask(NumUniquePredicates, 0);
  for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
    MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);

  // Construct an OpcodeInfo object for every unique opcode declared by an
  // InstructionEquivalenceClass definition.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    const Record *SchedModel = Def->getValueAsDef("SchedModel");
    unsigned ProcIndex = ProcModelMap.find(SchedModel)->second;
    APInt ProcMask(ProcModelMap.size(), 0);
    ProcMask.setBit(ProcIndex);

    for (const Record *EC : Classes) {
      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");

      std::vector<int64_t> OpIndices =
          EC->getValueAsListOfInts("OperandIndices");
      APInt OperandMask = constructOperandMask(OpIndices);

      const Record *Pred = EC->getValueAsDef("Predicate");
      APInt PredMask(NumUniquePredicates, 0);
      PredMask.setBit(Predicate2Index[Pred]);

      for (const Record *Opcode : Opcodes) {
        unsigned OpcodeIdx = Opcode2Index[Opcode];
        if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) {
          std::string Message =
              "Opcode " + Opcode->getName().str() +
              " used by multiple InstructionEquivalenceClass definitions.";
          PrintFatalError(EC->getLoc(), Message);
        }
        OpcodeMasks[OpcodeIdx].first |= ProcMask;
        OpcodeMasks[OpcodeIdx].second |= PredMask;
        OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second;

        OI.addPredicateForProcModel(ProcMask, OperandMask, Pred);
      }
    }
  }

  // Sort OpcodeMappings elements based on their CPU and predicate masks.
  // As a last resort, order elements by opcode identifier.
  llvm::sort(OpcodeMappings,
             [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
               unsigned LhsIdx = Opcode2Index[Lhs.first];
               unsigned RhsIdx = Opcode2Index[Rhs.first];
               const std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx];
               const std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx];

               auto PopulationCountAndLeftBit =
                   [](const APInt &Other) -> std::pair<int, int> {
                 return std::pair<int, int>(Other.popcount(),
                                            -Other.countl_zero());
               };
               auto lhsmask_first = PopulationCountAndLeftBit(LhsMasks.first);
               auto rhsmask_first = PopulationCountAndLeftBit(RhsMasks.first);
               if (lhsmask_first != rhsmask_first)
                 return lhsmask_first < rhsmask_first;

               auto lhsmask_second = PopulationCountAndLeftBit(LhsMasks.second);
               auto rhsmask_second = PopulationCountAndLeftBit(RhsMasks.second);
               if (lhsmask_second != rhsmask_second)
                 return lhsmask_second < rhsmask_second;

               return LhsIdx < RhsIdx;
             });

  // Now construct opcode groups. Groups are used by the SubtargetEmitter when
  // expanding the body of a STIPredicate function. In particular, each opcode
  // group is expanded into a sequence of labels in a switch statement. A group
  // collects opcodes for which different processors define the same predicates
  // and the same opcode masks.
  for (OpcodeMapPair &Info : OpcodeMappings)
    Fn.addOpcode(Info.first, std::move(Info.second));
}
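
// Illustrative sketch (hypothetical records): if opcodes FOO and BAR are both
// guarded by predicate P1 on processor A and by P2 on processor B, they end up
// adjacent after the sort above, so the SubtargetEmitter can emit them as two
// case labels sharing one body in the generated STIPredicate switch.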
void CodeGenSchedModels::collectSTIPredicates() {
  // Map STIPredicateDecl records to elements of vector
  // CodeGenSchedModels::STIPredicates.
  DenseMap<const Record *, unsigned> Decl2Index;

  RecVec RV = Records.getAllDerivedDefinitions("STIPredicate");
  for (const Record *R : RV) {
    const Record *Decl = R->getValueAsDef("Declaration");

    const auto It = Decl2Index.find(Decl);
    if (It == Decl2Index.end()) {
      Decl2Index[Decl] = STIPredicates.size();
      STIPredicateFunction Predicate(Decl);
      Predicate.addDefinition(R);
      STIPredicates.emplace_back(std::move(Predicate));
      continue;
    }

    STIPredicateFunction &PreviousDef = STIPredicates[It->second];
    PreviousDef.addDefinition(R);
  }

  for (STIPredicateFunction &Fn : STIPredicates)
    processSTIPredicate(Fn, ProcModelMap);
}
void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask,
                                          const llvm::APInt &OperandMask,
                                          const Record *Predicate) {
  auto It = llvm::find_if(
      Predicates, [&OperandMask, &Predicate](const PredicateInfo &P) {
        return P.Predicate == Predicate && P.OperandMask == OperandMask;
      });
  if (It == Predicates.end()) {
    Predicates.emplace_back(CpuMask, OperandMask, Predicate);
    return;
  }

  It->ProcModelMask |= CpuMask;
}
void CodeGenSchedModels::checkMCInstPredicates() const {
  RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
  if (MCPredicates.empty())
    return;

  // A target cannot have multiple TIIPredicate definitions with the same name.
  llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size());
  for (const Record *TIIPred : MCPredicates) {
    StringRef Name = TIIPred->getValueAsString("FunctionName");
    StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name);
    if (It == TIIPredicates.end()) {
      TIIPredicates[Name] = TIIPred;
      continue;
    }

    PrintError(TIIPred->getLoc(),
               "TIIPredicate " + Name + " is multiply defined.");
    PrintFatalNote(It->second->getLoc(),
                   " Previous definition of " + Name + " was here.");
  }
}
void CodeGenSchedModels::collectRetireControlUnits() {
  RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit");

  for (Record *RCU : Units) {
    CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel"));
    if (PM.RetireControlUnit) {
      PrintError(RCU->getLoc(),
                 "Expected a single RetireControlUnit definition");
      PrintNote(PM.RetireControlUnit->getLoc(),
                "Previous definition of RetireControlUnit was here");
    }
    PM.RetireControlUnit = RCU;
  }
}
void CodeGenSchedModels::collectLoadStoreQueueInfo() {
  RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue");

  for (Record *Queue : Queues) {
    CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel"));
    if (Queue->isSubClassOf("LoadQueue")) {
      if (PM.LoadQueue) {
        PrintError(Queue->getLoc(), "Expected a single LoadQueue definition");
        PrintNote(PM.LoadQueue->getLoc(),
                  "Previous definition of LoadQueue was here");
      }
      PM.LoadQueue = Queue;
    }

    if (Queue->isSubClassOf("StoreQueue")) {
      if (PM.StoreQueue) {
        PrintError(Queue->getLoc(), "Expected a single StoreQueue definition");
        PrintNote(PM.StoreQueue->getLoc(),
                  "Previous definition of StoreQueue was here");
      }
      PM.StoreQueue = Queue;
    }
  }
}
/// Collect optional processor information.
void CodeGenSchedModels::collectOptionalProcessorInfo() {
  // Find register file definitions for each processor.
  collectRegisterFiles();

  // Collect processor RetireControlUnit descriptors if available.
  collectRetireControlUnits();

  // Collect information about load/store queues.
  collectLoadStoreQueueInfo();
}
/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcRecords, LessRecordFieldName());

  // Check for duplicated names.
  auto I = std::adjacent_find(ProcRecords.begin(), ProcRecords.end(),
                              [](const Record *Rec1, const Record *Rec2) {
                                return Rec1->getValueAsString("Name") ==
                                       Rec2->getValueAsString("Name");
                              });
  if (I != ProcRecords.end())
    PrintFatalError((*I)->getLoc(), "Duplicate processor name " +
                                        (*I)->getValueAsString("Name"));

  // Reserve space because we can. Reallocation would be ok.
  ProcModels.reserve(ProcRecords.size() + 1);

  // Use idx=0 for NoModel/NoItineraries.
  Record *NoModelDef = Records.getDef("NoSchedModel");
  Record *NoItinsDef = Records.getDef("NoItineraries");
  ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
  ProcModelMap[NoModelDef] = 0;

  // For each processor, find a unique machine model.
  LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
  for (Record *ProcRecord : ProcRecords)
    addProcModel(ProcRecord);
}
/// Get a unique processor model based on the defined MachineModel and
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
  Record *ModelKey = getModelOrItinDef(ProcDef);
  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
    return;

  std::string Name = std::string(ModelKey->getName());
  if (ModelKey->isSubClassOf("SchedMachineModel")) {
    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
    ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
  } else {
    // An itinerary is defined without a machine model. Infer a new model.
    if (!ModelKey->getValueAsListOfDefs("IID").empty())
      Name = Name + "Model";
    ProcModels.emplace_back(ProcModels.size(), Name,
                            ProcDef->getValueAsDef("SchedModel"), ModelKey);
  }
  LLVM_DEBUG(ProcModels.back().dump());
}
// Recursively find all reachable SchedReadWrite records.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
                        SmallPtrSet<Record *, 16> &RWSet) {
  if (!RWSet.insert(RWDef).second)
    return;
  RWDefs.push_back(RWDef);
  // Reads don't currently have sequence records, but they could be added later.
  if (RWDef->isSubClassOf("WriteSequence")) {
    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
    for (Record *WSRec : Seq)
      scanSchedRW(WSRec, RWDefs, RWSet);
  } else if (RWDef->isSubClassOf("SchedVariant")) {
    // Visit each variant (guarded by a different predicate).
    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
    for (Record *Variant : Vars) {
      // Visit each RW in the sequence selected by the current variant.
      RecVec Selected = Variant->getValueAsListOfDefs("Selected");
      for (Record *SelDef : Selected)
        scanSchedRW(SelDef, RWDefs, RWSet);
    }
  }
}
// Collect and sort all SchedReadWrites reachable via tablegen records.
// More may be inferred later when inferring new SchedClasses from variants.
void CodeGenSchedModels::collectSchedRW() {
  // Reserve idx=0 for invalid writes/reads.
  SchedWrites.resize(1);
  SchedReads.resize(1);

  SmallPtrSet<Record *, 16> RWSet;

  // Find all SchedReadWrites referenced by instruction defs.
  RecVec SWDefs, SRDefs;
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *SchedDef = Inst->TheDef;
    if (SchedDef->isValueUnset("SchedRW"))
      continue;
    RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
    for (Record *RW : RWs) {
      if (RW->isSubClassOf("SchedWrite"))
        scanSchedRW(RW, SWDefs, RWSet);
      else {
        assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RW, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by InstRW.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  for (Record *InstRWDef : InstRWDefs) {
    // For all OperandReadWrites.
    RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites");
    for (Record *RWDef : RWDefs) {
      if (RWDef->isSubClassOf("SchedWrite"))
        scanSchedRW(RWDef, SWDefs, RWSet);
      else {
        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RWDef, SRDefs, RWSet);
      }
    }
  }

  // Find all ReadWrites referenced by ItinRW.
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  for (Record *ItinRWDef : ItinRWDefs) {
    // For all OperandReadWrites.
    RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites");
    for (Record *RWDef : RWDefs) {
      if (RWDef->isSubClassOf("SchedWrite"))
        scanSchedRW(RWDef, SWDefs, RWSet);
      else {
        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RWDef, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
  // for the loop below that initializes Alias vectors.
  RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
  llvm::sort(AliasDefs, LessRecord());
  for (Record *ADef : AliasDefs) {
    Record *MatchDef = ADef->getValueAsDef("MatchRW");
    Record *AliasDef = ADef->getValueAsDef("AliasRW");
    if (MatchDef->isSubClassOf("SchedWrite")) {
      if (!AliasDef->isSubClassOf("SchedWrite"))
        PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite");
      scanSchedRW(AliasDef, SWDefs, RWSet);
    } else {
      assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
      if (!AliasDef->isSubClassOf("SchedRead"))
        PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead");
      scanSchedRW(AliasDef, SRDefs, RWSet);
    }
  }
  // Sort and add the SchedReadWrites directly referenced by instructions or
  // itinerary resources. Index reads and writes in separate domains.
  llvm::sort(SWDefs, LessRecord());
  for (Record *SWDef : SWDefs) {
    assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite");
    SchedWrites.emplace_back(SchedWrites.size(), SWDef);
  }
  llvm::sort(SRDefs, LessRecord());
  for (Record *SRDef : SRDefs) {
    assert(!getSchedRWIdx(SRDef, /*IsRead=*/true) && "duplicate SchedRead");
    SchedReads.emplace_back(SchedReads.size(), SRDef);
  }
  // Initialize WriteSequence vectors.
  for (CodeGenSchedRW &CGRW : SchedWrites) {
    if (!CGRW.IsSequence)
      continue;
    findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence,
            /*IsRead=*/false);
  }
  // Initialize Aliases vectors.
  for (Record *ADef : AliasDefs) {
    Record *AliasDef = ADef->getValueAsDef("AliasRW");
    getSchedRW(AliasDef).IsAlias = true;
    Record *MatchDef = ADef->getValueAsDef("MatchRW");
    CodeGenSchedRW &RW = getSchedRW(MatchDef);
    if (RW.IsAlias)
      PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias");
    RW.Aliases.push_back(ADef);
  }
  LLVM_DEBUG(
      dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
      for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
        dbgs() << WIdx << ": ";
        SchedWrites[WIdx].dump();
        dbgs() << '\n';
      }
      for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
        dbgs() << RIdx << ": ";
        SchedReads[RIdx].dump();
        dbgs() << '\n';
      }
      RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
      for (Record *RWDef : RWDefs) {
        if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
          StringRef Name = RWDef->getName();
          if (Name != "NoWrite" && Name != "ReadDefault")
            dbgs() << "Unused SchedReadWrite " << Name << '\n';
        }
      });
}
/// Compute a SchedWrite name from a sequence of writes.
std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) {
  std::string Name("(");
  ListSeparator LS("_");
  for (unsigned I : Seq) {
    Name += LS;
    Name += getSchedRW(I, IsRead).Name;
  }
  Name += ')';
  return Name;
}
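
// Illustrative example (hypothetical SchedWrite names): for a sequence that
// resolves to WriteIMul followed by WriteALU, the generated name would be
// "(WriteIMul_WriteALU)".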
unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def,
                                           bool IsRead) const {
  const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  const auto I = find_if(
      RWVec, [Def](const CodeGenSchedRW &RW) { return RW.TheDef == Def; });
  return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
}
bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
  for (auto &ProcModel : ProcModels) {
    const RecVec &RADefs = ProcModel.ReadAdvanceDefs;
    for (auto &RADef : RADefs) {
      RecVec ValidWrites = RADef->getValueAsListOfDefs("ValidWrites");
      if (is_contained(ValidWrites, WriteDef))
        return true;
    }
  }
  return false;
}
static void splitSchedReadWrites(const RecVec &RWDefs,
                                 RecVec &WriteDefs, RecVec &ReadDefs) {
  for (Record *RWDef : RWDefs) {
    if (RWDef->isSubClassOf("SchedWrite"))
      WriteDefs.push_back(RWDef);
    else {
      assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
      ReadDefs.push_back(RWDef);
    }
  }
}
// Split the SchedReadWrites defs and call findRWs for each list.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
                                 IdxVec &Writes, IdxVec &Reads) const {
  RecVec WriteDefs;
  RecVec ReadDefs;
  splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
  findRWs(WriteDefs, Writes, false);
  findRWs(ReadDefs, Reads, true);
}
// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
                                 bool IsRead) const {
  for (Record *RWDef : RWDefs) {
    unsigned Idx = getSchedRWIdx(RWDef, IsRead);
    assert(Idx && "failed to collect SchedReadWrite");
    RWs.push_back(Idx);
  }
}
void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
                                          bool IsRead) const {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (!SchedRW.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
      SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (unsigned I : SchedRW.Sequence) {
      expandRWSequence(I, RWSeq, IsRead);
    }
  }
}
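
// Illustrative example (hypothetical records): a WriteSequence whose Writes
// list resolves to indices {W1, W2} and whose Repeat value is 2 expands into
// the flat sequence W1, W2, W1, W2; a non-sequence SchedRW simply contributes
// its own index.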
// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
void CodeGenSchedModels::expandRWSeqForProc(
    unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
    const CodeGenProcModel &ProcModel) const {
  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  Record *AliasDef = nullptr;
  for (const Record *Rec : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
    if (Rec->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = Rec->getValueAsDef("SchedModel");
      if (&getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(),
                      "Multiple aliases defined for processor " +
                          ProcModel.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef) {
    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
                       RWSeq, IsRead, ProcModel);
    return;
  }
  if (!SchedWrite.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }

  int Repeat =
      SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  for (int I = 0, E = Repeat; I < E; ++I) {
    for (unsigned Idx : SchedWrite.Sequence) {
      expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
    }
  }
}
// Find the existing SchedWrite that models this sequence of writes.
unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
                                               bool IsRead) {
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;

  auto I = find_if(RWVec, [Seq](CodeGenSchedRW &RW) {
    return ArrayRef(RW.Sequence) == Seq;
  });
  // Index zero reserved for invalid RW.
  return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
}
/// Add this ReadWrite if it doesn't already exist.
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
                                            bool IsRead) {
  assert(!Seq.empty() && "cannot insert empty sequence");
  if (Seq.size() == 1)
    return Seq.back();

  unsigned Idx = findRWForSequence(Seq, IsRead);
  if (Idx)
    return Idx;

  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  unsigned RWIdx = RWVec.size();
  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  RWVec.push_back(SchedRW);
  return RWIdx;
}
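
// Usage sketch (hypothetical indices): findOrInsertRW({3, 5}, /*IsRead=*/false)
// returns the index of an existing SchedWrite whose Sequence is {3, 5}, or
// appends a new one named via genRWName and returns its freshly assigned index.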
/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
void CodeGenSchedModels::collectSchedClasses() {

  // NoItinerary is always the first class at Idx=0.
  assert(SchedClasses.empty() && "Expected empty sched class");
  SchedClasses.emplace_back(0, "NoInstrModel",
                            Records.getDef("NoItinerary"));
  SchedClasses.back().ProcIndices.push_back(0);
  // Create a SchedClass for each unique combination of itinerary class and
  // SchedRW list.
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
    IdxVec Writes, Reads;
    if (!Inst->TheDef->isValueUnset("SchedRW"))
      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);

    // ProcIdx == 0 indicates the class applies to all processors.
    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/ {0});
    InstrClassMap[Inst->TheDef] = SCIdx;
  }
  // Create classes for InstRW defs.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  llvm::sort(InstRWDefs, LessRecord());
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
  for (Record *RWDef : InstRWDefs)
    createInstRWClass(RWDef);

  NumInstrSchedClasses = SchedClasses.size();

  bool EnableDump = false;
  LLVM_DEBUG(EnableDump = true);
  if (!EnableDump)
    return;

  LLVM_DEBUG(
      dbgs()
      << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    StringRef InstName = Inst->TheDef->getName();
    unsigned SCIdx = getSchedClassIdx(*Inst);
    if (!SCIdx) {
      if (!Inst->hasNoSchedulingInfo)
        dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
      continue;
    }
    CodeGenSchedClass &SC = getSchedClass(SCIdx);
    if (SC.ProcIndices[0] != 0)
      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
                                              "must not be subtarget specific.");

    IdxVec ProcIndices;
    if (SC.ItinClassDef->getName() != "NoItinerary") {
      ProcIndices.push_back(0);
      dbgs() << "Itinerary for " << InstName << ": "
             << SC.ItinClassDef->getName() << '\n';
    }
    if (!SC.Writes.empty()) {
      ProcIndices.push_back(0);
      dbgs() << "SchedRW machine model for " << InstName;
      for (unsigned int Write : SC.Writes)
        dbgs() << " " << SchedWrites[Write].Name;
      for (unsigned int Read : SC.Reads)
        dbgs() << " " << SchedReads[Read].Name;
      dbgs() << '\n';
    }
    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
    for (Record *RWDef : RWDefs) {
      const CodeGenProcModel &ProcModel =
          getProcModel(RWDef->getValueAsDef("SchedModel"));
      ProcIndices.push_back(ProcModel.Index);
      LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
                        << InstName);
      IdxVec Writes, Reads;
      findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
              Writes, Reads);
      for (unsigned WIdx : Writes)
        dbgs() << " " << SchedWrites[WIdx].Name;
      for (unsigned RIdx : Reads)
        dbgs() << " " << SchedReads[RIdx].Name;
      dbgs() << '\n';
    }
    // If ProcIndices contains zero, the class applies to all processors.
    if (!llvm::is_contained(ProcIndices, 0)) {
      for (const CodeGenProcModel &PM : ProcModels) {
        if (!llvm::is_contained(ProcIndices, PM.Index))
          dbgs() << "No machine model for " << Inst->TheDef->getName()
                 << " on processor " << PM.ModelName << '\n';
      }
    }
  }
}
// Get the SchedClass index for an instruction.
unsigned
CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
  return InstrClassMap.lookup(Inst.TheDef);
}
std::string
CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
                                         ArrayRef<unsigned> OperWrites,
                                         ArrayRef<unsigned> OperReads) {
  std::string Name;
  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
    Name = std::string(ItinClassDef->getName());
  for (unsigned Idx : OperWrites) {
    Name += '_';
    Name += SchedWrites[Idx].Name;
  }
  for (unsigned Idx : OperReads) {
    Name += '_';
    Name += SchedReads[Idx].Name;
  }
  return Name;
}
std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
  std::string Name;
  ListSeparator LS("_");
  for (const Record *InstDef : InstDefs) {
    Name += LS;
    Name += InstDef->getName();
  }
  return Name;
}
/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
                                           ArrayRef<unsigned> OperWrites,
                                           ArrayRef<unsigned> OperReads,
                                           ArrayRef<unsigned> ProcIndices) {
  assert(!ProcIndices.empty() && "expect at least one ProcIdx");

  auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
    return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
  };

  auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
  unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
    IdxVec PI;
    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
                   SchedClasses[Idx].ProcIndices.end(),
                   ProcIndices.begin(), ProcIndices.end(),
                   std::back_inserter(PI));
    SchedClasses[Idx].ProcIndices = std::move(PI);
    return Idx;
  }
  Idx = SchedClasses.size();
  SchedClasses.emplace_back(Idx,
                            createSchedClassName(ItinClassDef, OperWrites,
                                                 OperReads),
                            ItinClassDef);
  CodeGenSchedClass &SC = SchedClasses.back();
  SC.Writes = OperWrites;
  SC.Reads = OperReads;
  SC.ProcIndices = ProcIndices;

  return Idx;
}
// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  // intersects with an existing class via a previous InstRWDef. Instrs that do
  // not intersect with an existing class refer back to their former class as
  // determined from ItinDef or SchedRW.
  SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
  // Sort Instrs into sets.
  const RecVec *InstDefs = Sets.expand(InstRWDef);
  if (InstDefs->empty())
    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");

  for (Record *InstDef : *InstDefs) {
    InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
    if (Pos == InstrClassMap.end())
      PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
    unsigned SCIdx = Pos->second;
    ClassInstrs[SCIdx].push_back(InstDef);
  }
  // For each set of Instrs, create a new class if necessary, and map or remap
  // the Instrs to it.
  for (auto &Entry : ClassInstrs) {
    unsigned OldSCIdx = Entry.first;
    ArrayRef<Record *> InstDefs = Entry.second;
    // If all the instrs in the current class are accounted for, then leave
    // them mapped to their old class.
    if (OldSCIdx) {
      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
      if (!RWDefs.empty()) {
        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
        unsigned OrigNumInstrs =
            count_if(*OrigInstDefs, [&](Record *OIDef) {
              return InstrClassMap[OIDef] == OldSCIdx;
            });
        if (OrigNumInstrs == InstDefs.size()) {
          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
                 "expected a generic SchedClass");
          Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
          // Make sure we didn't already have an InstRW containing this
          // instruction on this model.
          for (Record *RWD : RWDefs) {
            if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
                RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
              assert(!InstDefs.empty()); // Checked at function start.
              PrintError(
                  InstRWDef->getLoc(),
                  "Overlapping InstRW definition for \"" +
                      InstDefs.front()->getName() +
                      "\" also matches previous \"" +
                      RWD->getValue("Instrs")->getValue()->getAsString() +
                      "\".");
              PrintFatalNote(RWD->getLoc(), "Previous match was here.");
            }
          }
          LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
                            << SchedClasses[OldSCIdx].Name << " on "
                            << RWModelDef->getName() << "\n");
          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
          continue;
        }
      }
    }
    unsigned SCIdx = SchedClasses.size();
    SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
    CodeGenSchedClass &SC = SchedClasses.back();
    LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
                      << InstRWDef->getValueAsDef("SchedModel")->getName()
                      << "\n");

    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
    SC.Writes = SchedClasses[OldSCIdx].Writes;
    SC.Reads = SchedClasses[OldSCIdx].Reads;
    SC.ProcIndices.push_back(0);
    // If we had an old class, copy its InstRWs to this new class.
    if (OldSCIdx) {
      Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
      for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
        if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
          assert(!InstDefs.empty()); // Checked at function start.
          PrintError(
              InstRWDef->getLoc(),
              "Overlapping InstRW definition for \"" +
                  InstDefs.front()->getName() + "\" also matches previous \"" +
                  OldRWDef->getValue("Instrs")->getValue()->getAsString() +
                  "\".");
          PrintFatalNote(OldRWDef->getLoc(), "Previous match was here.");
        }
        assert(OldRWDef != InstRWDef &&
               "SchedClass has duplicate InstRW def");
        SC.InstRWs.push_back(OldRWDef);
      }
    }
    // Map each Instr to this new class.
    for (Record *InstDef : InstDefs)
      InstrClassMap[InstDef] = SCIdx;
    SC.InstRWs.push_back(InstRWDef);
  }
}
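
// Illustrative sketch (hypothetical TableGen input): given a target-wide class
// holding instructions {A, B, C} and an InstRW record on one processor whose
// (instrs A, B) set matches only A and B, the code above creates a new class
// for {A, B} that inherits the old ItinDef/Writes/Reads and records the InstRW,
// while C stays in the original class.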
// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
  for (const CodeGenProcModel &PM :
       make_range(procModelBegin(), procModelEnd()))
    if (PM.hasItineraries())
      return true;
  return false;
}
// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
  LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
  for (CodeGenProcModel &ProcModel : ProcModels) {
    if (!ProcModel.hasItineraries())
      continue;

    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");

    // Populate ItinDefList with Itinerary records.
    ProcModel.ItinDefList.resize(NumInstrSchedClasses);

    // Insert each itinerary data record in the correct position within
    // the processor model's ItinDefList.
    for (Record *ItinData : ItinRecords) {
      const Record *ItinDef = ItinData->getValueAsDef("TheClass");
      bool FoundClass = false;

      for (const CodeGenSchedClass &SC :
           make_range(schedClassBegin(), schedClassEnd())) {
        // Multiple SchedClasses may share an itinerary. Update all of them.
        if (SC.ItinClassDef == ItinDef) {
          ProcModel.ItinDefList[SC.Index] = ItinData;
          FoundClass = true;
        }
      }
      if (!FoundClass) {
        LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
                          << " missing class for itinerary "
                          << ItinDef->getName() << '\n');
      }
    }
    // Check for missing itinerary entries.
    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
    for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
      if (!ProcModel.ItinDefList[i])
        dbgs() << ProcModel.ItinsDef->getName()
               << " missing itinerary for class " << SchedClasses[i].Name
               << '\n';
    }
  }
}
// Gather the read/write types for each itinerary class.
void CodeGenSchedModels::collectProcItinRW() {
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  llvm::sort(ItinRWDefs, LessRecord());
  for (Record *RWDef : ItinRWDefs) {
    if (!RWDef->getValueInit("SchedModel")->isComplete())
      PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
    Record *ModelDef = RWDef->getValueAsDef("SchedModel");
    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
    if (I == ProcModelMap.end()) {
      PrintFatalError(RWDef->getLoc(),
                      "Undefined SchedMachineModel " + ModelDef->getName());
    }
    ProcModels[I->second].ItinRWDefs.push_back(RWDef);
  }
}
// Gather the unsupported features for processor models.
void CodeGenSchedModels::collectProcUnsupportedFeatures() {
  for (CodeGenProcModel &ProcModel : ProcModels)
    append_range(
        ProcModel.UnsupportedFeaturesDefs,
        ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures"));
}
/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
  LLVM_DEBUG(
      dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
  LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");

  // Visit all existing classes and newly created classes.
  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");

    if (SchedClasses[Idx].ItinClassDef)
      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
    if (!SchedClasses[Idx].InstRWs.empty())
      inferFromInstRWs(Idx);
    if (!SchedClasses[Idx].Writes.empty()) {
      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
                  Idx, SchedClasses[Idx].ProcIndices);
    }
    assert(SchedClasses.size() < (NumInstrSchedClasses * 6) &&
           "too many SchedVariants");
  }
}
/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
                                            unsigned FromClassIdx) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (const Record *Rec : PM.ItinRWDefs) {
      RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
      if (!llvm::is_contained(Matched, ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError(Rec->getLoc(),
                        "Duplicate itinerary class " + ItinClassDef->getName() +
                            " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      inferFromRW(Writes, Reads, FromClassIdx, PIdx);
    }
  }
}
/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstrRWs was mutated!");
    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
    const RecVec *InstDefs = Sets.expand(Rec);
    RecIter II = InstDefs->begin(), IE = InstDefs->end();
    for (; II != IE; ++II) {
      if (InstrClassMap[*II] == SCIdx)
        break;
    }
    // If this class no longer has any instructions mapped to it, it has become
    // irrelevant.
    if (II == IE)
      continue;

    IdxVec Writes, Reads;
    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
    inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
    SchedClasses[SCIdx].InstRWProcIndices.insert(PIdx);
  }
}
namespace {

// Helper for substituteVariantOperand.
struct TransVariant {
  Record *VarOrSeqDef;  // Variant or sequence.
  unsigned RWIdx;       // Index of this variant or sequence's matched type.
  unsigned ProcIdx;     // Processor model index or zero for any.
  unsigned TransVecIdx; // Index into PredTransitions::TransVec.

  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti)
      : VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};
// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
  bool IsRead;
  unsigned RWIdx;
  Record *Predicate;

  PredCheck(bool r, unsigned w, Record *p)
      : IsRead(r), RWIdx(w), Predicate(p) {}
};
// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
  // A predicate term is a conjunction of PredChecks.
  SmallVector<PredCheck, 4> PredTerm;
  SmallVector<SmallVector<unsigned, 4>, 16> WriteSequences;
  SmallVector<SmallVector<unsigned, 4>, 16> ReadSequences;
  unsigned ProcIndex = 0;

  PredTransition() = default;
  PredTransition(ArrayRef<PredCheck> PT, unsigned ProcId) {
    PredTerm.assign(PT.begin(), PT.end());
    ProcIndex = ProcId;
  }
};
// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
  CodeGenSchedModels &SchedModels;

public:
  std::vector<PredTransition> TransVec;

  PredTransitions(CodeGenSchedModels &sm) : SchedModels(sm) {}

  bool substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
                                bool IsRead, unsigned StartIdx);

  bool substituteVariants(const PredTransition &Trans);

private:
  bool mutuallyExclusive(Record *PredDef, ArrayRef<Record *> Preds,
                         ArrayRef<PredCheck> Term);
  void getIntersectingVariants(
      const CodeGenSchedRW &SchedRW, unsigned TransIdx,
      std::vector<TransVariant> &IntersectingVariants);
  void pushVariant(const TransVariant &VInfo, bool IsRead);
};

} // end anonymous namespace
// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
bool PredTransitions::mutuallyExclusive(Record *PredDef,
                                        ArrayRef<Record *> Preds,
                                        ArrayRef<PredCheck> Term) {
  for (const PredCheck &PC : Term) {
    if (PC.Predicate == PredDef)
      return false;

    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    if (any_of(Variants, [PredDef](const Record *R) {
          return R->getValueAsDef("Predicate") == PredDef;
        })) {
      // To check if PredDef is mutually exclusive with PC we also need to
      // check that PC.Predicate is exclusive with all predicates from the
      // variant we're expanding. Consider the following RW sequence with two
      // variants (1 & 2), where A, B and C are predicates from the
      // corresponding SchedVars:
      //
      // 1:A/B - 2:C/B
      //
      // Here C is not mutually exclusive with variant (1), because A doesn't
      // exist in variant (2). This means we have possible transitions from A
      // to C and from A to B, and the fully expanded sequence would look like:
      //
      // if (A & C) return ...;
      // if (A & B) return ...;
      // if (B) return ...;
      //
      // Now let's consider another sequence:
      //
      // 1:A/B - 2:A/B
      //
      // Here A in variant (2) is mutually exclusive with variant (1), because
      // A also exists in (2). This means the A->B transition is impossible and
      // the expanded sequence would look like:
      //
      // if (A) return ...;
      // if (B) return ...;
      if (!llvm::is_contained(Preds, PC.Predicate))
        return false;
      return true;
    }
  }
  return false;
}
static std::vector<Record *> getAllPredicates(ArrayRef<TransVariant> Variants,
                                              unsigned ProcId) {
  std::vector<Record *> Preds;
  for (auto &Variant : Variants) {
    if (!Variant.VarOrSeqDef->isSubClassOf("SchedVar"))
      continue;
    Preds.push_back(Variant.VarOrSeqDef->getValueAsDef("Predicate"));
  }
  return Preds;
}
// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants) {

  bool GenericRW = false;

  std::vector<TransVariant> Variants;
  if (SchedRW.HasVariants) {
    unsigned VarProcIdx = 0;
    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    if (VarProcIdx == 0 || VarProcIdx == TransVec[TransIdx].ProcIndex) {
      // Push each variant. Assign TransVecIdx later.
      const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
      for (Record *VarDef : VarDefs)
        Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
      if (VarProcIdx == 0)
        GenericRW = true;
    }
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    // If either the SchedAlias itself or the SchedReadWrite that it aliases
    // to is defined within a processor model, constrain all variants to
    // that processor.
    unsigned AliasProcIdx = 0;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    if (AliasProcIdx && AliasProcIdx != TransVec[TransIdx].ProcIndex)
      continue;
    if (!Variants.empty()) {
      const CodeGenProcModel &PM =
          *(SchedModels.procModelBegin() + AliasProcIdx);
      PrintFatalError((*AI)->getLoc(),
                      "Multiple variants defined for processor " +
                          PM.ModelName +
                          ". Ensure only one SchedAlias exists per RW.");
    }

    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));

    if (AliasRW.HasVariants) {
      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
      for (Record *VD : VarDefs)
        Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
    }
    if (AliasRW.IsSequence)
      Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
    if (AliasProcIdx == 0)
      GenericRW = true;
  }
  std::vector<Record *> AllPreds =
      getAllPredicates(Variants, TransVec[TransIdx].ProcIndex);
  for (TransVariant &Variant : Variants) {
    // Don't expand variants if the processor models don't intersect.
    // A zero processor index means any processor.
    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
      if (mutuallyExclusive(PredDef, AllPreds, TransVec[TransIdx].PredTerm))
        continue;
    }

    if (IntersectingVariants.empty()) {
      // The first variant builds on the existing transition.
      Variant.TransVecIdx = TransIdx;
      IntersectingVariants.push_back(Variant);
    } else {
      // Push another copy of the current transition for more variants.
      Variant.TransVecIdx = TransVec.size();
      IntersectingVariants.push_back(Variant);
      TransVec.push_back(TransVec[TransIdx]);
    }
  }
  if (GenericRW && IntersectingVariants.empty()) {
    PrintFatalError(SchedRW.TheDef->getLoc(),
                    "No variant of this type has "
                    "a matching predicate on any processor");
  }
}
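
// Sketch of the bookkeeping above (hypothetical variants V1..V3 intersecting
// one transition T): V1 reuses T's existing slot in TransVec, while V2 and V3
// each get a fresh copy of T appended to TransVec, so every surviving variant
// expands on its own private transition.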
// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::pushVariant(const TransVariant &VInfo, bool IsRead) {
  PredTransition &Trans = TransVec[VInfo.TransVecIdx];

  // If this operand transition is reached through a processor-specific alias,
  // then the whole transition is specific to this processor.
  IdxVec SelectedRWs;
  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
    Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx, PredDef);
    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  } else {
    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
           "variant must be a SchedVariant or aliased WriteSequence");
    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  }

  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);

  SmallVectorImpl<SmallVector<unsigned, 4>> &RWSequences =
      IsRead ? Trans.ReadSequences : Trans.WriteSequences;
  if (SchedRW.IsVariadic) {
    unsigned OperIdx = RWSequences.size() - 1;
    // Make N-1 copies of this transition's last sequence.
    RWSequences.reserve(RWSequences.size() + SelectedRWs.size() - 1);
    RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
                       RWSequences[OperIdx]);
    // Push each of the N elements of the SelectedRWs onto a copy of the last
    // sequence (split the current operand into N operands).
    // Note that write sequences should be expanded within this loop--the entire
    // sequence belongs to a single operand.
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI, ++OperIdx) {
      IdxVec ExpandedRWs;
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
      llvm::append_range(RWSequences[OperIdx], ExpandedRWs);
    }
    assert(OperIdx == RWSequences.size() && "missed a sequence");
  } else {
    // Push this transition's expanded sequence onto this transition's last
    // sequence (add to the current operand's sequence).
    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
    IdxVec ExpandedRWs;
    for (unsigned int SelectedRW : SelectedRWs) {
      if (IsRead)
        ExpandedRWs.push_back(SelectedRW);
      else
        SchedModels.expandRWSequence(SelectedRW, ExpandedRWs, IsRead);
    }
    llvm::append_range(Seq, ExpandedRWs);
  }
}
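
// Illustrative example (hypothetical indices): for a variadic SchedWrite whose
// variant selects {W1, W2}, the transition's last write sequence is duplicated
// once; W1 is appended to the original copy and W2 to the duplicate, which
// effectively splits one variadic operand into two concrete operands.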
// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where the partial results
// start. RWSeq must be applied to all transitions between StartIdx and the end
// of TransVec.
bool PredTransitions::substituteVariantOperand(
    const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
  bool Subst = false;
  // Visit each original RW within the current sequence.
  for (unsigned int RWI : RWSeq) {
    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(RWI, IsRead);
    // Push this RW on all partial PredTransitions or distribute variants.
    // New PredTransitions may be pushed within this loop which should not be
    // revisited (TransEnd must be loop invariant).
    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
         TransIdx != TransEnd; ++TransIdx) {
      // Distribute this partial PredTransition across intersecting variants.
      // This will push copies of TransVec[TransIdx] on the back of TransVec.
      std::vector<TransVariant> IntersectingVariants;
      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
      // Now expand each variant on top of its copy of the transition.
      for (const TransVariant &IV : IntersectingVariants)
        pushVariant(IV, IsRead);
      if (IntersectingVariants.empty()) {
        if (IsRead)
          TransVec[TransIdx].ReadSequences.back().push_back(RWI);
        else
          TransVec[TransIdx].WriteSequences.back().push_back(RWI);
      } else {
        Subst = true;
      }
    }
  }
  return Subst;
}
// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number of variants.
//
// This is one step in a breadth-first search of nested variants.
bool PredTransitions::substituteVariants(const PredTransition &Trans) {
  // Build up a set of partial results starting at the back of
  // PredTransitions. Remember the first new transition.
  unsigned StartIdx = TransVec.size();
  bool Subst = false;
  assert(Trans.ProcIndex != 0);
  TransVec.emplace_back(Trans.PredTerm, Trans.ProcIndex);

  // Visit each original write sequence.
  for (const auto &WriteSequence : Trans.WriteSequences) {
    // Push a new (empty) write sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
             TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->WriteSequences.emplace_back();
    }
    Subst |=
        substituteVariantOperand(WriteSequence, /*IsRead=*/false, StartIdx);
  }
  // Visit each original read sequence.
  for (const auto &ReadSequence : Trans.ReadSequences) {
    // Push a new (empty) read sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
             TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->ReadSequences.emplace_back();
    }
    Subst |= substituteVariantOperand(ReadSequence, /*IsRead=*/true, StartIdx);
  }
  return Subst;
}
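
// A small worked example of the substitution above (illustrative names): a
// seed transition with write sequence [WriteA, MyWriteVar], where MyWriteVar
// has variants guarded by PredP and PredQ, becomes two partial transitions:
// one with PredTerm {PredP} and WriteA followed by PredP's selected writes,
// and one with PredTerm {PredQ} and WriteA followed by PredQ's selected
// writes. Nested variants are handled by the caller, which repeats this step
// until substituteVariantOperand() reports no further substitutions.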
static void addSequences(CodeGenSchedModels &SchedModels,
                         const SmallVectorImpl<SmallVector<unsigned, 4>> &Seqs,
                         IdxVec &Result, bool IsRead) {
  for (const auto &S : Seqs)
    if (!S.empty())
      Result.push_back(SchedModels.findOrInsertRW(S, IsRead));
}
static void dumpRecVec(const RecVec &RV) {
  for (const Record *R : RV)
    dbgs() << R->getName() << ", ";
}
static void dumpTransition(const CodeGenSchedModels &SchedModels,
                           const CodeGenSchedClass &FromSC,
                           const CodeGenSchedTransition &SCTrans,
                           const RecVec &Preds) {
  LLVM_DEBUG(dbgs() << "Adding transition from " << FromSC.Name << "("
                    << FromSC.Index << ") to "
                    << SchedModels.getSchedClass(SCTrans.ToClassIdx).Name << "("
                    << SCTrans.ToClassIdx << ") on pred term: (";
             dumpRecVec(Preds);
             dbgs() << ") on processor (" << SCTrans.ProcIndex << ")\n");
}
// Create a new SchedClass for each variant found by inferFromRW.
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
                                 unsigned FromClassIdx,
                                 CodeGenSchedModels &SchedModels) {
  // For each PredTransition, create a new CodeGenSchedTransition, which
  // usually requires creating a new SchedClass.
  for (const auto &LastTransition : LastTransitions) {
    // Variant expansion (substituteVariants) may create unconditional
    // transitions. We don't need to build sched classes for them.
    if (LastTransition.PredTerm.empty())
      continue;
    IdxVec OperWritesVariant, OperReadsVariant;
    addSequences(SchedModels, LastTransition.WriteSequences, OperWritesVariant,
                 /*IsRead=*/false);
    addSequences(SchedModels, LastTransition.ReadSequences, OperReadsVariant,
                 /*IsRead=*/true);
    CodeGenSchedTransition SCTrans;

    // Transition should not contain processor indices already assigned to
    // InstRWs in this scheduling class.
    const CodeGenSchedClass &FromSC = SchedModels.getSchedClass(FromClassIdx);
    if (FromSC.InstRWProcIndices.count(LastTransition.ProcIndex))
      continue;
    SCTrans.ProcIndex = LastTransition.ProcIndex;
    SCTrans.ToClassIdx =
        SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
                                  OperReadsVariant, LastTransition.ProcIndex);

    // The final PredTerm is the unique set of predicates guarding the
    // transition.
    RecVec Preds;
    transform(LastTransition.PredTerm, std::back_inserter(Preds),
              [](const PredCheck &P) { return P.Predicate; });
    Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
    dumpTransition(SchedModels, FromSC, SCTrans, Preds);
    SCTrans.PredTerm = std::move(Preds);
    SchedModels.getSchedClass(FromClassIdx)
        .Transitions.push_back(std::move(SCTrans));
  }
}
std::vector<unsigned> CodeGenSchedModels::getAllProcIndices() const {
  std::vector<unsigned> ProcIdVec;
  for (const auto &PM : ProcModelMap)
    if (PM.second != 0)
      ProcIdVec.push_back(PM.second);
  // The order of the keys (Record pointers) of ProcModelMap is not stable.
  // Sort to stabilize the values.
  llvm::sort(ProcIdVec);
  return ProcIdVec;
}
static std::vector<PredTransition>
makePerProcessorTransitions(const PredTransition &Trans,
                            ArrayRef<unsigned> ProcIndices) {
  std::vector<PredTransition> PerCpuTransVec;
  for (unsigned ProcId : ProcIndices) {
    assert(ProcId != 0);
    PerCpuTransVec.push_back(Trans);
    PerCpuTransVec.back().ProcIndex = ProcId;
  }
  return PerCpuTransVec;
}
// Create new SchedClasses for the given ReadWrite list. If any of the
// ReadWrites refers to a SchedVariant, create a new SchedClass for each
// variant of the ReadWrite list, following Aliases if necessary.
void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
                                     ArrayRef<unsigned> OperReads,
                                     unsigned FromClassIdx,
                                     ArrayRef<unsigned> ProcIndices) {
  LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
             dbgs() << ") ");

  // Create a seed transition with an empty PredTerm and the expanded sequences
  // of SchedWrites for the current SchedClass.
  std::vector<PredTransition> LastTransitions;
  LastTransitions.emplace_back();

  for (unsigned WriteIdx : OperWrites) {
    IdxVec WriteSeq;
    expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false);
    LastTransitions[0].WriteSequences.emplace_back();
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
    Seq.append(WriteSeq.begin(), WriteSeq.end());
    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  LLVM_DEBUG(dbgs() << " Reads: ");
  for (unsigned ReadIdx : OperReads) {
    IdxVec ReadSeq;
    expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
    LastTransitions[0].ReadSequences.emplace_back();
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
    Seq.append(ReadSeq.begin(), ReadSeq.end());
    LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  LLVM_DEBUG(dbgs() << '\n');

  LastTransitions = makePerProcessorTransitions(
      LastTransitions[0], llvm::is_contained(ProcIndices, 0)
                              ? ArrayRef<unsigned>(getAllProcIndices())
                              : ProcIndices);
  // Collect all PredTransitions for individual operands.
  // Iterate until no variant writes remain.
  bool SubstitutedAny;
  do {
    SubstitutedAny = false;
    PredTransitions Transitions(*this);
    for (const PredTransition &Trans : LastTransitions)
      SubstitutedAny |= Transitions.substituteVariants(Trans);
    LLVM_DEBUG(Transitions.dump());
    LastTransitions.swap(Transitions.TransVec);
  } while (SubstitutedAny);

  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
  inferFromTransitions(LastTransitions, FromClassIdx, *this);
}
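
// The next two helpers validate processor resource groups. As a sketch of the
// TableGen side (illustrative names):
//
//   def P0  : ProcResource<1>;
//   def P1  : ProcResource<1>;
//   def P2  : ProcResource<1>;
//   def P01 : ProcResGroup<[P0, P1]>;
//
// Two groups that share a unit, e.g. [P0, P1] and [P1, P2], are only accepted
// when some group (such as ProcResGroup<[P0, P1, P2]>) contains every unit of
// both.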
// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
  for (Record *ProcResourceDef : PM.ProcResourceDefs) {
    if (!ProcResourceDef->isSubClassOf("ProcResGroup"))
      continue;
    RecVec SuperUnits = ProcResourceDef->getValueAsListOfDefs("Resources");
    RecIter RI = SubUnits.begin(), RE = SubUnits.end();
    for ( ; RI != RE; ++RI) {
      if (!is_contained(SuperUnits, *RI)) {
        break;
      }
    }
    if (RI == RE)
      return true;
  }
  return false;
}
// Verify that overlapping groups have a common supergroup.
void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec CheckUnits =
        PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    for (unsigned j = i + 1; j < e; ++j) {
      // Check for shared units with all other groups.
      if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
        continue;
      RecVec OtherUnits =
          PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
      if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
                             OtherUnits.begin(), OtherUnits.end())
          != CheckUnits.end()) {
        // CheckUnits and OtherUnits overlap.
        llvm::append_range(OtherUnits, CheckUnits);
        if (!hasSuperGroup(OtherUnits, PM)) {
          PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
                          "proc resource group overlaps with " +
                              PM.ProcResourceDefs[j]->getName() +
                              " but no supergroup contains both.");
        }
      }
    }
  }
}
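
// For reference, collectRegisterFiles() below consumes defs along these lines
// (an illustrative sketch, not a definition from any in-tree target):
//
//   def MyIntegerPRF : RegisterFile<180, [GR32, GR64], [1, 1]> {
//     let MaxMovesEliminatedPerCycle = 2;
//     let AllowZeroMoveEliminationOnly = 1;
//   }
//
// The record fields queried below are NumPhysRegs, RegClasses, RegCosts,
// AllowMoveElimination, MaxMovesEliminatedPerCycle and
// AllowZeroMoveEliminationOnly.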
// Collect all the RegisterFile definitions available in this target.
void CodeGenSchedModels::collectRegisterFiles() {
  RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile");

  // RegisterFiles is the vector of CodeGenRegisterFile.
  for (Record *RF : RegisterFileDefs) {
    // For each register file definition, construct a CodeGenRegisterFile object
    // and add it to the appropriate scheduling model.
    CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel"));
    PM.RegisterFiles.emplace_back(CodeGenRegisterFile(RF->getName(), RF));
    CodeGenRegisterFile &CGRF = PM.RegisterFiles.back();
    CGRF.MaxMovesEliminatedPerCycle =
        RF->getValueAsInt("MaxMovesEliminatedPerCycle");
    CGRF.AllowZeroMoveEliminationOnly =
        RF->getValueAsBit("AllowZeroMoveEliminationOnly");

    // Now set the number of physical registers as well as the cost of
    // registers in each register class.
    CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs");
    if (!CGRF.NumPhysRegs) {
      PrintFatalError(RF->getLoc(),
                      "Invalid RegisterFile with zero physical registers");
    }

    RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
    std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
    ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
    for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
      int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;

      bool AllowMoveElim = false;
      if (MoveElimInfo->size() > I) {
        BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
        AllowMoveElim = Val->getValue();
      }

      CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim);
    }
  }
}
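
// collectProcResources() below gathers per-processor records of roughly this
// shape (illustrative names; WriteIMul and ReadAfterLd stand in for a
// target's own SchedWrite/SchedRead defs):
//
//   def MyALU : ProcResource<2>;
//   def : WriteRes<WriteIMul, [MyALU]> { let Latency = 3; }
//   def MyWriteF : SchedWriteRes<[MyALU]> { let Latency = 5; }
//   def : ReadAdvance<ReadAfterLd, 4>;
//
// WriteRes/ReadAdvance bind an existing SchedWrite/SchedRead to resources for
// one SchedMachineModel, while SchedWriteRes/SchedReadAdvance define a new,
// subtarget-specific SchedReadWrite directly.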
// Collect and sort WriteRes, ReadAdvance, and ProcResources.
void CodeGenSchedModels::collectProcResources() {
  ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits");
  ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");

  // Add any subtarget-specific SchedReadWrites that are directly associated
  // with processor resources. Refer to the parent SchedClass's ProcIndices to
  // determine which processors they apply to.
  for (const CodeGenSchedClass &SC :
       make_range(schedClassBegin(), schedClassEnd())) {
    if (SC.ItinClassDef) {
      collectItinProcResources(SC.ItinClassDef);
      continue;
    }

    // This class may have a default ReadWrite list which can be overridden by
    // InstRW definitions.
    for (Record *RW : SC.InstRWs) {
      Record *RWModelDef = RW->getValueAsDef("SchedModel");
      unsigned PIdx = getProcModel(RWModelDef).Index;
      IdxVec Writes, Reads;
      findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      collectRWResources(Writes, Reads, PIdx);
    }

    collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices);
  }
  // Add resources separately defined by each subtarget.
  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
  for (Record *WR : WRDefs) {
    Record *ModelDef = WR->getValueAsDef("SchedModel");
    addWriteRes(WR, getProcModel(ModelDef).Index);
  }
  RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
  for (Record *SWR : SWRDefs) {
    Record *ModelDef = SWR->getValueAsDef("SchedModel");
    addWriteRes(SWR, getProcModel(ModelDef).Index);
  }
  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
  for (Record *RA : RADefs) {
    Record *ModelDef = RA->getValueAsDef("SchedModel");
    addReadAdvance(RA, getProcModel(ModelDef).Index);
  }
  RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
  for (Record *SRA : SRADefs) {
    if (SRA->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SRA->getValueAsDef("SchedModel");
      addReadAdvance(SRA, getProcModel(ModelDef).Index);
    }
  }
  // Add ProcResGroups that are defined within this processor model, which may
  // not be directly referenced but may directly specify a buffer size.
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (Record *PRG : ProcResGroups) {
    if (!PRG->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel"));
    if (!is_contained(PM.ProcResourceDefs, PRG))
      PM.ProcResourceDefs.push_back(PRG);
  }
  // Add ProcResourceUnits unconditionally.
  for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) {
    if (!PRU->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel"));
    if (!is_contained(PM.ProcResourceDefs, PRU))
      PM.ProcResourceDefs.push_back(PRU);
  }
  // Finalize each ProcModel by sorting the record arrays.
  for (CodeGenProcModel &PM : ProcModels) {
    llvm::sort(PM.WriteResDefs, LessRecord());
    llvm::sort(PM.ReadAdvanceDefs, LessRecord());
    llvm::sort(PM.ProcResourceDefs, LessRecord());
    LLVM_DEBUG(
        PM.dump();
        dbgs() << "WriteResDefs: ";
        for (auto WriteResDef : PM.WriteResDefs) {
          if (WriteResDef->isSubClassOf("WriteRes"))
            dbgs() << WriteResDef->getValueAsDef("WriteType")->getName() << " ";
          else
            dbgs() << WriteResDef->getName() << " ";
        }
        dbgs() << "\nReadAdvanceDefs: ";
        for (Record *ReadAdvanceDef : PM.ReadAdvanceDefs) {
          if (ReadAdvanceDef->isSubClassOf("ReadAdvance"))
            dbgs() << ReadAdvanceDef->getValueAsDef("ReadType")->getName()
                   << " ";
          else
            dbgs() << ReadAdvanceDef->getName() << " ";
        }
        dbgs() << "\nProcResourceDefs: ";
        for (Record *ProcResourceDef : PM.ProcResourceDefs) {
          dbgs() << ProcResourceDef->getName() << " ";
        }
        dbgs() << '\n');
    verifyProcResourceGroups(PM);
  }

  ProcResourceDefs.clear();
  ProcResGroups.clear();
}
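
// checkCompleteness() below enforces the CompleteModel contract. A sketch of
// the TableGen side (illustrative names):
//
//   def MyModel : SchedMachineModel {
//     let CompleteModel = 1;
//     let UnsupportedFeatures = [HasWeirdExt];
//   }
//
// With CompleteModel set, every instruction must either be covered by the
// model (SchedRW, InstRW or itinerary data), be marked hasNoSchedulingInfo,
// or be excluded via UnsupportedFeatures.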
void CodeGenSchedModels::checkCompleteness() {
  bool Complete = true;
  for (const CodeGenProcModel &ProcModel : procModels()) {
    const bool HasItineraries = ProcModel.hasItineraries();
    if (!ProcModel.ModelDef->getValueAsBit("CompleteModel"))
      continue;
    for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
      if (Inst->hasNoSchedulingInfo)
        continue;
      if (ProcModel.isUnsupported(*Inst))
        continue;
      unsigned SCIdx = getSchedClassIdx(*Inst);
      if (!SCIdx) {
        if (Inst->TheDef->isValueUnset("SchedRW")) {
          PrintError(Inst->TheDef->getLoc(),
                     "No schedule information for instruction '" +
                         Inst->TheDef->getName() + "' in SchedMachineModel '" +
                         ProcModel.ModelDef->getName() + "'");
          Complete = false;
        }
        continue;
      }

      const CodeGenSchedClass &SC = getSchedClass(SCIdx);
      if (!SC.Writes.empty())
        continue;
      if (HasItineraries && SC.ItinClassDef != nullptr &&
          SC.ItinClassDef->getName() != "NoItinerary")
        continue;

      const RecVec &InstRWs = SC.InstRWs;
      auto I = find_if(InstRWs, [&ProcModel](const Record *R) {
        return R->getValueAsDef("SchedModel") == ProcModel.ModelDef;
      });
      if (I == InstRWs.end()) {
        PrintError(Inst->TheDef->getLoc(), "'" + ProcModel.ModelName +
                                               "' lacks information for '" +
                                               Inst->TheDef->getName() + "'");
        Complete = false;
      }
    }
  }
  if (!Complete) {
    errs() << "\n\nIncomplete schedule models found.\n"
           << "- Consider setting 'CompleteModel = 0' while developing new "
              "models.\n"
           << "- Pseudo instructions can be marked with 'hasNoSchedulingInfo "
              "= 1'.\n"
           << "- Instructions should usually have Sched<[...]> as a "
              "superclass, you may temporarily use an empty list.\n"
           << "- Instructions related to unsupported features can be excluded "
              "with list<Predicate> UnsupportedFeatures = [HasA,..,HasY]; in "
              "the processor model.\n\n";
    PrintFatalError("Incomplete schedule model");
  }
}
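
// For targets still using itineraries, ItinRW records map itinerary classes
// onto SchedReadWrites; the "MatchedItinClasses" and "OperandReadWrites"
// fields queried below come from defs of roughly this shape (a hedged,
// illustrative sketch with made-up names):
//
//   def : ItinRW<[WriteALU, ReadALU], [IIC_MyALUOp, IIC_MyShiftOp]>;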
// Collect itinerary class resources for each processor.
void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
         II != IE; ++II) {
      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
      if (!llvm::is_contained(Matched, ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
                                             + ItinClassDef->getName()
                                             + " in ItinResources for "
                                             + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      collectRWResources(Writes, Reads, PIdx);
    }
  }
}
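
// collectRWResources() below also follows SchedAlias records, which remap a
// generic SchedReadWrite to a subtarget-specific one, e.g. (illustrative
// names):
//
//   def MyWriteIMul : SchedWriteRes<[MyALU]> { let Latency = 4; }
//   def : SchedAlias<WriteIMul, MyWriteIMul>;
//
// The "AliasRW" and optional "SchedModel" fields read in the alias loop below
// come from SchedAlias.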
void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
                                            ArrayRef<unsigned> ProcIndices) {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (SchedRW.TheDef) {
    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
      for (unsigned Idx : ProcIndices)
        addWriteRes(SchedRW.TheDef, Idx);
    } else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
      for (unsigned Idx : ProcIndices)
        addReadAdvance(SchedRW.TheDef, Idx);
    }
  }
  for (auto *Alias : SchedRW.Aliases) {
    IdxVec AliasProcIndices;
    if (Alias->getValueInit("SchedModel")->isComplete()) {
      AliasProcIndices.push_back(
          getProcModel(Alias->getValueAsDef("SchedModel")).Index);
    } else
      AliasProcIndices = ProcIndices;
    const CodeGenSchedRW &AliasRW = getSchedRW(Alias->getValueAsDef("AliasRW"));
    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");

    IdxVec ExpandedRWs;
    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
    for (unsigned int ExpandedRW : ExpandedRWs) {
      collectRWResources(ExpandedRW, IsRead, AliasProcIndices);
    }
  }
}
// Collect resources for a set of read/write types and processor indices.
void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes,
                                            ArrayRef<unsigned> Reads,
                                            ArrayRef<unsigned> ProcIndices) {
  for (unsigned Idx : Writes)
    collectRWResources(Idx, /*IsRead=*/false, ProcIndices);

  for (unsigned Idx : Reads)
    collectRWResources(Idx, /*IsRead=*/true, ProcIndices);
}
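
// findProcResUnits() below resolves an abstract ProcResourceKind to the units
// a particular model declares for it. A rough sketch of that indirection,
// assuming the usual ProcResourceKind/ProcResourceUnits split (illustrative
// names):
//
//   def MyVectorUnit : ProcResourceKind;
//   def : ProcResourceUnits<MyVectorUnit, 2> { let SchedModel = MyModel; }
//
// A plain ProcResource is its own kind and is returned directly; a
// ProcResGroup is matched against the groups declared for the same model.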
// Find the processor's resource units for this kind of resource.
Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
                                             const CodeGenProcModel &PM,
                                             ArrayRef<SMLoc> Loc) const {
  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
    return ProcResKind;

  Record *ProcUnitDef = nullptr;
  assert(!ProcResourceDefs.empty());
  assert(!ProcResGroups.empty());

  for (Record *ProcResDef : ProcResourceDefs) {
    if (ProcResDef->getValueAsDef("Kind") == ProcResKind &&
        ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError(Loc,
                        "Multiple ProcessorResourceUnits associated with " +
                            ProcResKind->getName());
      }
      ProcUnitDef = ProcResDef;
    }
  }
  for (Record *ProcResGroup : ProcResGroups) {
    if (ProcResGroup == ProcResKind &&
        ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError(Loc,
                        "Multiple ProcessorResourceUnits associated with " +
                            ProcResKind->getName());
      }
      ProcUnitDef = ProcResGroup;
    }
  }
  if (!ProcUnitDef) {
    PrintFatalError(Loc,
                    "No ProcessorResources associated with " +
                        ProcResKind->getName());
  }
  return ProcUnitDef;
}
// Iteratively add a resource and its super resources.
void CodeGenSchedModels::addProcResource(Record *ProcResKind,
                                         CodeGenProcModel &PM,
                                         ArrayRef<SMLoc> Loc) {
  while (true) {
    Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc);

    // See if this ProcResource is already associated with this processor.
    if (is_contained(PM.ProcResourceDefs, ProcResUnits))
      return;

    PM.ProcResourceDefs.push_back(ProcResUnits);
    if (ProcResUnits->isSubClassOf("ProcResGroup"))
      return;

    if (!ProcResUnits->getValueInit("Super")->isComplete())
      return;

    ProcResKind = ProcResUnits->getValueAsDef("Super");
  }
}
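
// addProcResource() above chases the optional "Super" field, so declaring a
// specialized unit implicitly registers its enclosing resource as well, e.g.
// (illustrative names):
//
//   def MyPort0   : ProcResource<1>;
//   def MyDivider : ProcResource<1> { let Super = MyPort0; }
//
// Adding MyDivider to a model pulls in MyPort0 on the next loop iteration.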
// Add resources for a SchedWrite to this processor if they don't exist.
void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
  assert(PIdx && "don't add resources to an invalid Processor model");

  RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
  if (is_contained(WRDefs, ProcWriteResDef))
    return;
  WRDefs.push_back(ProcWriteResDef);

  // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
  RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
  for (auto *ProcResDef : ProcResDefs) {
    addProcResource(ProcResDef, ProcModels[PIdx], ProcWriteResDef->getLoc());
  }
}
// Add resources for a ReadAdvance to this processor if they don't exist.
void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
                                        unsigned PIdx) {
  RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
  if (is_contained(RADefs, ProcReadAdvanceDef))
    return;
  RADefs.push_back(ProcReadAdvanceDef);
}
unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
  RecIter PRPos = find(ProcResourceDefs, PRDef);
  if (PRPos == ProcResourceDefs.end())
    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
                                     "the ProcResources list for " + ModelName);
  // Idx=0 is reserved for invalid.
  return 1 + (PRPos - ProcResourceDefs.begin());
}
bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const {
  for (const Record *TheDef : UnsupportedFeaturesDefs) {
    for (const Record *PredDef :
         Inst.TheDef->getValueAsListOfDefs("Predicates")) {
      if (TheDef->getName() == PredDef->getName())
        return true;
    }
  }
  return false;
}
void CodeGenProcModel::dump() const {
  dbgs() << Index << ": " << ModelName << " "
         << (ModelDef ? ModelDef->getName() : "inferred") << " "
         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
}
void CodeGenSchedRW::dump() const {
  dbgs() << Name << (IsVariadic ? " (V) " : " ");
  if (IsSequence) {
    dbgs() << "(";
    dumpIdxVec(Sequence);
    dbgs() << ")";
  }
}
void CodeGenSchedClass::dump(const CodeGenSchedModels *SchedModels) const {
  dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
         << "  Writes: ";
  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
    SchedModels->getSchedWrite(Writes[i]).dump();
    if (i < N - 1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n  Reads: ";
  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
    SchedModels->getSchedRead(Reads[i]).dump();
    if (i < N - 1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n  ProcIdx: "; dumpIdxVec(ProcIndices);
  if (!Transitions.empty()) {
    dbgs() << "\n Transitions for Proc ";
    for (const CodeGenSchedTransition &Transition : Transitions) {
      dbgs() << Transition.ProcIndex << ", ";
    }
  }
  dbgs() << '\n';
}
void PredTransitions::dump() const {
  dbgs() << "Expanded Variants:\n";
  for (const auto &TI : TransVec) {
    dbgs() << "{";
    ListSeparator LS;
    for (const PredCheck &PC : TI.PredTerm)
      dbgs() << LS << SchedModels.getSchedRW(PC.RWIdx, PC.IsRead).Name << ":"
             << PC.Predicate->getName();
    dbgs() << "},\n => {";
    for (SmallVectorImpl<SmallVector<unsigned, 4>>::const_iterator
             WSI = TI.WriteSequences.begin(),
             WSE = TI.WriteSequences.end();
         WSI != WSE; ++WSI) {
      dbgs() << "(";
      ListSeparator LS;
      for (unsigned N : *WSI)
        dbgs() << LS << SchedModels.getSchedWrite(N).Name;