1 //===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines structures to encapsulate the machine model as described in
11 // the target description.
13 //===----------------------------------------------------------------------===//
15 #include "CodeGenSchedule.h"
16 #include "CodeGenInstruction.h"
17 #include "CodeGenTarget.h"
18 #include "llvm/ADT/MapVector.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/Support/Casting.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/Regex.h"
26 #include "llvm/Support/raw_ostream.h"
27 #include "llvm/TableGen/Error.h"
28 #include <algorithm>
29 #include <iterator>
30 #include <utility>
32 using namespace llvm;
34 #define DEBUG_TYPE "subtarget-emitter"
36 #ifndef NDEBUG
37 static void dumpIdxVec(ArrayRef<unsigned> V) {
38 for (unsigned Idx : V)
39 dbgs() << Idx << ", ";
41 #endif
43 namespace {
45 // (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
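// Illustrative use (hypothetical context): a scheduling model may write
// (instrs COPY, PHI) to name an explicit list of instruction definitions.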
46 struct InstrsOp : public SetTheory::Operator {
47 void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
48 ArrayRef<SMLoc> Loc) override {
49 ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
53 // (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
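// Illustrative use (hypothetical opcode prefix): (instregex "FADD.*") selects
// every opcode whose name begins with "FADD"; patterns are matched against the
// start of the instruction name, as implemented below.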
54 struct InstRegexOp : public SetTheory::Operator {
55 const CodeGenTarget &Target;
56 InstRegexOp(const CodeGenTarget &t): Target(t) {}
58 /// Remove any text inside of parentheses from S.
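/// Illustrative examples (hypothetical patterns): "MUL(_rr)?" becomes "MUL?"
/// and "ADD(S|D)rr" becomes "ADDrr". This is only used below to detect
/// top-level '|' and '?' metacharacters.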
59 static std::string removeParens(llvm::StringRef S) {
60 std::string Result;
61 unsigned Paren = 0;
62 // NB: We don't care about escaped parens here.
63 for (char C : S) {
64 switch (C) {
65 case '(':
66 ++Paren;
67 break;
68 case ')':
69 --Paren;
70 break;
71 default:
72 if (Paren == 0)
73 Result += C;
76 return Result;
79 void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
80 ArrayRef<SMLoc> Loc) override {
81 ArrayRef<const CodeGenInstruction *> Instructions =
82 Target.getInstructionsByEnumValue();
84 unsigned NumGeneric = Target.getNumFixedInstructions();
85 unsigned NumPseudos = Target.getNumPseudoInstructions();
86 auto Generics = Instructions.slice(0, NumGeneric);
87 auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
88 auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);
90 for (Init *Arg : make_range(Expr->arg_begin(), Expr->arg_end())) {
91 StringInit *SI = dyn_cast<StringInit>(Arg);
92 if (!SI)
93 PrintFatalError(Loc, "instregex requires pattern string: " +
94 Expr->getAsString());
95 StringRef Original = SI->getValue();
97 // Extract a prefix that we can binary search on.
98 static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
99 auto FirstMeta = Original.find_first_of(RegexMetachars);
101 // Look for top-level | or ?. We cannot optimize them to binary search.
102 if (removeParens(Original).find_first_of("|?") != std::string::npos)
103 FirstMeta = 0;
105 Optional<Regex> Regexpr = None;
106 StringRef Prefix = Original.substr(0, FirstMeta);
107 StringRef PatStr = Original.substr(FirstMeta);
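// Illustrative split (hypothetical pattern): "VADD.*" yields the literal
// prefix "VADD", used for the binary search below, and the regex tail ".*",
// which is wrapped as "^(.*)" to get a prefix-style match.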
108 if (!PatStr.empty()) {
109 // For the rest use a python-style prefix match.
110 std::string pat = PatStr;
111 if (pat[0] != '^') {
112 pat.insert(0, "^(");
113 pat.insert(pat.end(), ')');
115 Regexpr = Regex(pat);
118 int NumMatches = 0;
120 // The generic opcodes are unsorted, handle them manually.
121 for (auto *Inst : Generics) {
122 StringRef InstName = Inst->TheDef->getName();
123 if (InstName.startswith(Prefix) &&
124 (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) {
125 Elts.insert(Inst->TheDef);
126 NumMatches++;
130 // Target instructions are split into two ranges: pseudo instructions
131 // first, then non-pseudos. Each range is in lexicographical order
132 // sorted by name. Find the sub-ranges that start with our prefix.
133 struct Comp {
134 bool operator()(const CodeGenInstruction *LHS, StringRef RHS) {
135 return LHS->TheDef->getName() < RHS;
137 bool operator()(StringRef LHS, const CodeGenInstruction *RHS) {
138 return LHS < RHS->TheDef->getName() &&
139 !RHS->TheDef->getName().startswith(LHS);
142 auto Range1 =
143 std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp());
144 auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(),
145 Prefix, Comp());
147 // For these ranges we know that instruction names start with the prefix.
148 // Check if there's a regex that needs to be checked.
149 const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) {
150 StringRef InstName = Inst->TheDef->getName();
151 if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) {
152 Elts.insert(Inst->TheDef);
153 NumMatches++;
156 std::for_each(Range1.first, Range1.second, HandleNonGeneric);
157 std::for_each(Range2.first, Range2.second, HandleNonGeneric);
159 if (0 == NumMatches)
160 PrintFatalError(Loc, "instregex has no matches: " + Original);
165 } // end anonymous namespace
167 /// CodeGenModels ctor interprets machine model records and populates maps.
168 CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
169 const CodeGenTarget &TGT):
170 Records(RK), Target(TGT) {
172 Sets.addFieldExpander("InstRW", "Instrs");
174 // Allow Set evaluation to recognize the dags used in InstRW records:
175 // (instrs Op1, Op2, ...)
176 Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
177 Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));
179 // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
180 // that are explicitly referenced in tablegen records. Resources associated
181 // with each processor will be derived later. Populate ProcModelMap with the
182 // CodeGenProcModel instances.
183 collectProcModels();
185 // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
186 // defined, and populate SchedReads and SchedWrites vectors. Implicit
187 // SchedReadWrites that represent sequences derived from expanded variants
188 // will be inferred later.
189 collectSchedRW();
191 // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
192 // required by an instruction definition, and populate SchedClassIdxMap. Set
193 // NumItineraryClasses to the number of explicit itinerary classes referenced
194 // by instructions. Set NumInstrSchedClasses to the number of itinerary
195 // classes plus any classes implied by instructions that derive from class
196 // Sched and provide a SchedRW list. This does not infer any new classes from
197 // SchedVariant.
198 collectSchedClasses();
200 // Find instruction itineraries for each processor. Sort and populate
201 // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
202 // all itinerary classes to be discovered.
203 collectProcItins();
205 // Find ItinRW records for each processor and itinerary class.
206 // (For per-operand resources mapped to itinerary classes).
207 collectProcItinRW();
209 // Find UnsupportedFeatures records for each processor.
211 collectProcUnsupportedFeatures();
213 // Infer new SchedClasses from SchedVariant.
214 inferSchedClasses();
216 // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
217 // ProcResourceDefs.
218 LLVM_DEBUG(
219 dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
220 collectProcResources();
222 // Collect optional processor description.
223 collectOptionalProcessorInfo();
225 // Check MCInstPredicate definitions.
226 checkMCInstPredicates();
228 // Check STIPredicate definitions.
229 checkSTIPredicates();
231 // Find STIPredicate definitions for each processor model, and construct
232 // STIPredicateFunction objects.
233 collectSTIPredicates();
235 checkCompleteness();
238 void CodeGenSchedModels::checkSTIPredicates() const {
239 DenseMap<StringRef, const Record *> Declarations;
241 // There cannot be multiple declarations with the same name.
242 const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl");
243 for (const Record *R : Decls) {
244 StringRef Name = R->getValueAsString("Name");
245 const auto It = Declarations.find(Name);
246 if (It == Declarations.end()) {
247 Declarations[Name] = R;
248 continue;
251 PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared.");
252 PrintNote(It->second->getLoc(), "Previous declaration was here.");
253 PrintFatalError(R->getLoc(), "Invalid STIPredicateDecl found.");
256 // Disallow InstructionEquivalenceClasses with an empty instruction list.
257 const RecVec Defs =
258 Records.getAllDerivedDefinitions("InstructionEquivalenceClass");
259 for (const Record *R : Defs) {
260 RecVec Opcodes = R->getValueAsListOfDefs("Opcodes");
261 if (Opcodes.empty()) {
262 PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass "
263 "defined with an empty opcode list.");
268 // Used by function `processSTIPredicate` to construct a mask of machine
269 // instruction operands.
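// For example, Indices = {0, 2} yields a 3-bit-wide mask with bits 0 and 2
// set (binary 101).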
270 static APInt constructOperandMask(ArrayRef<int64_t> Indices) {
271 APInt OperandMask;
272 if (Indices.empty())
273 return OperandMask;
275 int64_t MaxIndex = *std::max_element(Indices.begin(), Indices.end());
276 assert(MaxIndex >= 0 && "Invalid negative indices in input!");
277 OperandMask = OperandMask.zext(MaxIndex + 1);
278 for (const int64_t Index : Indices) {
279 assert(Index >= 0 && "Invalid negative indices!");
280 OperandMask.setBit(Index);
283 return OperandMask;
286 static void
287 processSTIPredicate(STIPredicateFunction &Fn,
288 const DenseMap<Record *, unsigned> &ProcModelMap) {
289 DenseMap<const Record *, unsigned> Opcode2Index;
290 using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>;
291 std::vector<OpcodeMapPair> OpcodeMappings;
292 std::vector<std::pair<APInt, APInt>> OpcodeMasks;
294 DenseMap<const Record *, unsigned> Predicate2Index;
295 unsigned NumUniquePredicates = 0;
297 // Number unique predicates and opcodes used by InstructionEquivalenceClass
298 // definitions. Each unique opcode will be associated with an OpcodeInfo
299 // object.
300 for (const Record *Def : Fn.getDefinitions()) {
301 RecVec Classes = Def->getValueAsListOfDefs("Classes");
302 for (const Record *EC : Classes) {
303 const Record *Pred = EC->getValueAsDef("Predicate");
304 if (Predicate2Index.find(Pred) == Predicate2Index.end())
305 Predicate2Index[Pred] = NumUniquePredicates++;
307 RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
308 for (const Record *Opcode : Opcodes) {
309 if (Opcode2Index.find(Opcode) == Opcode2Index.end()) {
310 Opcode2Index[Opcode] = OpcodeMappings.size();
311 OpcodeMappings.emplace_back(Opcode, OpcodeInfo());
317 // Initialize vector `OpcodeMasks` with default values. We want to keep track
318 // of which processors "use" which opcodes. We also want to be able to
319 // identify predicates that are used by different processors for the same
320 // opcode.
321 // This information is used later on by this algorithm to sort OpcodeMapping
322 // elements based on their processor and predicate sets.
323 OpcodeMasks.resize(OpcodeMappings.size());
324 APInt DefaultProcMask(ProcModelMap.size(), 0);
325 APInt DefaultPredMask(NumUniquePredicates, 0);
326 for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
327 MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);
329 // Construct a OpcodeInfo object for every unique opcode declared by an
330 // InstructionEquivalenceClass definition.
331 for (const Record *Def : Fn.getDefinitions()) {
332 RecVec Classes = Def->getValueAsListOfDefs("Classes");
333 const Record *SchedModel = Def->getValueAsDef("SchedModel");
334 unsigned ProcIndex = ProcModelMap.find(SchedModel)->second;
335 APInt ProcMask(ProcModelMap.size(), 0);
336 ProcMask.setBit(ProcIndex);
338 for (const Record *EC : Classes) {
339 RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
341 std::vector<int64_t> OpIndices =
342 EC->getValueAsListOfInts("OperandIndices");
343 APInt OperandMask = constructOperandMask(OpIndices);
345 const Record *Pred = EC->getValueAsDef("Predicate");
346 APInt PredMask(NumUniquePredicates, 0);
347 PredMask.setBit(Predicate2Index[Pred]);
349 for (const Record *Opcode : Opcodes) {
350 unsigned OpcodeIdx = Opcode2Index[Opcode];
351 if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) {
352 std::string Message =
353 "Opcode " + Opcode->getName().str() +
354 " used by multiple InstructionEquivalenceClass definitions.";
355 PrintFatalError(EC->getLoc(), Message);
357 OpcodeMasks[OpcodeIdx].first |= ProcMask;
358 OpcodeMasks[OpcodeIdx].second |= PredMask;
359 OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second;
361 OI.addPredicateForProcModel(ProcMask, OperandMask, Pred);
366 // Sort OpcodeMappings elements based on their CPU and predicate masks.
367 // As a last resort, order elements by opcode identifier.
368 llvm::sort(OpcodeMappings,
369 [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
370 unsigned LhsIdx = Opcode2Index[Lhs.first];
371 unsigned RhsIdx = Opcode2Index[Rhs.first];
372 std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx];
373 std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx];
375 if (LhsMasks.first != RhsMasks.first) {
376 if (LhsMasks.first.countPopulation() <
377 RhsMasks.first.countPopulation())
378 return true;
379 return LhsMasks.first.countLeadingZeros() >
380 RhsMasks.first.countLeadingZeros();
383 if (LhsMasks.second != RhsMasks.second) {
384 if (LhsMasks.second.countPopulation() <
385 RhsMasks.second.countPopulation())
386 return true;
387 return LhsMasks.second.countLeadingZeros() >
388 RhsMasks.second.countLeadingZeros();
391 return LhsIdx < RhsIdx;
394 // Now construct opcode groups. Groups are used by the SubtargetEmitter when
395 // expanding the body of a STIPredicate function. In particular, each opcode
396 // group is expanded into a sequence of labels in a switch statement.
397 // It identifies opcodes for which different processors define the same
398 // predicates and the same opcode masks.
399 for (OpcodeMapPair &Info : OpcodeMappings)
400 Fn.addOpcode(Info.first, std::move(Info.second));
403 void CodeGenSchedModels::collectSTIPredicates() {
404 // Map STIPredicateDecl records to elements of vector
405 // CodeGenSchedModels::STIPredicates.
406 DenseMap<const Record *, unsigned> Decl2Index;
408 RecVec RV = Records.getAllDerivedDefinitions("STIPredicate");
409 for (const Record *R : RV) {
410 const Record *Decl = R->getValueAsDef("Declaration");
412 const auto It = Decl2Index.find(Decl);
413 if (It == Decl2Index.end()) {
414 Decl2Index[Decl] = STIPredicates.size();
415 STIPredicateFunction Predicate(Decl);
416 Predicate.addDefinition(R);
417 STIPredicates.emplace_back(std::move(Predicate));
418 continue;
421 STIPredicateFunction &PreviousDef = STIPredicates[It->second];
422 PreviousDef.addDefinition(R);
425 for (STIPredicateFunction &Fn : STIPredicates)
426 processSTIPredicate(Fn, ProcModelMap);
429 void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask,
430 const llvm::APInt &OperandMask,
431 const Record *Predicate) {
432 auto It = llvm::find_if(
433 Predicates, [&OperandMask, &Predicate](const PredicateInfo &P) {
434 return P.Predicate == Predicate && P.OperandMask == OperandMask;
436 if (It == Predicates.end()) {
437 Predicates.emplace_back(CpuMask, OperandMask, Predicate);
438 return;
440 It->ProcModelMask |= CpuMask;
443 void CodeGenSchedModels::checkMCInstPredicates() const {
444 RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
445 if (MCPredicates.empty())
446 return;
448 // A target cannot have multiple TIIPredicate definitions with the same name.
449 llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size());
450 for (const Record *TIIPred : MCPredicates) {
451 StringRef Name = TIIPred->getValueAsString("FunctionName");
452 StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name);
453 if (It == TIIPredicates.end()) {
454 TIIPredicates[Name] = TIIPred;
455 continue;
458 PrintError(TIIPred->getLoc(),
459 "TIIPredicate " + Name + " is multiply defined.");
460 PrintNote(It->second->getLoc(),
461 " Previous definition of " + Name + " was here.");
462 PrintFatalError(TIIPred->getLoc(),
463 "Found conflicting definitions of TIIPredicate.");
467 void CodeGenSchedModels::collectRetireControlUnits() {
468 RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit");
470 for (Record *RCU : Units) {
471 CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel"));
472 if (PM.RetireControlUnit) {
473 PrintError(RCU->getLoc(),
474 "Expected a single RetireControlUnit definition");
475 PrintNote(PM.RetireControlUnit->getLoc(),
476 "Previous definition of RetireControlUnit was here");
478 PM.RetireControlUnit = RCU;
482 /// Collect optional processor information.
483 void CodeGenSchedModels::collectOptionalProcessorInfo() {
484 // Find register file definitions for each processor.
485 collectRegisterFiles();
487 // Collect processor RetireControlUnit descriptors if available.
488 collectRetireControlUnits();
490 checkCompleteness();
493 /// Gather all processor models.
494 void CodeGenSchedModels::collectProcModels() {
495 RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
496 llvm::sort(ProcRecords, LessRecordFieldName());
498 // Reserve space because we can. Reallocation would be ok.
499 ProcModels.reserve(ProcRecords.size()+1);
501 // Use idx=0 for NoModel/NoItineraries.
502 Record *NoModelDef = Records.getDef("NoSchedModel");
503 Record *NoItinsDef = Records.getDef("NoItineraries");
504 ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
505 ProcModelMap[NoModelDef] = 0;
507 // For each processor, find a unique machine model.
508 LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
509 for (Record *ProcRecord : ProcRecords)
510 addProcModel(ProcRecord);
513 /// Get a unique processor model based on the defined MachineModel and
514 /// ProcessorItineraries.
515 void CodeGenSchedModels::addProcModel(Record *ProcDef) {
516 Record *ModelKey = getModelOrItinDef(ProcDef);
517 if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
518 return;
520 std::string Name = ModelKey->getName();
521 if (ModelKey->isSubClassOf("SchedMachineModel")) {
522 Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
523 ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
525 else {
526 // An itinerary is defined without a machine model. Infer a new model.
527 if (!ModelKey->getValueAsListOfDefs("IID").empty())
528 Name = Name + "Model";
529 ProcModels.emplace_back(ProcModels.size(), Name,
530 ProcDef->getValueAsDef("SchedModel"), ModelKey);
532 LLVM_DEBUG(ProcModels.back().dump());
535 // Recursively find all reachable SchedReadWrite records.
536 static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
537 SmallPtrSet<Record*, 16> &RWSet) {
538 if (!RWSet.insert(RWDef).second)
539 return;
540 RWDefs.push_back(RWDef);
541 // Reads don't currently have sequence records, but they can be added later.
542 if (RWDef->isSubClassOf("WriteSequence")) {
543 RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
544 for (Record *WSRec : Seq)
545 scanSchedRW(WSRec, RWDefs, RWSet);
547 else if (RWDef->isSubClassOf("SchedVariant")) {
548 // Visit each variant (guarded by a different predicate).
549 RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
550 for (Record *Variant : Vars) {
551 // Visit each RW in the sequence selected by the current variant.
552 RecVec Selected = Variant->getValueAsListOfDefs("Selected");
553 for (Record *SelDef : Selected)
554 scanSchedRW(SelDef, RWDefs, RWSet);
559 // Collect and sort all SchedReadWrites reachable via tablegen records.
560 // More may be inferred later when inferring new SchedClasses from variants.
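// A SchedWrite may itself be a WriteSequence, e.g. (hypothetical names):
//   def WriteMulAdd : WriteSequence<[WriteMul, WriteAdd]>;
// scanSchedRW below follows such sequences, and the variants of SchedVariant
// records, transitively.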
561 void CodeGenSchedModels::collectSchedRW() {
562 // Reserve idx=0 for invalid writes/reads.
563 SchedWrites.resize(1);
564 SchedReads.resize(1);
566 SmallPtrSet<Record*, 16> RWSet;
568 // Find all SchedReadWrites referenced by instruction defs.
569 RecVec SWDefs, SRDefs;
570 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
571 Record *SchedDef = Inst->TheDef;
572 if (SchedDef->isValueUnset("SchedRW"))
573 continue;
574 RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
575 for (Record *RW : RWs) {
576 if (RW->isSubClassOf("SchedWrite"))
577 scanSchedRW(RW, SWDefs, RWSet);
578 else {
579 assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
580 scanSchedRW(RW, SRDefs, RWSet);
584 // Find all ReadWrites referenced by InstRW.
585 RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
586 for (Record *InstRWDef : InstRWDefs) {
587 // For all OperandReadWrites.
588 RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites");
589 for (Record *RWDef : RWDefs) {
590 if (RWDef->isSubClassOf("SchedWrite"))
591 scanSchedRW(RWDef, SWDefs, RWSet);
592 else {
593 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
594 scanSchedRW(RWDef, SRDefs, RWSet);
598 // Find all ReadWrites referenced by ItinRW.
599 RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
600 for (Record *ItinRWDef : ItinRWDefs) {
601 // For all OperandReadWrites.
602 RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites");
603 for (Record *RWDef : RWDefs) {
604 if (RWDef->isSubClassOf("SchedWrite"))
605 scanSchedRW(RWDef, SWDefs, RWSet);
606 else {
607 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
608 scanSchedRW(RWDef, SRDefs, RWSet);
612 // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
613 // for the loop below that initializes Alias vectors.
614 RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
615 llvm::sort(AliasDefs, LessRecord());
616 for (Record *ADef : AliasDefs) {
617 Record *MatchDef = ADef->getValueAsDef("MatchRW");
618 Record *AliasDef = ADef->getValueAsDef("AliasRW");
619 if (MatchDef->isSubClassOf("SchedWrite")) {
620 if (!AliasDef->isSubClassOf("SchedWrite"))
621 PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite");
622 scanSchedRW(AliasDef, SWDefs, RWSet);
624 else {
625 assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
626 if (!AliasDef->isSubClassOf("SchedRead"))
627 PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead");
628 scanSchedRW(AliasDef, SRDefs, RWSet);
631 // Sort and add the SchedReadWrites directly referenced by instructions or
632 // itinerary resources. Index reads and writes in separate domains.
633 llvm::sort(SWDefs, LessRecord());
634 for (Record *SWDef : SWDefs) {
635 assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite");
636 SchedWrites.emplace_back(SchedWrites.size(), SWDef);
638 llvm::sort(SRDefs, LessRecord());
639 for (Record *SRDef : SRDefs) {
640 assert(!getSchedRWIdx(SRDef, /*IsRead=*/true) && "duplicate SchedRead");
641 SchedReads.emplace_back(SchedReads.size(), SRDef);
643 // Initialize WriteSequence vectors.
644 for (CodeGenSchedRW &CGRW : SchedWrites) {
645 if (!CGRW.IsSequence)
646 continue;
647 findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence,
648 /*IsRead=*/false);
650 // Initialize Aliases vectors.
651 for (Record *ADef : AliasDefs) {
652 Record *AliasDef = ADef->getValueAsDef("AliasRW");
653 getSchedRW(AliasDef).IsAlias = true;
654 Record *MatchDef = ADef->getValueAsDef("MatchRW");
655 CodeGenSchedRW &RW = getSchedRW(MatchDef);
656 if (RW.IsAlias)
657 PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias");
658 RW.Aliases.push_back(ADef);
660 LLVM_DEBUG(
661 dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
662 for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
663 dbgs() << WIdx << ": ";
664 SchedWrites[WIdx].dump();
665 dbgs() << '\n';
666 } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd;
667 ++RIdx) {
668 dbgs() << RIdx << ": ";
669 SchedReads[RIdx].dump();
670 dbgs() << '\n';
671 } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
672 for (Record *RWDef
673 : RWDefs) {
674 if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
675 StringRef Name = RWDef->getName();
676 if (Name != "NoWrite" && Name != "ReadDefault")
677 dbgs() << "Unused SchedReadWrite " << Name << '\n';
682 /// Compute a SchedWrite name from a sequence of writes.
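/// For example, the write sequence {WriteMul, WriteAdd} (hypothetical names)
/// is named "(WriteMul_WriteAdd)".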
683 std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) {
684 std::string Name("(");
685 for (auto I = Seq.begin(), E = Seq.end(); I != E; ++I) {
686 if (I != Seq.begin())
687 Name += '_';
688 Name += getSchedRW(*I, IsRead).Name;
690 Name += ')';
691 return Name;
694 unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def,
695 bool IsRead) const {
696 const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
697 const auto I = find_if(
698 RWVec, [Def](const CodeGenSchedRW &RW) { return RW.TheDef == Def; });
699 return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
702 bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
703 for (const CodeGenSchedRW &Read : SchedReads) {
704 Record *ReadDef = Read.TheDef;
705 if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
706 continue;
708 RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
709 if (is_contained(ValidWrites, WriteDef)) {
710 return true;
713 return false;
716 static void splitSchedReadWrites(const RecVec &RWDefs,
717 RecVec &WriteDefs, RecVec &ReadDefs) {
718 for (Record *RWDef : RWDefs) {
719 if (RWDef->isSubClassOf("SchedWrite"))
720 WriteDefs.push_back(RWDef);
721 else {
722 assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
723 ReadDefs.push_back(RWDef);
728 // Split the SchedReadWrites defs and call findRWs for each list.
729 void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
730 IdxVec &Writes, IdxVec &Reads) const {
731 RecVec WriteDefs;
732 RecVec ReadDefs;
733 splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
734 findRWs(WriteDefs, Writes, false);
735 findRWs(ReadDefs, Reads, true);
738 // Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
739 void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
740 bool IsRead) const {
741 for (Record *RWDef : RWDefs) {
742 unsigned Idx = getSchedRWIdx(RWDef, IsRead);
743 assert(Idx && "failed to collect SchedReadWrite");
744 RWs.push_back(Idx);
748 void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
749 bool IsRead) const {
750 const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
751 if (!SchedRW.IsSequence) {
752 RWSeq.push_back(RWIdx);
753 return;
755 int Repeat =
756 SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
757 for (int i = 0; i < Repeat; ++i) {
758 for (unsigned I : SchedRW.Sequence) {
759 expandRWSequence(I, RWSeq, IsRead);
764 // Expand a SchedWrite as a sequence following any aliases that coincide with
765 // the given processor model.
766 void CodeGenSchedModels::expandRWSeqForProc(
767 unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
768 const CodeGenProcModel &ProcModel) const {
770 const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
771 Record *AliasDef = nullptr;
772 for (const Record *Rec : SchedWrite.Aliases) {
773 const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
774 if (Rec->getValueInit("SchedModel")->isComplete()) {
775 Record *ModelDef = Rec->getValueAsDef("SchedModel");
776 if (&getProcModel(ModelDef) != &ProcModel)
777 continue;
779 if (AliasDef)
780 PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
781 "defined for processor " + ProcModel.ModelName +
782 " Ensure only one SchedAlias exists per RW.");
783 AliasDef = AliasRW.TheDef;
785 if (AliasDef) {
786 expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
787 RWSeq, IsRead, ProcModel);
788 return;
790 if (!SchedWrite.IsSequence) {
791 RWSeq.push_back(RWIdx);
792 return;
794 int Repeat =
795 SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
796 for (int I = 0, E = Repeat; I < E; ++I) {
797 for (unsigned Idx : SchedWrite.Sequence) {
798 expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
803 // Find the existing SchedWrite that models this sequence of writes.
804 unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
805 bool IsRead) {
806 std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
808 auto I = find_if(RWVec, [Seq](CodeGenSchedRW &RW) {
809 return makeArrayRef(RW.Sequence) == Seq;
811 // Index zero reserved for invalid RW.
812 return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
815 /// Add this ReadWrite if it doesn't already exist.
816 unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
817 bool IsRead) {
818 assert(!Seq.empty() && "cannot insert empty sequence");
819 if (Seq.size() == 1)
820 return Seq.back();
822 unsigned Idx = findRWForSequence(Seq, IsRead);
823 if (Idx)
824 return Idx;
826 std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
827 unsigned RWIdx = RWVec.size();
828 CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
829 RWVec.push_back(SchedRW);
830 return RWIdx;
833 /// Visit all the instruction definitions for this target to gather and
834 /// enumerate the itinerary classes. These are the explicitly specified
835 /// SchedClasses. More SchedClasses may be inferred.
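/// For example (hypothetical names), every instruction declared with
/// Sched<[WriteALU, ReadALU, ReadALU]> and no itinerary maps to the single
/// SchedClass keyed by that write/read signature.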
836 void CodeGenSchedModels::collectSchedClasses() {
838 // NoItinerary is always the first class at Idx=0
839 assert(SchedClasses.empty() && "Expected empty sched class");
840 SchedClasses.emplace_back(0, "NoInstrModel",
841 Records.getDef("NoItinerary"));
842 SchedClasses.back().ProcIndices.push_back(0);
844 // Create a SchedClass for each unique combination of itinerary class and
845 // SchedRW list.
846 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
847 Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
848 IdxVec Writes, Reads;
849 if (!Inst->TheDef->isValueUnset("SchedRW"))
850 findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);
852 // ProcIdx == 0 indicates the class applies to all processors.
853 unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/{0});
854 InstrClassMap[Inst->TheDef] = SCIdx;
856 // Create classes for InstRW defs.
857 RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
858 llvm::sort(InstRWDefs, LessRecord());
859 LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
860 for (Record *RWDef : InstRWDefs)
861 createInstRWClass(RWDef);
863 NumInstrSchedClasses = SchedClasses.size();
865 bool EnableDump = false;
866 LLVM_DEBUG(EnableDump = true);
867 if (!EnableDump)
868 return;
870 LLVM_DEBUG(
871 dbgs()
872 << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
873 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
874 StringRef InstName = Inst->TheDef->getName();
875 unsigned SCIdx = getSchedClassIdx(*Inst);
876 if (!SCIdx) {
877 LLVM_DEBUG({
878 if (!Inst->hasNoSchedulingInfo)
879 dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
881 continue;
883 CodeGenSchedClass &SC = getSchedClass(SCIdx);
884 if (SC.ProcIndices[0] != 0)
885 PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
886 "must not be subtarget specific.");
888 IdxVec ProcIndices;
889 if (SC.ItinClassDef->getName() != "NoItinerary") {
890 ProcIndices.push_back(0);
891 dbgs() << "Itinerary for " << InstName << ": "
892 << SC.ItinClassDef->getName() << '\n';
894 if (!SC.Writes.empty()) {
895 ProcIndices.push_back(0);
896 LLVM_DEBUG({
897 dbgs() << "SchedRW machine model for " << InstName;
898 for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE;
899 ++WI)
900 dbgs() << " " << SchedWrites[*WI].Name;
901 for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
902 dbgs() << " " << SchedReads[*RI].Name;
903 dbgs() << '\n';
906 const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
907 for (Record *RWDef : RWDefs) {
908 const CodeGenProcModel &ProcModel =
909 getProcModel(RWDef->getValueAsDef("SchedModel"));
910 ProcIndices.push_back(ProcModel.Index);
911 LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
912 << InstName);
913 IdxVec Writes;
914 IdxVec Reads;
915 findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
916 Writes, Reads);
917 LLVM_DEBUG({
918 for (unsigned WIdx : Writes)
919 dbgs() << " " << SchedWrites[WIdx].Name;
920 for (unsigned RIdx : Reads)
921 dbgs() << " " << SchedReads[RIdx].Name;
922 dbgs() << '\n';
925 // If ProcIndices contains zero, the class applies to all processors.
926 LLVM_DEBUG({
927 if (!std::count(ProcIndices.begin(), ProcIndices.end(), 0)) {
928 for (const CodeGenProcModel &PM : ProcModels) {
929 if (!std::count(ProcIndices.begin(), ProcIndices.end(), PM.Index))
930 dbgs() << "No machine model for " << Inst->TheDef->getName()
931 << " on processor " << PM.ModelName << '\n';
938 // Get the SchedClass index for an instruction.
939 unsigned
940 CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
941 return InstrClassMap.lookup(Inst.TheDef);
944 std::string
945 CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
946 ArrayRef<unsigned> OperWrites,
947 ArrayRef<unsigned> OperReads) {
949 std::string Name;
950 if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
951 Name = ItinClassDef->getName();
952 for (unsigned Idx : OperWrites) {
953 if (!Name.empty())
954 Name += '_';
955 Name += SchedWrites[Idx].Name;
957 for (unsigned Idx : OperReads) {
958 Name += '_';
959 Name += SchedReads[Idx].Name;
961 return Name;
964 std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {
966 std::string Name;
967 for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
968 if (I != InstDefs.begin())
969 Name += '_';
970 Name += (*I)->getName();
972 return Name;
975 /// Add an inferred sched class from an itinerary class and per-operand list of
976 /// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
977 /// processors that may utilize this class.
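/// For example, if a class with the same itinerary class and the same
/// per-operand {OperWrites, OperReads} lists already exists, no new class is
/// created; only its ProcIndices set is unioned with the given ProcIndices.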
978 unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
979 ArrayRef<unsigned> OperWrites,
980 ArrayRef<unsigned> OperReads,
981 ArrayRef<unsigned> ProcIndices) {
982 assert(!ProcIndices.empty() && "expect at least one ProcIdx");
984 auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
985 return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
988 auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
989 unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
990 if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
991 IdxVec PI;
992 std::set_union(SchedClasses[Idx].ProcIndices.begin(),
993 SchedClasses[Idx].ProcIndices.end(),
994 ProcIndices.begin(), ProcIndices.end(),
995 std::back_inserter(PI));
996 SchedClasses[Idx].ProcIndices = std::move(PI);
997 return Idx;
999 Idx = SchedClasses.size();
1000 SchedClasses.emplace_back(Idx,
1001 createSchedClassName(ItinClassDef, OperWrites,
1002 OperReads),
1003 ItinClassDef);
1004 CodeGenSchedClass &SC = SchedClasses.back();
1005 SC.Writes = OperWrites;
1006 SC.Reads = OperReads;
1007 SC.ProcIndices = ProcIndices;
1009 return Idx;
1012 // Create classes for each set of opcodes that are in the same InstReadWrite
1013 // definition across all processors.
1014 void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
1015 // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
1016 // intersects with an existing class via a previous InstRWDef. Instrs that do
1017 // not intersect with an existing class refer back to their former class as
1018 // determined from ItinDef or SchedRW.
1019 SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
1020 // Sort Instrs into sets.
1021 const RecVec *InstDefs = Sets.expand(InstRWDef);
1022 if (InstDefs->empty())
1023 PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");
1025 for (Record *InstDef : *InstDefs) {
1026 InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
1027 if (Pos == InstrClassMap.end())
1028 PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
1029 unsigned SCIdx = Pos->second;
1030 ClassInstrs[SCIdx].push_back(InstDef);
1032 // For each set of Instrs, create a new class if necessary, and map or remap
1033 // the Instrs to it.
1034 for (auto &Entry : ClassInstrs) {
1035 unsigned OldSCIdx = Entry.first;
1036 ArrayRef<Record*> InstDefs = Entry.second;
1037 // If all instrs in the current class are accounted for, then leave
1038 // them mapped to their old class.
1039 if (OldSCIdx) {
1040 const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
1041 if (!RWDefs.empty()) {
1042 const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
1043 unsigned OrigNumInstrs =
1044 count_if(*OrigInstDefs, [&](Record *OIDef) {
1045 return InstrClassMap[OIDef] == OldSCIdx;
1047 if (OrigNumInstrs == InstDefs.size()) {
1048 assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
1049 "expected a generic SchedClass");
1050 Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
1051 // Make sure we didn't already have an InstRW containing this
1052 // instruction on this model.
1053 for (Record *RWD : RWDefs) {
1054 if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
1055 RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
1056 for (Record *Inst : InstDefs) {
1057 PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
1058 Inst->getName() + " also matches " +
1059 RWD->getValue("Instrs")->getValue()->getAsString());
1063 LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
1064 << SchedClasses[OldSCIdx].Name << " on "
1065 << RWModelDef->getName() << "\n");
1066 SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
1067 continue;
1071 unsigned SCIdx = SchedClasses.size();
1072 SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
1073 CodeGenSchedClass &SC = SchedClasses.back();
1074 LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
1075 << InstRWDef->getValueAsDef("SchedModel")->getName()
1076 << "\n");
1078 // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
1079 SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
1080 SC.Writes = SchedClasses[OldSCIdx].Writes;
1081 SC.Reads = SchedClasses[OldSCIdx].Reads;
1082 SC.ProcIndices.push_back(0);
1083 // If we had an old class, copy its InstRWs to this new class.
1084 if (OldSCIdx) {
1085 Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
1086 for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
1087 if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
1088 for (Record *InstDef : InstDefs) {
1089 PrintFatalError(OldRWDef->getLoc(), "Overlapping InstRW def " +
1090 InstDef->getName() + " also matches " +
1091 OldRWDef->getValue("Instrs")->getValue()->getAsString());
1094 assert(OldRWDef != InstRWDef &&
1095 "SchedClass has duplicate InstRW def");
1096 SC.InstRWs.push_back(OldRWDef);
1099 // Map each Instr to this new class.
1100 for (Record *InstDef : InstDefs)
1101 InstrClassMap[InstDef] = SCIdx;
1102 SC.InstRWs.push_back(InstRWDef);
1106 // True if collectProcItins found anything.
1107 bool CodeGenSchedModels::hasItineraries() const {
1108 for (const CodeGenProcModel &PM : make_range(procModelBegin(),procModelEnd()))
1109 if (PM.hasItineraries())
1110 return true;
1111 return false;
1114 // Gather the processor itineraries.
1115 void CodeGenSchedModels::collectProcItins() {
1116 LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
1117 for (CodeGenProcModel &ProcModel : ProcModels) {
1118 if (!ProcModel.hasItineraries())
1119 continue;
1121 RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
1122 assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");
1124 // Populate ItinDefList with Itinerary records.
1125 ProcModel.ItinDefList.resize(NumInstrSchedClasses);
1127 // Insert each itinerary data record in the correct position within
1128 // the processor model's ItinDefList.
1129 for (Record *ItinData : ItinRecords) {
1130 const Record *ItinDef = ItinData->getValueAsDef("TheClass");
1131 bool FoundClass = false;
1133 for (const CodeGenSchedClass &SC :
1134 make_range(schedClassBegin(), schedClassEnd())) {
1135 // Multiple SchedClasses may share an itinerary. Update all of them.
1136 if (SC.ItinClassDef == ItinDef) {
1137 ProcModel.ItinDefList[SC.Index] = ItinData;
1138 FoundClass = true;
1141 if (!FoundClass) {
1142 LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
1143 << " missing class for itinerary "
1144 << ItinDef->getName() << '\n');
1147 // Check for missing itinerary entries.
1148 assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
1149 LLVM_DEBUG(
1150 for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
1151 if (!ProcModel.ItinDefList[i])
1152 dbgs() << ProcModel.ItinsDef->getName()
1153 << " missing itinerary for class " << SchedClasses[i].Name
1154 << '\n';
1159 // Gather the read/write types for each itinerary class.
1160 void CodeGenSchedModels::collectProcItinRW() {
1161 RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
1162 llvm::sort(ItinRWDefs, LessRecord());
1163 for (Record *RWDef : ItinRWDefs) {
1164 if (!RWDef->getValueInit("SchedModel")->isComplete())
1165 PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
1166 Record *ModelDef = RWDef->getValueAsDef("SchedModel");
1167 ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
1168 if (I == ProcModelMap.end()) {
1169 PrintFatalError(RWDef->getLoc(), "Undefined SchedMachineModel "
1170 + ModelDef->getName());
1172 ProcModels[I->second].ItinRWDefs.push_back(RWDef);
1176 // Gather the unsupported features for processor models.
1177 void CodeGenSchedModels::collectProcUnsupportedFeatures() {
1178 for (CodeGenProcModel &ProcModel : ProcModels) {
1179 for (Record *Pred : ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures")) {
1180 ProcModel.UnsupportedFeaturesDefs.push_back(Pred);
1185 /// Infer new classes from existing classes. In the process, this may create new
1186 /// SchedWrites from sequences of existing SchedWrites.
1187 void CodeGenSchedModels::inferSchedClasses() {
1188 LLVM_DEBUG(
1189 dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
1190 LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");
1192 // Visit all existing classes and newly created classes.
1193 for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
1194 assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");
1196 if (SchedClasses[Idx].ItinClassDef)
1197 inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
1198 if (!SchedClasses[Idx].InstRWs.empty())
1199 inferFromInstRWs(Idx);
1200 if (!SchedClasses[Idx].Writes.empty()) {
1201 inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
1202 Idx, SchedClasses[Idx].ProcIndices);
1204 assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
1205 "too many SchedVariants");
1209 /// Infer classes from per-processor itinerary resources.
1210 void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
1211 unsigned FromClassIdx) {
1212 for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
1213 const CodeGenProcModel &PM = ProcModels[PIdx];
1214 // For all ItinRW entries.
1215 bool HasMatch = false;
1216 for (const Record *Rec : PM.ItinRWDefs) {
1217 RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
1218 if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
1219 continue;
1220 if (HasMatch)
1221 PrintFatalError(Rec->getLoc(), "Duplicate itinerary class "
1222 + ItinClassDef->getName()
1223 + " in ItinResources for " + PM.ModelName);
1224 HasMatch = true;
1225 IdxVec Writes, Reads;
1226 findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
1227 inferFromRW(Writes, Reads, FromClassIdx, PIdx);
1232 /// Infer classes from per-processor InstReadWrite definitions.
1233 void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
1234 for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
1235 assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstRWs was mutated!");
1236 Record *Rec = SchedClasses[SCIdx].InstRWs[I];
1237 const RecVec *InstDefs = Sets.expand(Rec);
1238 RecIter II = InstDefs->begin(), IE = InstDefs->end();
1239 for (; II != IE; ++II) {
1240 if (InstrClassMap[*II] == SCIdx)
1241 break;
1243 // If this class no longer has any instructions mapped to it, it has become
1244 // irrelevant.
1245 if (II == IE)
1246 continue;
1247 IdxVec Writes, Reads;
1248 findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
1249 unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
1250 inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
1254 namespace {
1256 // Helper for substituteVariantOperand.
1257 struct TransVariant {
1258 Record *VarOrSeqDef; // Variant or sequence.
1259 unsigned RWIdx; // Index of this variant or sequence's matched type.
1260 unsigned ProcIdx; // Processor model index or zero for any.
1261 unsigned TransVecIdx; // Index into PredTransitions::TransVec.
1263 TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
1264 VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
1267 // Associate a predicate with the SchedReadWrite that it guards.
1268 // RWIdx is the index of the read/write variant.
1269 struct PredCheck {
1270 bool IsRead;
1271 unsigned RWIdx;
1272 Record *Predicate;
1274 PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
1277 // A Predicate transition is a list of RW sequences guarded by a PredTerm.
1278 struct PredTransition {
1279 // A predicate term is a conjunction of PredChecks.
1280 SmallVector<PredCheck, 4> PredTerm;
1281 SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
1282 SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
1283 SmallVector<unsigned, 4> ProcIndices;
1286 // Encapsulate a set of partially constructed transitions.
1287 // The results are built by repeated calls to substituteVariants.
1288 class PredTransitions {
1289 CodeGenSchedModels &SchedModels;
1291 public:
1292 std::vector<PredTransition> TransVec;
1294 PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}
1296 void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
1297 bool IsRead, unsigned StartIdx);
1299 void substituteVariants(const PredTransition &Trans);
1301 #ifndef NDEBUG
1302 void dump() const;
1303 #endif
1305 private:
1306 bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
1307 void getIntersectingVariants(
1308 const CodeGenSchedRW &SchedRW, unsigned TransIdx,
1309 std::vector<TransVariant> &IntersectingVariants);
1310 void pushVariant(const TransVariant &VInfo, bool IsRead);
1313 } // end anonymous namespace
1315 // Return true if this predicate is mutually exclusive with a PredTerm. This
1316 // degenerates into checking if the predicate is mutually exclusive with any
1317 // predicate in the Term's conjunction.
1319 // All predicates associated with a given SchedRW are considered mutually
1320 // exclusive. This should work even if the conditions expressed by the
1321 // predicates are not exclusive because the predicates for a given SchedWrite
1322 // are always checked in the order they are defined in the .td file. Later
1323 // conditions implicitly negate any prior condition.
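// For example (hypothetical predicate and write names):
//   def WriteVarALU : SchedWriteVariant<[SchedVar<IsCheapPred, [WriteFast]>,
//                                         SchedVar<NoSchedPred, [WriteALU]>]>;
// Here WriteALU is selected only when IsCheapPred fails, so the two variants
// are treated as mutually exclusive.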
1324 bool PredTransitions::mutuallyExclusive(Record *PredDef,
1325 ArrayRef<PredCheck> Term) {
1326 for (const PredCheck &PC: Term) {
1327 if (PC.Predicate == PredDef)
1328 return false;
1330 const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
1331 assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
1332 RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
1333 if (any_of(Variants, [PredDef](const Record *R) {
1334 return R->getValueAsDef("Predicate") == PredDef;
1336 return true;
1338 return false;
1341 static bool hasAliasedVariants(const CodeGenSchedRW &RW,
1342 CodeGenSchedModels &SchedModels) {
1343 if (RW.HasVariants)
1344 return true;
1346 for (Record *Alias : RW.Aliases) {
1347 const CodeGenSchedRW &AliasRW =
1348 SchedModels.getSchedRW(Alias->getValueAsDef("AliasRW"));
1349 if (AliasRW.HasVariants)
1350 return true;
1351 if (AliasRW.IsSequence) {
1352 IdxVec ExpandedRWs;
1353 SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
1354 for (unsigned SI : ExpandedRWs) {
1355 if (hasAliasedVariants(SchedModels.getSchedRW(SI, AliasRW.IsRead),
1356 SchedModels))
1357 return true;
1361 return false;
1364 static bool hasVariant(ArrayRef<PredTransition> Transitions,
1365 CodeGenSchedModels &SchedModels) {
1366 for (const PredTransition &PTI : Transitions) {
1367 for (const SmallVectorImpl<unsigned> &WSI : PTI.WriteSequences)
1368 for (unsigned WI : WSI)
1369 if (hasAliasedVariants(SchedModels.getSchedWrite(WI), SchedModels))
1370 return true;
1372 for (const SmallVectorImpl<unsigned> &RSI : PTI.ReadSequences)
1373 for (unsigned RI : RSI)
1374 if (hasAliasedVariants(SchedModels.getSchedRead(RI), SchedModels))
1375 return true;
1377 return false;
1380 // Populate IntersectingVariants with any variants or aliased sequences of the
1381 // given SchedRW whose processor indices and predicates are not mutually
1382 // exclusive with the given transition.
1383 void PredTransitions::getIntersectingVariants(
1384 const CodeGenSchedRW &SchedRW, unsigned TransIdx,
1385 std::vector<TransVariant> &IntersectingVariants) {
1387 bool GenericRW = false;
1389 std::vector<TransVariant> Variants;
1390 if (SchedRW.HasVariants) {
1391 unsigned VarProcIdx = 0;
1392 if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
1393 Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
1394 VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
1396 // Push each variant. Assign TransVecIdx later.
1397 const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
1398 for (Record *VarDef : VarDefs)
1399 Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
1400 if (VarProcIdx == 0)
1401 GenericRW = true;
1403 for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
1404 AI != AE; ++AI) {
1405 // If either the SchedAlias itself or the SchedReadWrite that it aliases
1406 // to is defined within a processor model, constrain all variants to
1407 // that processor.
1408 unsigned AliasProcIdx = 0;
1409 if ((*AI)->getValueInit("SchedModel")->isComplete()) {
1410 Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
1411 AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
1413 const CodeGenSchedRW &AliasRW =
1414 SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));
1416 if (AliasRW.HasVariants) {
1417 const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
1418 for (Record *VD : VarDefs)
1419 Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
1421 if (AliasRW.IsSequence)
1422 Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
1423 if (AliasProcIdx == 0)
1424 GenericRW = true;
1426 for (TransVariant &Variant : Variants) {
1427 // Don't expand variants if the processor models don't intersect.
1428 // A zero processor index means any processor.
1429 SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
1430 if (ProcIndices[0] && Variant.ProcIdx) {
1431 unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
1432 Variant.ProcIdx);
1433 if (!Cnt)
1434 continue;
1435 if (Cnt > 1) {
1436 const CodeGenProcModel &PM =
1437 *(SchedModels.procModelBegin() + Variant.ProcIdx);
1438 PrintFatalError(Variant.VarOrSeqDef->getLoc(),
1439 "Multiple variants defined for processor " +
1440 PM.ModelName +
1441 " Ensure only one SchedAlias exists per RW.");
1444 if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
1445 Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
1446 if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
1447 continue;
1449 if (IntersectingVariants.empty()) {
1450 // The first variant builds on the existing transition.
1451 Variant.TransVecIdx = TransIdx;
1452 IntersectingVariants.push_back(Variant);
1454 else {
1455 // Push another copy of the current transition for more variants.
1456 Variant.TransVecIdx = TransVec.size();
1457 IntersectingVariants.push_back(Variant);
1458 TransVec.push_back(TransVec[TransIdx]);
1461 if (GenericRW && IntersectingVariants.empty()) {
1462 PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
1463 "a matching predicate on any processor");
1467 // Push the Reads/Writes selected by this variant onto the PredTransition
1468 // specified by VInfo.
1469 void PredTransitions::
1470 pushVariant(const TransVariant &VInfo, bool IsRead) {
1471 PredTransition &Trans = TransVec[VInfo.TransVecIdx];
1473 // If this operand transition is reached through a processor-specific alias,
1474 // then the whole transition is specific to this processor.
1475 if (VInfo.ProcIdx != 0)
1476 Trans.ProcIndices.assign(1, VInfo.ProcIdx);
1478 IdxVec SelectedRWs;
1479 if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
1480 Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
1481 Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx, PredDef);
1482 RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
1483 SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
1485 else {
1486 assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
1487 "variant must be a SchedVariant or aliased WriteSequence");
1488 SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
1491 const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);
1493 SmallVectorImpl<SmallVector<unsigned,4>> &RWSequences = IsRead
1494 ? Trans.ReadSequences : Trans.WriteSequences;
1495 if (SchedRW.IsVariadic) {
1496 unsigned OperIdx = RWSequences.size()-1;
1497 // Make N-1 copies of this transition's last sequence.
1498 RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
1499 RWSequences[OperIdx]);
1500 // Push each of the N elements of the SelectedRWs onto a copy of the last
1501 // sequence (split the current operand into N operands).
1502 // Note that write sequences should be expanded within this loop--the entire
1503 // sequence belongs to a single operand.
1504 for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
1505 RWI != RWE; ++RWI, ++OperIdx) {
1506 IdxVec ExpandedRWs;
1507 if (IsRead)
1508 ExpandedRWs.push_back(*RWI);
1509 else
1510 SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
1511 RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
1512 ExpandedRWs.begin(), ExpandedRWs.end());
1514 assert(OperIdx == RWSequences.size() && "missed a sequence");
1516 else {
1517 // Push this transition's expanded sequence onto this transition's last
1518 // sequence (add to the current operand's sequence).
1519 SmallVectorImpl<unsigned> &Seq = RWSequences.back();
1520 IdxVec ExpandedRWs;
1521 for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
1522 RWI != RWE; ++RWI) {
1523 if (IsRead)
1524 ExpandedRWs.push_back(*RWI);
1525 else
1526 SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
1528 Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
1532 // RWSeq is a sequence of all Reads or all Writes for the next read or write
1533 // operand. StartIdx is an index into TransVec where partial results
1534 // start. RWSeq must be applied to all transitions between StartIdx and the end
1535 // of TransVec.
1536 void PredTransitions::substituteVariantOperand(
1537 const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
1539 // Visit each original RW within the current sequence.
1540 for (SmallVectorImpl<unsigned>::const_iterator
1541 RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
1542 const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
1543 // Push this RW on all partial PredTransitions or distribute variants.
1544 // New PredTransitions may be pushed within this loop which should not be
1545 // revisited (TransEnd must be loop invariant).
1546 for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
1547 TransIdx != TransEnd; ++TransIdx) {
1548 // In the common case, push RW onto the current operand's sequence.
1549 if (!hasAliasedVariants(SchedRW, SchedModels)) {
1550 if (IsRead)
1551 TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
1552 else
1553 TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
1554 continue;
1555 }
1556 // Distribute this partial PredTransition across intersecting variants.
1557 // This will push copies of TransVec[TransIdx] onto the back of TransVec.
1558 std::vector<TransVariant> IntersectingVariants;
1559 getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
1560 // Now expand each variant on top of its copy of the transition.
1561 for (std::vector<TransVariant>::const_iterator
1562 IVI = IntersectingVariants.begin(),
1563 IVE = IntersectingVariants.end();
1564 IVI != IVE; ++IVI) {
1565 pushVariant(*IVI, IsRead);
1566 }
1567 }
1568 }
1569 }
1571 // For each variant of a Read/Write in Trans, substitute the sequence of
1572 // Read/Writes guarded by the variant. This is exponential in the number of
1573 // variant Read/Writes, but in practice detection of mutually exclusive
1574 // predicates should result in linear growth in the total number of variants.
1575 //
1576 // This is one step in a breadth-first search of nested variants.
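// Concretely, each call seeds one new PredTransition that inherits the
// incoming predicate term and processor indices, then re-expands every write
// and read sequence; any variant RW encountered along the way forks the
// partial transitions built so far.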
1577 void PredTransitions::substituteVariants(const PredTransition &Trans) {
1578 // Build up a set of partial results starting at the back of
1579 // PredTransitions. Remember the first new transition.
1580 unsigned StartIdx = TransVec.size();
1581 TransVec.emplace_back();
1582 TransVec.back().PredTerm = Trans.PredTerm;
1583 TransVec.back().ProcIndices = Trans.ProcIndices;
1585 // Visit each original write sequence.
1586 for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
1587 WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
1588 WSI != WSE; ++WSI) {
1589 // Push a new (empty) write sequence onto all partial Transitions.
1590 for (std::vector<PredTransition>::iterator I =
1591 TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
1592 I->WriteSequences.emplace_back();
1593 }
1594 substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
1595 }
1596 // Visit each original read sequence.
1597 for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
1598 RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
1599 RSI != RSE; ++RSI) {
1600 // Push a new (empty) read sequence onto all partial Transitions.
1601 for (std::vector<PredTransition>::iterator I =
1602 TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
1603 I->ReadSequences.emplace_back();
1604 }
1605 substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
1606 }
1607 }
1609 // Create a new SchedClass for each variant found by inferFromRW.
1610 static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
1611 unsigned FromClassIdx,
1612 CodeGenSchedModels &SchedModels) {
1613 // For each PredTransition, create a new CodeGenSchedTransition, which usually
1614 // requires creating a new SchedClass.
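// findOrInsertRW collapses each multi-RW sequence to a single index (creating
// a new SchedRW sequence entry if needed), so the target class can be
// described with one write and one read per operand.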
1615 for (ArrayRef<PredTransition>::iterator
1616 I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
1617 IdxVec OperWritesVariant;
1618 transform(I->WriteSequences, std::back_inserter(OperWritesVariant),
1619 [&SchedModels](ArrayRef<unsigned> WS) {
1620 return SchedModels.findOrInsertRW(WS, /*IsRead=*/false);
1621 });
1622 IdxVec OperReadsVariant;
1623 transform(I->ReadSequences, std::back_inserter(OperReadsVariant),
1624 [&SchedModels](ArrayRef<unsigned> RS) {
1625 return SchedModels.findOrInsertRW(RS, /*IsRead=*/true);
1626 });
1627 CodeGenSchedTransition SCTrans;
1628 SCTrans.ToClassIdx =
1629 SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
1630 OperReadsVariant, I->ProcIndices);
1631 SCTrans.ProcIndices.assign(I->ProcIndices.begin(), I->ProcIndices.end());
1632 // The final PredTerm is the unique set of predicates guarding the transition.
1633 RecVec Preds;
1634 transform(I->PredTerm, std::back_inserter(Preds),
1635 [](const PredCheck &P) {
1636 return P.Predicate;
1637 });
1638 Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
1639 SCTrans.PredTerm = std::move(Preds);
1640 SchedModels.getSchedClass(FromClassIdx)
1641 .Transitions.push_back(std::move(SCTrans));
1642 }
1643 }
1645 // Create new SchedClasses for the given ReadWrite list. If any of the
1646 // ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
1647 // of the ReadWrite list, following Aliases if necessary.
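// Illustrative TableGen shape (target-specific names hypothetical):
//   def WriteIMulVar : SchedWriteVariant<[
//     SchedVar<IsThreeOperandPred, [WriteIMul3Op]>,
//     SchedVar<NoSchedPred,        [WriteIMulDefault]>]>;
// A class that uses WriteIMulVar gets one inferred SchedClass per reachable
// variant, reached from the original class through a predicated transition.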
1648 void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
1649 ArrayRef<unsigned> OperReads,
1650 unsigned FromClassIdx,
1651 ArrayRef<unsigned> ProcIndices) {
1652 LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
1653 dbgs() << ") ");
1655 // Create a seed transition with an empty PredTerm and the expanded sequences
1656 // of SchedWrites for the current SchedClass.
1657 std::vector<PredTransition> LastTransitions;
1658 LastTransitions.emplace_back();
1659 LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
1660 ProcIndices.end());
1662 for (unsigned WriteIdx : OperWrites) {
1663 IdxVec WriteSeq;
1664 expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false);
1665 LastTransitions[0].WriteSequences.emplace_back();
1666 SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
1667 Seq.append(WriteSeq.begin(), WriteSeq.end());
1668 LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
1669 }
1670 LLVM_DEBUG(dbgs() << " Reads: ");
1671 for (unsigned ReadIdx : OperReads) {
1672 IdxVec ReadSeq;
1673 expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
1674 LastTransitions[0].ReadSequences.emplace_back();
1675 SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
1676 Seq.append(ReadSeq.begin(), ReadSeq.end());
1677 LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
1678 }
1679 LLVM_DEBUG(dbgs() << '\n');
1681 // Collect all PredTransitions for individual operands.
1682 // Iterate until no variant writes remain.
1683 while (hasVariant(LastTransitions, *this)) {
1684 PredTransitions Transitions(*this);
1685 for (const PredTransition &Trans : LastTransitions)
1686 Transitions.substituteVariants(Trans);
1687 LLVM_DEBUG(Transitions.dump());
1688 LastTransitions.swap(Transitions.TransVec);
1689 }
1690 // If the first transition has no variants, nothing to do.
1691 if (LastTransitions[0].PredTerm.empty())
1692 return;
1694 // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
1695 // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
1696 inferFromTransitions(LastTransitions, FromClassIdx, *this);
1697 }
1699 // Check if any processor resource group contains all resource records in
1700 // SubUnits.
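// Note: resources are compared by record identity, and this returns true only
// when a single ProcResGroup's Resources list is a superset of SubUnits.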
1701 bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
1702 for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
1703 if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
1704 continue;
1705 RecVec SuperUnits =
1706 PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
1707 RecIter RI = SubUnits.begin(), RE = SubUnits.end();
1708 for ( ; RI != RE; ++RI) {
1709 if (!is_contained(SuperUnits, *RI)) {
1710 break;
1711 }
1712 }
1713 if (RI == RE)
1714 return true;
1715 }
1716 return false;
1717 }
1719 // Verify that overlapping groups have a common supergroup.
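// Example (hypothetical names): if GroupAB = [UnitA, UnitB] and
// GroupBC = [UnitB, UnitC] both exist, some declared ProcResGroup must contain
// {UnitA, UnitB, UnitC}, e.g. GroupABC; otherwise this is a fatal error.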
1720 void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
1721 for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
1722 if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
1723 continue;
1724 RecVec CheckUnits =
1725 PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
1726 for (unsigned j = i+1; j < e; ++j) {
1727 if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
1728 continue;
1729 RecVec OtherUnits =
1730 PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
1731 if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
1732 OtherUnits.begin(), OtherUnits.end())
1733 != CheckUnits.end()) {
1734 // CheckUnits and OtherUnits overlap
1735 OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(),
1736 CheckUnits.end());
1737 if (!hasSuperGroup(OtherUnits, PM)) {
1738 PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
1739 "proc resource group overlaps with "
1740 + PM.ProcResourceDefs[j]->getName()
1741 + " but no supergroup contains both.");
1748 // Collect all the RegisterFile definitions available in this target.
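// Illustrative RegisterFile def (names and numbers hypothetical):
//   def MyIntPRF : RegisterFile<60, [GR32, GR64], [1, 1]>;
// i.e. 60 physical registers backing GR32/GR64, each allocation costing one
// physical register.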
1749 void CodeGenSchedModels::collectRegisterFiles() {
1750 RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile");
1752 // RegisterFiles is the vector of CodeGenRegisterFile.
1753 for (Record *RF : RegisterFileDefs) {
1754 // For each register file definition, construct a CodeGenRegisterFile object
1755 // and add it to the appropriate scheduling model.
1756 CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel"));
1757 PM.RegisterFiles.emplace_back(CodeGenRegisterFile(RF->getName(), RF));
1758 CodeGenRegisterFile &CGRF = PM.RegisterFiles.back();
1759 CGRF.MaxMovesEliminatedPerCycle =
1760 RF->getValueAsInt("MaxMovesEliminatedPerCycle");
1761 CGRF.AllowZeroMoveEliminationOnly =
1762 RF->getValueAsBit("AllowZeroMoveEliminationOnly");
1764 // Now set the number of physical registers as well as the cost of registers
1765 // in each register class.
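// The RegCosts and AllowMoveElimination lists may be shorter than RegClasses:
// a missing cost defaults to 1 and a missing move-elimination bit defaults to
// false (see the loop below).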
1766 CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs");
1767 if (!CGRF.NumPhysRegs) {
1768 PrintFatalError(RF->getLoc(),
1769 "Invalid RegisterFile with zero physical registers");
1772 RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
1773 std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
1774 ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
1775 for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
1776 int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;
1778 bool AllowMoveElim = false;
1779 if (MoveElimInfo->size() > I) {
1780 BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
1781 AllowMoveElim = Val->getValue();
1782 }
1784 CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim);
1785 }
1786 }
1787 }
1789 // Collect and sort WriteRes, ReadAdvance, and ProcResources.
1790 void CodeGenSchedModels::collectProcResources() {
1791 ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits");
1792 ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
1794 // Add any subtarget-specific SchedReadWrites that are directly associated
1795 // with processor resources. Refer to the parent SchedClass's ProcIndices to
1796 // determine which processors they apply to.
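// Collection order below: (1) walk each SchedClass (itinerary-based or
// RW-based, including per-processor InstRW overrides) and pull in the
// resources it references; (2) add explicitly defined WriteRes/SchedWriteRes
// and ReadAdvance/SchedReadAdvance records; (3) add ProcResGroup and
// ProcResourceUnits defs bound to a SchedModel; then sort and verify.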
1797 for (const CodeGenSchedClass &SC :
1798 make_range(schedClassBegin(), schedClassEnd())) {
1799 if (SC.ItinClassDef) {
1800 collectItinProcResources(SC.ItinClassDef);
1801 continue;
1802 }
1804 // This class may have a default ReadWrite list which can be overridden by
1805 // InstRW definitions.
1806 for (Record *RW : SC.InstRWs) {
1807 Record *RWModelDef = RW->getValueAsDef("SchedModel");
1808 unsigned PIdx = getProcModel(RWModelDef).Index;
1809 IdxVec Writes, Reads;
1810 findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
1811 collectRWResources(Writes, Reads, PIdx);
1812 }
1814 collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices);
1815 }
1816 // Add resources separately defined by each subtarget.
1817 RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
1818 for (Record *WR : WRDefs) {
1819 Record *ModelDef = WR->getValueAsDef("SchedModel");
1820 addWriteRes(WR, getProcModel(ModelDef).Index);
1821 }
1822 RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
1823 for (Record *SWR : SWRDefs) {
1824 Record *ModelDef = SWR->getValueAsDef("SchedModel");
1825 addWriteRes(SWR, getProcModel(ModelDef).Index);
1826 }
1827 RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
1828 for (Record *RA : RADefs) {
1829 Record *ModelDef = RA->getValueAsDef("SchedModel");
1830 addReadAdvance(RA, getProcModel(ModelDef).Index);
1831 }
1832 RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
1833 for (Record *SRA : SRADefs) {
1834 if (SRA->getValueInit("SchedModel")->isComplete()) {
1835 Record *ModelDef = SRA->getValueAsDef("SchedModel");
1836 addReadAdvance(SRA, getProcModel(ModelDef).Index);
1837 }
1838 }
1839 // Add ProcResGroups that are defined within this processor model, which may
1840 // not be directly referenced but may directly specify a buffer size.
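// Illustrative (names hypothetical): a group such as
//   def MyLdStQueue : ProcResGroup<[MyLSU0, MyLSU1]> { let BufferSize = 32; }
// is added to its processor model here even if no WriteRes refers to it.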
1841 RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
1842 for (Record *PRG : ProcResGroups) {
1843 if (!PRG->getValueInit("SchedModel")->isComplete())
1844 continue;
1845 CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel"));
1846 if (!is_contained(PM.ProcResourceDefs, PRG))
1847 PM.ProcResourceDefs.push_back(PRG);
1848 }
1849 // Add ProcResourceUnits unconditionally.
1850 for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) {
1851 if (!PRU->getValueInit("SchedModel")->isComplete())
1852 continue;
1853 CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel"));
1854 if (!is_contained(PM.ProcResourceDefs, PRU))
1855 PM.ProcResourceDefs.push_back(PRU);
1856 }
1857 // Finalize each ProcModel by sorting the record arrays.
1858 for (CodeGenProcModel &PM : ProcModels) {
1859 llvm::sort(PM.WriteResDefs, LessRecord());
1860 llvm::sort(PM.ReadAdvanceDefs, LessRecord());
1861 llvm::sort(PM.ProcResourceDefs, LessRecord());
1862 LLVM_DEBUG(
1863 PM.dump();
1864 dbgs() << "WriteResDefs: "; for (RecIter RI = PM.WriteResDefs.begin(),
1865 RE = PM.WriteResDefs.end();
1866 RI != RE; ++RI) {
1867 if ((*RI)->isSubClassOf("WriteRes"))
1868 dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
1869 else
1870 dbgs() << (*RI)->getName() << " ";
1871 } dbgs() << "\nReadAdvanceDefs: ";
1872 for (RecIter RI = PM.ReadAdvanceDefs.begin(),
1873 RE = PM.ReadAdvanceDefs.end();
1874 RI != RE; ++RI) {
1875 if ((*RI)->isSubClassOf("ReadAdvance"))
1876 dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
1877 else
1878 dbgs() << (*RI)->getName() << " ";
1879 } dbgs()
1880 << "\nProcResourceDefs: ";
1881 for (RecIter RI = PM.ProcResourceDefs.begin(),
1882 RE = PM.ProcResourceDefs.end();
1883 RI != RE; ++RI) { dbgs() << (*RI)->getName() << " "; } dbgs()
1884 << '\n');
1885 verifyProcResourceGroups(PM);
1886 }
1888 ProcResourceDefs.clear();
1889 ProcResGroups.clear();
1890 }
1892 void CodeGenSchedModels::checkCompleteness() {
1893 bool Complete = true;
1894 bool HadCompleteModel = false;
1895 for (const CodeGenProcModel &ProcModel : procModels()) {
1896 const bool HasItineraries = ProcModel.hasItineraries();
1897 if (!ProcModel.ModelDef->getValueAsBit("CompleteModel"))
1898 continue;
1899 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
1900 if (Inst->hasNoSchedulingInfo)
1901 continue;
1902 if (ProcModel.isUnsupported(*Inst))
1903 continue;
1904 unsigned SCIdx = getSchedClassIdx(*Inst);
1905 if (!SCIdx) {
1906 if (Inst->TheDef->isValueUnset("SchedRW") && !HadCompleteModel) {
1907 PrintError("No schedule information for instruction '"
1908 + Inst->TheDef->getName() + "'");
1909 Complete = false;
1910 }
1911 continue;
1912 }
1914 const CodeGenSchedClass &SC = getSchedClass(SCIdx);
1915 if (!SC.Writes.empty())
1916 continue;
1917 if (HasItineraries && SC.ItinClassDef != nullptr &&
1918 SC.ItinClassDef->getName() != "NoItinerary")
1919 continue;
1921 const RecVec &InstRWs = SC.InstRWs;
1922 auto I = find_if(InstRWs, [&ProcModel](const Record *R) {
1923 return R->getValueAsDef("SchedModel") == ProcModel.ModelDef;
1924 });
1925 if (I == InstRWs.end()) {
1926 PrintError("'" + ProcModel.ModelName + "' lacks information for '" +
1927 Inst->TheDef->getName() + "'");
1928 Complete = false;
1929 }
1930 }
1931 HadCompleteModel = true;
1932 }
1933 if (!Complete) {
1934 errs() << "\n\nIncomplete schedule models found.\n"
1935 << "- Consider setting 'CompleteModel = 0' while developing new models.\n"
1936 << "- Pseudo instructions can be marked with 'hasNoSchedulingInfo = 1'.\n"
1937 << "- Instructions should usually have Sched<[...]> as a superclass, "
1938 "you may temporarily use an empty list.\n"
1939 << "- Instructions related to unsupported features can be excluded with "
1940 "list<Predicate> UnsupportedFeatures = [HasA,..,HasY]; in the "
1941 "processor model.\n\n";
1942 PrintFatalError("Incomplete schedule model");
1946 // Collect itinerary class resources for each processor.
1947 void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
1948 for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
1949 const CodeGenProcModel &PM = ProcModels[PIdx];
1950 // For all ItinRW entries.
1951 bool HasMatch = false;
1952 for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
1953 II != IE; ++II) {
1954 RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
1955 if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
1956 continue;
1957 if (HasMatch)
1958 PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
1959 + ItinClassDef->getName()
1960 + " in ItinResources for " + PM.ModelName);
1961 HasMatch = true;
1962 IdxVec Writes, Reads;
1963 findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
1964 collectRWResources(Writes, Reads, PIdx);
1965 }
1966 }
1967 }
1969 void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
1970 ArrayRef<unsigned> ProcIndices) {
1971 const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
1972 if (SchedRW.TheDef) {
1973 if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
1974 for (unsigned Idx : ProcIndices)
1975 addWriteRes(SchedRW.TheDef, Idx);
1976 }
1977 else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
1978 for (unsigned Idx : ProcIndices)
1979 addReadAdvance(SchedRW.TheDef, Idx);
1980 }
1981 }
1982 for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
1983 AI != AE; ++AI) {
1984 IdxVec AliasProcIndices;
1985 if ((*AI)->getValueInit("SchedModel")->isComplete()) {
1986 AliasProcIndices.push_back(
1987 getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
1988 }
1989 else
1990 AliasProcIndices = ProcIndices;
1991 const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
1992 assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");
1994 IdxVec ExpandedRWs;
1995 expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
1996 for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
1997 SI != SE; ++SI) {
1998 collectRWResources(*SI, IsRead, AliasProcIndices);
1999 }
2000 }
2001 }
2003 // Collect resources for a set of read/write types and processor indices.
2004 void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes,
2005 ArrayRef<unsigned> Reads,
2006 ArrayRef<unsigned> ProcIndices) {
2007 for (unsigned Idx : Writes)
2008 collectRWResources(Idx, /*IsRead=*/false, ProcIndices);
2010 for (unsigned Idx : Reads)
2011 collectRWResources(Idx, /*IsRead=*/true, ProcIndices);
2012 }
2014 // Find the processor's resource units for this kind of resource.
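// ProcResKind may itself be a ProcResourceUnits def (returned as-is).
// Otherwise, search this model for the unique ProcResourceUnits whose Kind is
// ProcResKind, or the ProcResGroup that is ProcResKind itself; duplicates and
// misses are fatal errors.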
2015 Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
2016 const CodeGenProcModel &PM,
2017 ArrayRef<SMLoc> Loc) const {
2018 if (ProcResKind->isSubClassOf("ProcResourceUnits"))
2019 return ProcResKind;
2021 Record *ProcUnitDef = nullptr;
2022 assert(!ProcResourceDefs.empty());
2023 assert(!ProcResGroups.empty());
2025 for (Record *ProcResDef : ProcResourceDefs) {
2026 if (ProcResDef->getValueAsDef("Kind") == ProcResKind
2027 && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) {
2028 if (ProcUnitDef) {
2029 PrintFatalError(Loc,
2030 "Multiple ProcessorResourceUnits associated with "
2031 + ProcResKind->getName());
2032 }
2033 ProcUnitDef = ProcResDef;
2034 }
2035 }
2036 for (Record *ProcResGroup : ProcResGroups) {
2037 if (ProcResGroup == ProcResKind
2038 && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) {
2039 if (ProcUnitDef) {
2040 PrintFatalError(Loc,
2041 "Multiple ProcessorResourceUnits associated with "
2042 + ProcResKind->getName());
2043 }
2044 ProcUnitDef = ProcResGroup;
2045 }
2046 }
2047 if (!ProcUnitDef) {
2048 PrintFatalError(Loc,
2049 "No ProcessorResources associated with "
2050 + ProcResKind->getName());
2051 }
2052 return ProcUnitDef;
2053 }
2055 // Iteratively add a resource and its super resources.
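// Example (names hypothetical): if MyALU declares 'let Super = MyAnyPort;',
// adding MyALU also adds MyAnyPort, and so on up the Super chain until a
// ProcResGroup or a resource without a Super is reached.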
2056 void CodeGenSchedModels::addProcResource(Record *ProcResKind,
2057 CodeGenProcModel &PM,
2058 ArrayRef<SMLoc> Loc) {
2059 while (true) {
2060 Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc);
2062 // See if this ProcResource is already associated with this processor.
2063 if (is_contained(PM.ProcResourceDefs, ProcResUnits))
2064 return;
2066 PM.ProcResourceDefs.push_back(ProcResUnits);
2067 if (ProcResUnits->isSubClassOf("ProcResGroup"))
2068 return;
2070 if (!ProcResUnits->getValueInit("Super")->isComplete())
2071 return;
2073 ProcResKind = ProcResUnits->getValueAsDef("Super");
2074 }
2075 }
2077 // Add resources for a SchedWrite to this processor if they don't exist.
2078 void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
2079 assert(PIdx && "don't add resources to an invalid Processor model");
2081 RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
2082 if (is_contained(WRDefs, ProcWriteResDef))
2083 return;
2084 WRDefs.push_back(ProcWriteResDef);
2086 // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
2087 RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
2088 for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
2089 WritePRI != WritePRE; ++WritePRI) {
2090 addProcResource(*WritePRI, ProcModels[PIdx], ProcWriteResDef->getLoc());
2091 }
2092 }
2094 // Add resources for a ReadAdvance to this processor if they don't exist.
2095 void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
2096 unsigned PIdx) {
2097 RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
2098 if (is_contained(RADefs, ProcReadAdvanceDef))
2099 return;
2100 RADefs.push_back(ProcReadAdvanceDef);
2101 }
2103 unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
2104 RecIter PRPos = find(ProcResourceDefs, PRDef);
2105 if (PRPos == ProcResourceDefs.end())
2106 PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
2107 "the ProcResources list for " + ModelName);
2108 // Idx=0 is reserved for invalid.
2109 return 1 + (PRPos - ProcResourceDefs.begin());
2110 }
2112 bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const {
2113 for (const Record *TheDef : UnsupportedFeaturesDefs) {
2114 for (const Record *PredDef : Inst.TheDef->getValueAsListOfDefs("Predicates")) {
2115 if (TheDef->getName() == PredDef->getName())
2116 return true;
2117 }
2118 }
2119 return false;
2120 }
2122 #ifndef NDEBUG
2123 void CodeGenProcModel::dump() const {
2124 dbgs() << Index << ": " << ModelName << " "
2125 << (ModelDef ? ModelDef->getName() : "inferred") << " "
2126 << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
2127 }
2129 void CodeGenSchedRW::dump() const {
2130 dbgs() << Name << (IsVariadic ? " (V) " : " ");
2131 if (IsSequence) {
2132 dbgs() << "(";
2133 dumpIdxVec(Sequence);
2134 dbgs() << ")";
2138 void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
2139 dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
2140 << " Writes: ";
2141 for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
2142 SchedModels->getSchedWrite(Writes[i]).dump();
2143 if (i < N-1) {
2144 dbgs() << '\n';
2145 dbgs().indent(10);
2146 }
2147 }
2148 dbgs() << "\n Reads: ";
2149 for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
2150 SchedModels->getSchedRead(Reads[i]).dump();
2151 if (i < N-1) {
2152 dbgs() << '\n';
2153 dbgs().indent(10);
2154 }
2155 }
2156 dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
2157 if (!Transitions.empty()) {
2158 dbgs() << "\n Transitions for Proc ";
2159 for (const CodeGenSchedTransition &Transition : Transitions) {
2160 dumpIdxVec(Transition.ProcIndices);
2161 }
2162 }
2163 }
2165 void PredTransitions::dump() const {
2166 dbgs() << "Expanded Variants:\n";
2167 for (std::vector<PredTransition>::const_iterator
2168 TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
2169 dbgs() << "{";
2170 for (SmallVectorImpl<PredCheck>::const_iterator
2171 PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
2172 PCI != PCE; ++PCI) {
2173 if (PCI != TI->PredTerm.begin())
2174 dbgs() << ", ";
2175 dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
2176 << ":" << PCI->Predicate->getName();
2178 dbgs() << "},\n => {";
2179 for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
2180 WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
2181 WSI != WSE; ++WSI) {
2182 dbgs() << "(";
2183 for (SmallVectorImpl<unsigned>::const_iterator
2184 WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
2185 if (WI != WSI->begin())
2186 dbgs() << ", ";
2187 dbgs() << SchedModels.getSchedWrite(*WI).Name;
2188 }
2189 dbgs() << "),";
2191 dbgs() << "}\n";
2194 #endif // NDEBUG