//===- utils/TableGen/X86FoldTablesEmitter.cpp - X86 backend-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting the memory fold tables of
// the X86 backend instructions.
//
//===----------------------------------------------------------------------===//

#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "X86RecognizableInstr.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/X86FoldTablesUtils.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/TableGenBackend.h"

using namespace llvm;
using namespace X86Disassembler;

namespace {
// Represents an entry in the manually mapped instructions set.
struct ManualMapEntry {
  const char *RegInstStr;
  const char *MemInstStr;
  uint16_t Strategy;
};

// List of instructions requiring explicitly aligned memory.
const char *ExplicitAlign[] = {"MOVDQA", "MOVAPS", "MOVAPD", "MOVNTPS",
                               "MOVNTPD", "MOVNTDQ", "MOVNTDQA"};

// List of instructions NOT requiring explicit memory alignment.
const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
                                 "PCMPESTRM", "PCMPESTRI",
                                 "PCMPISTRM", "PCMPISTRI"};

const ManualMapEntry ManualMapSet[] = {
#define ENTRY(REG, MEM, FLAGS) {#REG, #MEM, FLAGS},
#include "X86ManualFoldTables.def"
};

const std::set<StringRef> NoFoldSet = {
#define NOFOLD(INSN) #INSN,
#include "X86ManualFoldTables.def"
};
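
// Both containers above are populated from X86ManualFoldTables.def via
// X-macros: each ENTRY(REG, MEM, FLAGS) expands to a ManualMapEntry, and each
// NOFOLD(INSN) to the name of an instruction that must never be folded
// automatically.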

static bool isExplicitAlign(const CodeGenInstruction *Inst) {
  return any_of(ExplicitAlign, [Inst](const char *InstStr) {
    return Inst->TheDef->getName().contains(InstStr);
  });
}

static bool isExplicitUnalign(const CodeGenInstruction *Inst) {
  return any_of(ExplicitUnalign, [Inst](const char *InstStr) {
    return Inst->TheDef->getName().contains(InstStr);
  });
}

class X86FoldTablesEmitter {
  RecordKeeper &Records;
  CodeGenTarget Target;

  // Represents an entry in the folding table
  class X86FoldTableEntry {
    const CodeGenInstruction *RegInst;
    const CodeGenInstruction *MemInst;

  public:
    bool NoReverse = false;
    bool NoForward = false;
    bool FoldLoad = false;
    bool FoldStore = false;
    Align Alignment;

    X86FoldTableEntry() = default;
    X86FoldTableEntry(const CodeGenInstruction *RegInst,
                      const CodeGenInstruction *MemInst)
        : RegInst(RegInst), MemInst(MemInst) {}
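
    // Prints one table row; e.g. an entry might look like
    //   {X86::ADD32rr, X86::ADD32rm, 0},
    // with any TB_* attributes OR'ed together in the third field.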
    void print(formatted_raw_ostream &OS) const {
      OS.indent(2);
      OS << "{X86::" << RegInst->TheDef->getName() << ", ";
      OS << "X86::" << MemInst->TheDef->getName() << ", ";

      std::string Attrs;
      if (FoldLoad)
        Attrs += "TB_FOLDED_LOAD|";
      if (FoldStore)
        Attrs += "TB_FOLDED_STORE|";
      if (NoReverse)
        Attrs += "TB_NO_REVERSE|";
      if (NoForward)
        Attrs += "TB_NO_FORWARD|";
      if (Alignment != Align(1))
        Attrs += "TB_ALIGN_" + std::to_string(Alignment.value()) + "|";

      StringRef SimplifiedAttrs = StringRef(Attrs).rtrim("|");
      if (SimplifiedAttrs.empty())
        SimplifiedAttrs = "0";

      OS << SimplifiedAttrs << "},\n";
    }

#ifndef NDEBUG
    // Check that Uses and Defs are the same before and after memory folding.
    void checkCorrectness() const {
      auto &RegInstRec = *RegInst->TheDef;
      auto &MemInstRec = *MemInst->TheDef;
      auto ListOfUsesReg = RegInstRec.getValueAsListOfDefs("Uses");
      auto ListOfUsesMem = MemInstRec.getValueAsListOfDefs("Uses");
      auto ListOfDefsReg = RegInstRec.getValueAsListOfDefs("Defs");
      auto ListOfDefsMem = MemInstRec.getValueAsListOfDefs("Defs");
      if (ListOfUsesReg != ListOfUsesMem || ListOfDefsReg != ListOfDefsMem)
        report_fatal_error("Uses/Defs couldn't be changed after folding " +
                           RegInstRec.getName() + " to " +
                           MemInstRec.getName());
    }
#endif
  };

  // NOTE: We check the fold tables are sorted in X86InstrFoldTables.cpp by the
  // enum of the instruction, which is computed in
  // CodeGenTarget::ComputeInstrsByEnum. So we should use the same comparator
  // here.
  // FIXME: Could we share the code with CodeGenTarget::ComputeInstrsByEnum?
  struct CompareInstrsByEnum {
    bool operator()(const CodeGenInstruction *LHS,
                    const CodeGenInstruction *RHS) const {
      assert(LHS && RHS && "LHS and RHS shouldn't be nullptr");
      const auto &D1 = *LHS->TheDef;
      const auto &D2 = *RHS->TheDef;
      return std::make_tuple(!D1.getValueAsBit("isPseudo"), D1.getName()) <
             std::make_tuple(!D2.getValueAsBit("isPseudo"), D2.getName());
    }
  };

  typedef std::map<const CodeGenInstruction *, X86FoldTableEntry,
                   CompareInstrsByEnum>
      FoldTable;
  // One map per folding table.
  // Table2Addr - Holds instructions whose memory form performs a load and a
  //              store (Read-Modify-Write).
  // Table#i    - Holds instructions whose memory form performs a load OR a
  //              store, and whose #i'th operand is folded.
  FoldTable Table2Addr;
  FoldTable Table0;
  FoldTable Table1;
  FoldTable Table2;
  FoldTable Table3;
  FoldTable Table4;

public:
  X86FoldTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}

  // run - Generate the 6 X86 memory fold tables.
  void run(raw_ostream &OS);

private:
  // Decides to which table to add the entry with the given instructions.
  // S sets the strategy of adding the TB_NO_REVERSE flag.
  void updateTables(const CodeGenInstruction *RegInstr,
                    const CodeGenInstruction *MemInstr, uint16_t S = 0,
                    bool IsManual = false);

  // Generates an X86FoldTableEntry for the given instructions, fills it with
  // the appropriate flags, then adds it to Table.
  void addEntryWithFlags(FoldTable &Table, const CodeGenInstruction *RegInstr,
                         const CodeGenInstruction *MemInstr, uint16_t S,
                         unsigned FoldedIdx, bool isManual);

  // Print the given table as a static const C++ array of type
  // X86MemoryFoldTableEntry.
  void printTable(const FoldTable &Table, StringRef TableName,
                  formatted_raw_ostream &OS) {
    OS << "static const X86MemoryFoldTableEntry MemoryFold" << TableName
       << "[] = {\n";

    for (auto &E : Table)
      E.second.print(OS);

    OS << "};\n\n";
  }
};

// Return true if one of the instruction's operands is a RST register class
static bool hasRSTRegClass(const CodeGenInstruction *Inst) {
  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
    return OpIn.Rec->getName() == "RST" || OpIn.Rec->getName() == "RSTi";
  });
}

// Return true if one of the instruction's operands is a ptr_rc_tailcall
static bool hasPtrTailcallRegClass(const CodeGenInstruction *Inst) {
  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
    return OpIn.Rec->getName() == "ptr_rc_tailcall";
  });
}
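
// Decode a TableGen bits<N> initializer (N <= 8) into a byte, taking bit I of
// the initializer as bit I of the result.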
static uint8_t byteFromBitsInit(const BitsInit *B) {
  unsigned N = B->getNumBits();
  assert(N <= 8 && "Field is too large for uint8_t!");

  uint8_t Value = 0;
  for (unsigned I = 0; I != N; ++I) {
    BitInit *Bit = cast<BitInit>(B->getBit(I));
    Value |= Bit->getValue() << I;
  }
  return Value;
}
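
// mayFoldFromForm/mayFoldToForm classify an instruction's encoding Form:
// register-addressing forms (MRM0r-MRM7r, MRMXr*, MRMDestReg, MRMSrcReg*) may
// be folded from, and memory-addressing forms (MRM0m-MRM7m, MRMXm*,
// MRMDestMem, MRMSrcMem*) may be folded to.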
static bool mayFoldFromForm(uint8_t Form) {
  switch (Form) {
  default:
    return Form >= X86Local::MRM0r && Form <= X86Local::MRM7r;
  case X86Local::MRMXr:
  case X86Local::MRMXrCC:
  case X86Local::MRMDestReg:
  case X86Local::MRMSrcReg:
  case X86Local::MRMSrcReg4VOp3:
  case X86Local::MRMSrcRegOp4:
  case X86Local::MRMSrcRegCC:
    return true;
  }
}

static bool mayFoldToForm(uint8_t Form) {
  switch (Form) {
  default:
    return Form >= X86Local::MRM0m && Form <= X86Local::MRM7m;
  case X86Local::MRMXm:
  case X86Local::MRMXmCC:
  case X86Local::MRMDestMem:
  case X86Local::MRMSrcMem:
  case X86Local::MRMSrcMem4VOp3:
  case X86Local::MRMSrcMemOp4:
  case X86Local::MRMSrcMemCC:
    return true;
  }
}
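
// Returns true when the memory form RHS is the counterpart of the register
// form LHS, pairing each register-addressing form with exactly one
// memory-addressing form (e.g. MRMSrcReg with MRMSrcMem).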
static bool mayFoldFromLeftToRight(uint8_t LHS, uint8_t RHS) {
  switch (LHS) {
  default:
    llvm_unreachable("Unexpected Form!");
  case X86Local::MRM0r:
    return RHS == X86Local::MRM0m;
  case X86Local::MRM1r:
    return RHS == X86Local::MRM1m;
  case X86Local::MRM2r:
    return RHS == X86Local::MRM2m;
  case X86Local::MRM3r:
    return RHS == X86Local::MRM3m;
  case X86Local::MRM4r:
    return RHS == X86Local::MRM4m;
  case X86Local::MRM5r:
    return RHS == X86Local::MRM5m;
  case X86Local::MRM6r:
    return RHS == X86Local::MRM6m;
  case X86Local::MRM7r:
    return RHS == X86Local::MRM7m;
  case X86Local::MRMXr:
    return RHS == X86Local::MRMXm;
  case X86Local::MRMXrCC:
    return RHS == X86Local::MRMXmCC;
  case X86Local::MRMDestReg:
    return RHS == X86Local::MRMDestMem;
  case X86Local::MRMSrcReg:
    return RHS == X86Local::MRMSrcMem;
  case X86Local::MRMSrcReg4VOp3:
    return RHS == X86Local::MRMSrcMem4VOp3;
  case X86Local::MRMSrcRegOp4:
    return RHS == X86Local::MRMSrcMemOp4;
  case X86Local::MRMSrcRegCC:
    return RHS == X86Local::MRMSrcMemCC;
  }
}

static bool isNOREXRegClass(const Record *Op) {
  return Op->getName().contains("_NOREX");
}

// Function object - Operator() returns true if the given Reg instruction
// matches the Mem instruction of this object.
class IsMatch {
  const CodeGenInstruction *MemInst;
  const X86Disassembler::RecognizableInstrBase MemRI;
  const unsigned Variant;

public:
  IsMatch(const CodeGenInstruction *Inst, unsigned V)
      : MemInst(Inst), MemRI(*MemInst), Variant(V) {}

  bool operator()(const CodeGenInstruction *RegInst) {
    X86Disassembler::RecognizableInstrBase RegRI(*RegInst);
    const Record *RegRec = RegInst->TheDef;
    const Record *MemRec = MemInst->TheDef;

    // EVEX_B means different things for memory and register forms.
    if (RegRI.HasEVEX_B || MemRI.HasEVEX_B)
      return false;

    if (!mayFoldFromLeftToRight(RegRI.Form, MemRI.Form))
      return false;

    // X86 encoding is crazy, e.g.
    //
    //   f3 0f c7 30  vmxon (%rax)
    //   f3 0f c7 f0  senduipi %rax
    //
    // These two instructions have similar encoding fields but are unrelated.
    if (X86Disassembler::getMnemonic(MemInst, Variant) !=
        X86Disassembler::getMnemonic(RegInst, Variant))
      return false;

    // Return false if any of the following fields does not match.
    if (std::make_tuple(RegRI.Encoding, RegRI.Opcode, RegRI.OpPrefix,
                        RegRI.OpMap, RegRI.OpSize, RegRI.AdSize, RegRI.HasREX_W,
                        RegRI.HasVEX_4V, RegRI.HasVEX_L, RegRI.IgnoresVEX_L,
                        RegRI.IgnoresW, RegRI.HasEVEX_K, RegRI.HasEVEX_KZ,
                        RegRI.HasEVEX_L2, RegRec->getValueAsBit("hasEVEX_RC"),
                        RegRec->getValueAsBit("hasLockPrefix"),
                        RegRec->getValueAsBit("hasNoTrackPrefix"),
                        RegRec->getValueAsBit("EVEX_W1_VEX_W0")) !=
        std::make_tuple(MemRI.Encoding, MemRI.Opcode, MemRI.OpPrefix,
                        MemRI.OpMap, MemRI.OpSize, MemRI.AdSize, MemRI.HasREX_W,
                        MemRI.HasVEX_4V, MemRI.HasVEX_L, MemRI.IgnoresVEX_L,
                        MemRI.IgnoresW, MemRI.HasEVEX_K, MemRI.HasEVEX_KZ,
                        MemRI.HasEVEX_L2, MemRec->getValueAsBit("hasEVEX_RC"),
                        MemRec->getValueAsBit("hasLockPrefix"),
                        MemRec->getValueAsBit("hasNoTrackPrefix"),
                        MemRec->getValueAsBit("EVEX_W1_VEX_W0")))
      return false;

    // Make sure the sizes of the operands of both instructions suit each
    // other. This is needed for instructions with an intrinsic version (_Int),
    // where the only difference is the size of the operands.
    // For example: VUCOMISDZrm and VUCOMISDrm_Int
    // It is also needed for instructions whose EVEX version was upgraded to
    // work with k-registers. For example VPCMPEQBrm (xmm output register) and
    // VPCMPEQBZ128rm (k register output register).
    unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
    unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
    unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
    unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();

    // Instructions with one output in their memory form use the memory folded
    // operand as source and destination (Read-Modify-Write).
    unsigned RegStartIdx =
        (MemOutSize + 1 == RegOutSize) && (MemInSize == RegInSize) ? 1 : 0;
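
    // For example (schematically), a read-modify-write pair like
    //   ADD32ri: (outs GR32:$dst), (ins GR32:$src1, i32imm:$src2)
    //   ADD32mi: (outs),           (ins i32mem:$dst, i32imm:$src)
    // has MemOutSize + 1 == RegOutSize and MemInSize == RegInSize, so the
    // register form's operands are compared starting from index 1.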

    bool FoundFoldedOp = false;
    for (unsigned I = 0, E = MemInst->Operands.size(); I != E; I++) {
      Record *MemOpRec = MemInst->Operands[I].Rec;
      Record *RegOpRec = RegInst->Operands[I + RegStartIdx].Rec;

      if (MemOpRec == RegOpRec)
        continue;

      if (isRegisterOperand(MemOpRec) && isRegisterOperand(RegOpRec) &&
          ((getRegOperandSize(MemOpRec) != getRegOperandSize(RegOpRec)) ||
           (isNOREXRegClass(MemOpRec) != isNOREXRegClass(RegOpRec))))
        return false;

      if (isMemoryOperand(MemOpRec) && isMemoryOperand(RegOpRec) &&
          (getMemOperandSize(MemOpRec) != getMemOperandSize(RegOpRec)))
        return false;

      if (isImmediateOperand(MemOpRec) && isImmediateOperand(RegOpRec) &&
          (MemOpRec->getValueAsDef("Type") != RegOpRec->getValueAsDef("Type")))
        return false;

      // Only one operand can be folded.
      if (FoundFoldedOp)
        return false;

      assert(isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec));
      FoundFoldedOp = true;
    }

    return FoundFoldedOp;
  }
};

} // end anonymous namespace

void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
                                             const CodeGenInstruction *RegInstr,
                                             const CodeGenInstruction *MemInstr,
                                             uint16_t S, unsigned FoldedIdx,
                                             bool isManual) {

  X86FoldTableEntry Result = X86FoldTableEntry(RegInstr, MemInstr);
  Record *RegRec = RegInstr->TheDef;
  Record *MemRec = MemInstr->TheDef;

  Result.NoReverse = S & TB_NO_REVERSE;
  Result.NoForward = S & TB_NO_FORWARD;
  Result.FoldLoad = S & TB_FOLDED_LOAD;
  Result.FoldStore = S & TB_FOLDED_STORE;
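  // S encodes the required alignment as log2 in its TB_ALIGN bit-field;
  // decode it back into an Align value here.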
  Result.Alignment = Align(1ULL << ((S & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT));
  if (isManual) {
    Table[RegInstr] = Result;
    return;
  }

  // Only table0 entries should explicitly specify a load or store flag.
  if (&Table == &Table0) {
    unsigned MemInOpsNum = MemRec->getValueAsDag("InOperandList")->getNumArgs();
    unsigned RegInOpsNum = RegRec->getValueAsDag("InOperandList")->getNumArgs();
    // If the instruction writes to the folded operand, it will appear as an
    // output in the register form instruction and as an input in the memory
    // form instruction.
    // If the instruction reads from the folded operand, it will appear as an
    // input in both forms.
    if (MemInOpsNum == RegInOpsNum)
      Result.FoldLoad = true;
    else
      Result.FoldStore = true;
  }

  Record *RegOpRec = RegInstr->Operands[FoldedIdx].Rec;
  Record *MemOpRec = MemInstr->Operands[FoldedIdx].Rec;

  // Unfolding code generates a load/store instruction according to the size of
  // the register in the register form instruction. If the register's size is
  // greater than the memory operand's size, the unfolded load would read more
  // memory than the folded form and could cause a memory fault, so do not
  // allow unfolding.
  if (getRegOperandSize(RegOpRec) > getMemOperandSize(MemOpRec))
    Result.NoReverse = true;
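
  // Masked instruction variants carry a "k" (merge-masking) or "kz"
  // (zero-masking) suffix, e.g. VMOVAPSZrrk / VMOVAPSZrrkz; strip it to
  // consult the unmasked base instruction's isMoveReg flag.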
  // Check no-kz version's isMoveReg
  StringRef RegInstName = RegRec->getName();
  unsigned DropLen =
      RegInstName.endswith("rkz") ? 2 : (RegInstName.endswith("rk") ? 1 : 0);
  Record *BaseDef =
      DropLen ? Records.getDef(RegInstName.drop_back(DropLen)) : nullptr;
  bool IsMoveReg =
      BaseDef ? Target.getInstruction(BaseDef).isMoveReg : RegInstr->isMoveReg;
  // A masked load cannot be unfolded to a full load, otherwise it would access
  // unexpected memory. A simple store cannot be unfolded.
  if (IsMoveReg && (BaseDef || Result.FoldStore))
    Result.NoReverse = true;

  uint8_t Enc = byteFromBitsInit(RegRec->getValueAsBitsInit("OpEncBits"));
  if (isExplicitAlign(RegInstr)) {
    // The instruction requires explicitly aligned memory.
    BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
    Result.Alignment = Align(byteFromBitsInit(VectSize));
  } else if (!Enc && !isExplicitUnalign(RegInstr) &&
             getMemOperandSize(MemOpRec) > 64) {
    // Instructions with XOP/VEX/EVEX encoding do not require alignment, while
    // SSE packed vector instructions require a 16 byte alignment.
    Result.Alignment = Align(16);
  }
  // Expand is only ever created as a masked instruction. It is not safe to
  // unfold a masked expand because we don't know if it came from an expand
  // load intrinsic or from folding a plain load. If it came from an expand
  // load intrinsic, unfolding to a plain load would read more elements and
  // could trigger a fault.
  if (RegRec->getName().contains("EXPAND"))
    Result.NoReverse = true;

  Table[RegInstr] = Result;
}

void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
                                        const CodeGenInstruction *MemInstr,
                                        uint16_t S, bool IsManual) {

  Record *RegRec = RegInstr->TheDef;
  Record *MemRec = MemInstr->TheDef;
  unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
  unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
  unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
  unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();

  // Instructions which Read-Modify-Write should be added to Table2Addr.
  if (!MemOutSize && RegOutSize == 1 && MemInSize == RegInSize) {
    // X86 would not unfold Read-Modify-Write instructions, so add
    // TB_NO_REVERSE.
    addEntryWithFlags(Table2Addr, RegInstr, MemInstr, S | TB_NO_REVERSE, 0,
                      IsManual);
    return;
  }

  if (MemInSize == RegInSize && MemOutSize == RegOutSize) {
    // Load-Folding cases.
    // If the i'th register form operand is a register and the i'th memory form
    // operand is a memory operand, add instructions to Table#i.
    for (unsigned i = RegOutSize, e = RegInstr->Operands.size(); i < e; i++) {
      Record *RegOpRec = RegInstr->Operands[i].Rec;
      Record *MemOpRec = MemInstr->Operands[i].Rec;
      // PointerLikeRegClass: For instructions like TAILJMPr, TAILJMPr64,
      // TAILJMPr64_REX.
      if ((isRegisterOperand(RegOpRec) ||
           RegOpRec->isSubClassOf("PointerLikeRegClass")) &&
          isMemoryOperand(MemOpRec)) {
        switch (i) {
        case 0:
          addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0, IsManual);
          return;
        case 1:
          addEntryWithFlags(Table1, RegInstr, MemInstr, S, 1, IsManual);
          return;
        case 2:
          addEntryWithFlags(Table2, RegInstr, MemInstr, S, 2, IsManual);
          return;
        case 3:
          addEntryWithFlags(Table3, RegInstr, MemInstr, S, 3, IsManual);
          return;
        case 4:
          addEntryWithFlags(Table4, RegInstr, MemInstr, S, 4, IsManual);
          return;
        }
      }
    }
  } else if (MemInSize == RegInSize + 1 && MemOutSize + 1 == RegOutSize) {
    // Store-Folding cases.
    // If the memory form instruction performs a store, the *output* register
    // of the register form instruction disappears and a memory *input* operand
    // appears in the memory form instruction instead.
    // For example:
    //   MOVAPSrr => (outs VR128:$dst), (ins VR128:$src)
    //   MOVAPSmr => (outs), (ins f128mem:$dst, VR128:$src)
    Record *RegOpRec = RegInstr->Operands[RegOutSize - 1].Rec;
    Record *MemOpRec = MemInstr->Operands[RegOutSize - 1].Rec;
    if (isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec) &&
        getRegOperandSize(RegOpRec) == getMemOperandSize(MemOpRec))
      addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0, IsManual);
  }
}

void X86FoldTablesEmitter::run(raw_ostream &o) {
  formatted_raw_ostream OS(o);

  // Holds all memory instructions
  std::vector<const CodeGenInstruction *> MemInsts;
  // Holds all register instructions - divided according to opcode.
  std::map<uint8_t, std::vector<const CodeGenInstruction *>> RegInsts;

  ArrayRef<const CodeGenInstruction *> NumberedInstructions =
      Target.getInstructionsByEnumValue();

  for (const CodeGenInstruction *Inst : NumberedInstructions) {
    const Record *Rec = Inst->TheDef;
    if (!Rec->isSubClassOf("X86Inst") || Rec->getValueAsBit("isAsmParserOnly"))
      continue;

    if (NoFoldSet.find(Rec->getName()) != NoFoldSet.end())
      continue;

    // - Instructions including RST register class operands are not relevant
    //   for memory folding (for further details check the explanation in
    //   lib/Target/X86/X86InstrFPStack.td file).
    // - Some instructions (listed in the manual map above) use the register
    //   class ptr_rc_tailcall, which can be of size 32 or 64; to ensure safe
    //   mapping of these instructions we manually map them and exclude them
    //   from the automation.
    if (hasRSTRegClass(Inst) || hasPtrTailcallRegClass(Inst))
      continue;

    // Add all the memory form instructions to MemInsts, and all the register
    // form instructions to RegInsts[Opc], where Opc is the opcode of each
    // instruction. This helps reduce the runtime of the backend.
    const BitsInit *FormBits = Rec->getValueAsBitsInit("FormBits");
    uint8_t Form = byteFromBitsInit(FormBits);
    if (mayFoldToForm(Form))
      MemInsts.push_back(Inst);
    else if (mayFoldFromForm(Form)) {
      uint8_t Opc = byteFromBitsInit(Rec->getValueAsBitsInit("Opcode"));
      RegInsts[Opc].push_back(Inst);
    }
  }

  Record *AsmWriter = Target.getAsmWriter();
  unsigned Variant = AsmWriter->getValueAsInt("Variant");
  // For each memory form instruction, try to find its register form
  // instruction.
  for (const CodeGenInstruction *MemInst : MemInsts) {
    uint8_t Opc =
        byteFromBitsInit(MemInst->TheDef->getValueAsBitsInit("Opcode"));

    auto RegInstsIt = RegInsts.find(Opc);
    if (RegInstsIt == RegInsts.end())
      continue;

    // Two forms (memory & register) of the same instruction must have the same
    // opcode. Try matching only with register form instructions with the same
    // opcode.
    std::vector<const CodeGenInstruction *> &OpcRegInsts = RegInstsIt->second;

    auto Match = find_if(OpcRegInsts, IsMatch(MemInst, Variant));
    if (Match != OpcRegInsts.end()) {
      const CodeGenInstruction *RegInst = *Match;
      StringRef RegInstName = RegInst->TheDef->getName();
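      // Prefer the primary definition over "_REV"/"_alt" variants; both
      // suffixes are 4 characters long, so drop_back(4) recovers the base
      // instruction name.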
      if (RegInstName.endswith("_REV") || RegInstName.endswith("_alt")) {
        if (auto *RegAltRec = Records.getDef(RegInstName.drop_back(4))) {
          RegInst = &Target.getInstruction(RegAltRec);
        }
      }
      updateTables(RegInst, MemInst);
      OpcRegInsts.erase(Match);
    }
  }

  // Add the manually mapped instructions listed above.
  for (const ManualMapEntry &Entry : ManualMapSet) {
    Record *RegInstIter = Records.getDef(Entry.RegInstStr);
    Record *MemInstIter = Records.getDef(Entry.MemInstStr);

    updateTables(&(Target.getInstruction(RegInstIter)),
                 &(Target.getInstruction(MemInstIter)), Entry.Strategy, true);
  }

#ifndef NDEBUG
  auto CheckMemFoldTable = [](const FoldTable &Table) -> void {
    for (const auto &Record : Table) {
      auto &FoldEntry = Record.second;
      FoldEntry.checkCorrectness();
    }
  };
  CheckMemFoldTable(Table2Addr);
  CheckMemFoldTable(Table0);
  CheckMemFoldTable(Table1);
  CheckMemFoldTable(Table2);
  CheckMemFoldTable(Table3);
  CheckMemFoldTable(Table4);
#endif
  // Print all tables.
  printTable(Table2Addr, "Table2Addr", OS);
  printTable(Table0, "Table0", OS);
  printTable(Table1, "Table1", OS);
  printTable(Table2, "Table2", OS);
  printTable(Table3, "Table3", OS);
  printTable(Table4, "Table4", OS);
}
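
// This registers the backend under the -gen-x86-fold-tables option; e.g.
// (invocation illustrative) "llvm-tblgen -gen-x86-fold-tables X86.td" emits
// the MemoryFold* arrays above.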
static TableGen::Emitter::OptClass<X86FoldTablesEmitter>
    X("gen-x86-fold-tables", "Generate X86 fold tables");