//===-- Target.cpp ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "../Target.h"

#include "../Error.h"
#include "../Latency.h"
#include "../SnippetGenerator.h"
#include "../Uops.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/Support/FormatVariadic.h"

namespace llvm {
namespace exegesis {

// Returns an error if we cannot handle the memory references in this
// instruction.
static Error isInvalidMemoryInstr(const Instruction &Instr) {
  switch (Instr.Description->TSFlags & X86II::FormMask) {
  default:
    llvm_unreachable("Unknown FormMask value");
  // These have no memory access.
  case X86II::Pseudo:
  case X86II::RawFrm:
  case X86II::AddCCFrm:
  case X86II::MRMDestReg:
  case X86II::MRMSrcReg:
  case X86II::MRMSrcReg4VOp3:
  case X86II::MRMSrcRegOp4:
  case X86II::MRMSrcRegCC:
  case X86II::MRMXrCC:
  case X86II::MRMXr:
  case X86II::MRM0r:
  case X86II::MRM1r:
  case X86II::MRM2r:
  case X86II::MRM3r:
  case X86II::MRM4r:
  case X86II::MRM5r:
  case X86II::MRM6r:
  case X86II::MRM7r:
  case X86II::MRM_C0:
  case X86II::MRM_C1:
  case X86II::MRM_C2:
  case X86II::MRM_C3:
  case X86II::MRM_C4:
  case X86II::MRM_C5:
  case X86II::MRM_C6:
  case X86II::MRM_C7:
  case X86II::MRM_C8:
  case X86II::MRM_C9:
  case X86II::MRM_CA:
  case X86II::MRM_CB:
  case X86II::MRM_CC:
  case X86II::MRM_CD:
  case X86II::MRM_CE:
  case X86II::MRM_CF:
  case X86II::MRM_D0:
  case X86II::MRM_D1:
  case X86II::MRM_D2:
  case X86II::MRM_D3:
  case X86II::MRM_D4:
  case X86II::MRM_D5:
  case X86II::MRM_D6:
  case X86II::MRM_D7:
  case X86II::MRM_D8:
  case X86II::MRM_D9:
  case X86II::MRM_DA:
  case X86II::MRM_DB:
  case X86II::MRM_DC:
  case X86II::MRM_DD:
  case X86II::MRM_DE:
  case X86II::MRM_DF:
  case X86II::MRM_E0:
  case X86II::MRM_E1:
  case X86II::MRM_E2:
  case X86II::MRM_E3:
  case X86II::MRM_E4:
  case X86II::MRM_E5:
  case X86II::MRM_E6:
  case X86II::MRM_E7:
  case X86II::MRM_E8:
  case X86II::MRM_E9:
  case X86II::MRM_EA:
  case X86II::MRM_EB:
  case X86II::MRM_EC:
  case X86II::MRM_ED:
  case X86II::MRM_EE:
  case X86II::MRM_EF:
  case X86II::MRM_F0:
  case X86II::MRM_F1:
  case X86II::MRM_F2:
  case X86II::MRM_F3:
  case X86II::MRM_F4:
  case X86II::MRM_F5:
  case X86II::MRM_F6:
  case X86II::MRM_F7:
  case X86II::MRM_F8:
  case X86II::MRM_F9:
  case X86II::MRM_FA:
  case X86II::MRM_FB:
  case X86II::MRM_FC:
  case X86II::MRM_FD:
  case X86II::MRM_FE:
  case X86II::MRM_FF:
  case X86II::RawFrmImm8:
    return Error::success();
  case X86II::AddRegFrm:
    return (Instr.Description->Opcode == X86::POP16r ||
            Instr.Description->Opcode == X86::POP32r ||
            Instr.Description->Opcode == X86::PUSH16r ||
            Instr.Description->Opcode == X86::PUSH32r)
               ? make_error<Failure>(
                     "unsupported opcode: unsupported memory access")
               : Error::success();
  // These access memory and are handled.
  case X86II::MRMDestMem:
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMem4VOp3:
  case X86II::MRMSrcMemOp4:
  case X86II::MRMSrcMemCC:
  case X86II::MRMXmCC:
  case X86II::MRMXm:
  case X86II::MRM0m:
  case X86II::MRM1m:
  case X86II::MRM2m:
  case X86II::MRM3m:
  case X86II::MRM4m:
  case X86II::MRM5m:
  case X86II::MRM6m:
  case X86II::MRM7m:
    return Error::success();
  // These access memory and are not handled yet.
  case X86II::RawFrmImm16:
  case X86II::RawFrmMemOffs:
  case X86II::RawFrmSrc:
  case X86II::RawFrmDst:
  case X86II::RawFrmDstSrc:
    return make_error<Failure>("unsupported opcode: non uniform memory access");
  }
}

static Error IsInvalidOpcode(const Instruction &Instr) {
  const auto OpcodeName = Instr.Name;
  if ((Instr.Description->TSFlags & X86II::FormMask) == X86II::Pseudo)
    return make_error<Failure>("unsupported opcode: pseudo instruction");
  if (OpcodeName.startswith("POPF") || OpcodeName.startswith("PUSHF") ||
      OpcodeName.startswith("ADJCALLSTACK"))
    return make_error<Failure>("unsupported opcode: Push/Pop/AdjCallStack");
  if (Error Error = isInvalidMemoryInstr(Instr))
    return Error;
  // We do not handle instructions with OPERAND_PCREL.
  for (const Operand &Op : Instr.Operands)
    if (Op.isExplicit() &&
        Op.getExplicitOperandInfo().OperandType == MCOI::OPERAND_PCREL)
      return make_error<Failure>("unsupported opcode: PC relative operand");
  // We do not handle second-form X87 instructions. We only handle first-form
  // ones (_Fp), see comment in X86InstrFPStack.td.
  for (const Operand &Op : Instr.Operands)
    if (Op.isReg() && Op.isExplicit() &&
        Op.getExplicitOperandInfo().RegClass == X86::RSTRegClassID)
      return make_error<Failure>("unsupported second-form X87 instruction");
  return Error::success();
}

static unsigned getX86FPFlags(const Instruction &Instr) {
  return Instr.Description->TSFlags & X86II::FPTypeMask;
}

// Helper to fill a memory operand with a value.
static void setMemOp(InstructionTemplate &IT, int OpIdx,
                     const MCOperand &OpVal) {
  const auto Op = IT.Instr.Operands[OpIdx];
  assert(Op.isExplicit() && "invalid memory pattern");
  IT.getValueFor(Op) = OpVal;
}

// Common (latency, uops) code for LEA templates. `GetDestReg` takes the
// addressing base and index registers and returns the LEA destination register.
static Expected<std::vector<CodeTemplate>> generateLEATemplatesCommon(
    const Instruction &Instr, const BitVector &ForbiddenRegisters,
    const LLVMState &State, const SnippetGenerator::Options &Opts,
    std::function<unsigned(unsigned, unsigned)> GetDestReg) {
  assert(Instr.Operands.size() == 6 && "invalid LEA");
  assert(X86II::getMemoryOperandNo(Instr.Description->TSFlags) == 1 &&
         "invalid LEA");

  constexpr const int kDestOp = 0;
  constexpr const int kBaseOp = 1;
  constexpr const int kIndexOp = 3;
  auto PossibleDestRegs =
      Instr.Operands[kDestOp].getRegisterAliasing().sourceBits();
  remove(PossibleDestRegs, ForbiddenRegisters);
  auto PossibleBaseRegs =
      Instr.Operands[kBaseOp].getRegisterAliasing().sourceBits();
  remove(PossibleBaseRegs, ForbiddenRegisters);
  auto PossibleIndexRegs =
      Instr.Operands[kIndexOp].getRegisterAliasing().sourceBits();
  remove(PossibleIndexRegs, ForbiddenRegisters);

  const auto &RegInfo = State.getRegInfo();
  std::vector<CodeTemplate> Result;
  for (const unsigned BaseReg : PossibleBaseRegs.set_bits()) {
    for (const unsigned IndexReg : PossibleIndexRegs.set_bits()) {
      for (int LogScale = 0; LogScale <= 3; ++LogScale) {
        // FIXME: Add an option for controlling how we explore immediates.
        for (const int Disp : {0, 42}) {
          InstructionTemplate IT(Instr);
          const int64_t Scale = 1ull << LogScale;
          setMemOp(IT, 1, MCOperand::createReg(BaseReg));
          setMemOp(IT, 2, MCOperand::createImm(Scale));
          setMemOp(IT, 3, MCOperand::createReg(IndexReg));
          setMemOp(IT, 4, MCOperand::createImm(Disp));
          // SegmentReg must be 0 for LEA.
          setMemOp(IT, 5, MCOperand::createReg(0));

          // Output reg is selected by the caller.
          setMemOp(IT, 0, MCOperand::createReg(GetDestReg(BaseReg, IndexReg)));

          CodeTemplate CT;
          CT.Instructions.push_back(std::move(IT));
          CT.Config = formatv("{3}(%{0}, %{1}, {2})", RegInfo.getName(BaseReg),
                              RegInfo.getName(IndexReg), Scale, Disp)
                          .str();
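          // For example (illustrative values): BaseReg=RSI, IndexReg=RDI,
          // LogScale=2, Disp=42 yields the AT&T-style config string
          // "42(%RSI, %RDI, 4)".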
          Result.push_back(std::move(CT));
          if (Result.size() >= Opts.MaxConfigsPerOpcode)
            return std::move(Result);
        }
      }
    }
  }
  return std::move(Result);
}

namespace {
class X86LatencySnippetGenerator : public LatencySnippetGenerator {
public:
  using LatencySnippetGenerator::LatencySnippetGenerator;

  Expected<std::vector<CodeTemplate>>
  generateCodeTemplates(const Instruction &Instr,
                        const BitVector &ForbiddenRegisters) const override;
};
} // namespace

Expected<std::vector<CodeTemplate>>
X86LatencySnippetGenerator::generateCodeTemplates(
    const Instruction &Instr, const BitVector &ForbiddenRegisters) const {
  if (auto E = IsInvalidOpcode(Instr))
    return std::move(E);

  // LEA gets special attention.
  const auto Opcode = Instr.Description->getOpcode();
  if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r) {
    return generateLEATemplatesCommon(Instr, ForbiddenRegisters, State, Opts,
                                      [](unsigned BaseReg, unsigned IndexReg) {
                                        // We just select the same base and
                                        // output register.
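                                        // (Reusing the base register as the
                                        // destination makes each LEA depend on
                                        // the previous one, which is what a
                                        // latency measurement needs.)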
                                        return BaseReg;
                                      });
  }

  switch (getX86FPFlags(Instr)) {
  case X86II::NotFP:
    return LatencySnippetGenerator::generateCodeTemplates(Instr,
                                                           ForbiddenRegisters);
  case X86II::ZeroArgFP:
  case X86II::OneArgFP:
  case X86II::SpecialFP:
  case X86II::CompareFP:
  case X86II::CondMovFP:
    return make_error<Failure>("Unsupported x87 Instruction");
  case X86II::OneArgFPRW:
  case X86II::TwoArgFP:
    // These are instructions like
    // - `ST(0) = fsqrt(ST(0))` (OneArgFPRW)
    // - `ST(0) = ST(0) + ST(i)` (TwoArgFP)
    // They are intrinsically serial and do not modify the state of the stack.
    return generateSelfAliasingCodeTemplates(Instr);
  default:
    llvm_unreachable("Unknown FP Type!");
  }
}

namespace {
class X86UopsSnippetGenerator : public UopsSnippetGenerator {
public:
  using UopsSnippetGenerator::UopsSnippetGenerator;

  Expected<std::vector<CodeTemplate>>
  generateCodeTemplates(const Instruction &Instr,
                        const BitVector &ForbiddenRegisters) const override;
};

} // namespace

Expected<std::vector<CodeTemplate>>
X86UopsSnippetGenerator::generateCodeTemplates(
    const Instruction &Instr, const BitVector &ForbiddenRegisters) const {
  if (auto E = IsInvalidOpcode(Instr))
    return std::move(E);

  // LEA gets special attention.
  const auto Opcode = Instr.Description->getOpcode();
  if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r) {
    // Any destination register that is not used for addressing is fine.
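    // (Keeping the destination independent of the base and index registers
    // avoids introducing a dependency chain, so iterations can issue in
    // parallel for the uops measurement.)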
    auto PossibleDestRegs =
        Instr.Operands[0].getRegisterAliasing().sourceBits();
    remove(PossibleDestRegs, ForbiddenRegisters);
    return generateLEATemplatesCommon(
        Instr, ForbiddenRegisters, State, Opts,
        [this, &PossibleDestRegs](unsigned BaseReg, unsigned IndexReg) {
          auto PossibleDestRegsNow = PossibleDestRegs;
          remove(PossibleDestRegsNow,
                 State.getRATC().getRegister(BaseReg).aliasedBits());
          remove(PossibleDestRegsNow,
                 State.getRATC().getRegister(IndexReg).aliasedBits());
          assert(PossibleDestRegsNow.set_bits().begin() !=
                     PossibleDestRegsNow.set_bits().end() &&
                 "no remaining registers");
          return *PossibleDestRegsNow.set_bits().begin();
        });
  }

  switch (getX86FPFlags(Instr)) {
  case X86II::NotFP:
    return UopsSnippetGenerator::generateCodeTemplates(Instr,
                                                       ForbiddenRegisters);
  case X86II::ZeroArgFP:
  case X86II::OneArgFP:
  case X86II::SpecialFP:
    return make_error<Failure>("Unsupported x87 Instruction");
  case X86II::OneArgFPRW:
  case X86II::TwoArgFP:
    // These are instructions like
    // - `ST(0) = fsqrt(ST(0))` (OneArgFPRW)
    // - `ST(0) = ST(0) + ST(i)` (TwoArgFP)
    // They are intrinsically serial and do not modify the state of the stack.
    // We generate the same code for latency and uops.
    return generateSelfAliasingCodeTemplates(Instr);
  case X86II::CompareFP:
  case X86II::CondMovFP:
    // We can compute uops for any FP instruction that does not grow or shrink
    // the stack (either do not touch the stack or push as much as they pop).
    return generateUnconstrainedCodeTemplates(
        Instr, "instruction does not grow/shrink the FP stack");
  default:
    llvm_unreachable("Unknown FP Type!");
  }
}

static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) {
  switch (RegBitWidth) {
  case 8:
    return X86::MOV8ri;
  case 16:
    return X86::MOV16ri;
  case 32:
    return X86::MOV32ri;
  case 64:
    return X86::MOV64ri;
  }
  llvm_unreachable("Invalid Value Width");
}

// Generates instruction to load an immediate value into a register.
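// Usage sketch (illustrative): loadImmediate(X86::EAX, 32, APInt(32, 42))
// builds the MCInst `MOV32ri EAX, 42`.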
static MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth,
                            const APInt &Value) {
  if (Value.getBitWidth() > RegBitWidth)
    llvm_unreachable("Value must fit in the Register");
  return MCInstBuilder(getLoadImmediateOpcode(RegBitWidth))
      .addReg(Reg)
      .addImm(Value.getZExtValue());
}

// Allocates scratch memory on the stack.
static MCInst allocateStackSpace(unsigned Bytes) {
  return MCInstBuilder(X86::SUB64ri8)
      .addReg(X86::RSP)
      .addReg(X86::RSP)
      .addImm(Bytes);
}

// Fills scratch memory at offset `OffsetBytes` with value `Imm`.
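// With MOV32mi, for instance, this is roughly `movl $Imm, OffsetBytes(%rsp)`
// in AT&T syntax (illustrative).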
static MCInst fillStackSpace(unsigned MovOpcode, unsigned OffsetBytes,
                             uint64_t Imm) {
  return MCInstBuilder(MovOpcode)
      // Address = ESP
      .addReg(X86::RSP)    // BaseReg
      .addImm(1)           // ScaleAmt
      .addReg(0)           // IndexReg
      .addImm(OffsetBytes) // Disp
      .addReg(0)           // Segment
      // Immediate.
      .addImm(Imm);
}

// Loads scratch memory into register `Reg` using opcode `RMOpcode`.
static MCInst loadToReg(unsigned Reg, unsigned RMOpcode) {
  return MCInstBuilder(RMOpcode)
      .addReg(Reg)
      // Address = ESP
      .addReg(X86::RSP) // BaseReg
      .addImm(1)        // ScaleAmt
      .addReg(0)        // IndexReg
      .addImm(0)        // Disp
      .addReg(0);       // Segment
}

// Releases scratch memory.
static MCInst releaseStackSpace(unsigned Bytes) {
  return MCInstBuilder(X86::ADD64ri8)
      .addReg(X86::RSP)
      .addReg(X86::RSP)
      .addImm(Bytes);
}

// Reserves some space on the stack, fills it with the content of the provided
// constant, and provides methods to load the stack value into a register.
namespace {
struct ConstantInliner {
  explicit ConstantInliner(const APInt &Constant) : Constant_(Constant) {}

  std::vector<MCInst> loadAndFinalize(unsigned Reg, unsigned RegBitWidth,
                                      unsigned Opcode);

  std::vector<MCInst> loadX87STAndFinalize(unsigned Reg);

  std::vector<MCInst> loadX87FPAndFinalize(unsigned Reg);

  std::vector<MCInst> popFlagAndFinalize();

private:
  ConstantInliner &add(const MCInst &Inst) {
    Instructions.push_back(Inst);
    return *this;
  }

  void initStack(unsigned Bytes);

  static constexpr const unsigned kF80Bytes = 10; // 80 bits.

  APInt Constant_;
  std::vector<MCInst> Instructions;
};
} // namespace
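
// Usage sketch (illustrative):
//   ConstantInliner CI(APInt(128, 0));
//   std::vector<MCInst> Code =
//       CI.loadAndFinalize(X86::XMM0, 128, X86::MOVDQUrm);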

std::vector<MCInst> ConstantInliner::loadAndFinalize(unsigned Reg,
                                                     unsigned RegBitWidth,
                                                     unsigned Opcode) {
  assert((RegBitWidth & 7) == 0 && "RegBitWidth must be a multiple of 8 bits");
  initStack(RegBitWidth / 8);
  add(loadToReg(Reg, Opcode));
  add(releaseStackSpace(RegBitWidth / 8));
  return std::move(Instructions);
}

std::vector<MCInst> ConstantInliner::loadX87STAndFinalize(unsigned Reg) {
  initStack(kF80Bytes);
  add(MCInstBuilder(X86::LD_F80m)
          // Address = ESP
          .addReg(X86::RSP) // BaseReg
          .addImm(1)        // ScaleAmt
          .addReg(0)        // IndexReg
          .addImm(0)        // Disp
          .addReg(0));      // Segment
  if (Reg != X86::ST0)
    add(MCInstBuilder(X86::ST_Frr).addReg(Reg));
  add(releaseStackSpace(kF80Bytes));
  return std::move(Instructions);
}

std::vector<MCInst> ConstantInliner::loadX87FPAndFinalize(unsigned Reg) {
  initStack(kF80Bytes);
  add(MCInstBuilder(X86::LD_Fp80m)
          .addReg(Reg)
          // Address = ESP
          .addReg(X86::RSP) // BaseReg
          .addImm(1)        // ScaleAmt
          .addReg(0)        // IndexReg
          .addImm(0)        // Disp
          .addReg(0));      // Segment
  add(releaseStackSpace(kF80Bytes));
  return std::move(Instructions);
}

std::vector<MCInst> ConstantInliner::popFlagAndFinalize() {
  initStack(8);
  add(MCInstBuilder(X86::POPF64));
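  // POPF64 pops the 8 bytes written by initStack() straight into RFLAGS and
  // restores RSP, so no explicit releaseStackSpace() is needed here.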
  return std::move(Instructions);
}

void ConstantInliner::initStack(unsigned Bytes) {
  assert(Constant_.getBitWidth() <= Bytes * 8 &&
         "Value does not have the correct size");
  const APInt WideConstant = Constant_.getBitWidth() < Bytes * 8
                                 ? Constant_.sext(Bytes * 8)
                                 : Constant_;
  add(allocateStackSpace(Bytes));
  size_t ByteOffset = 0;
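  // Store the constant in the widest chunks available: for example
  // (illustrative), a 10-byte x87 constant is written as two MOV32mi stores at
  // offsets 0 and 4, followed by one MOV16mi store at offset 8.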
  for (; Bytes - ByteOffset >= 4; ByteOffset += 4)
    add(fillStackSpace(
        X86::MOV32mi, ByteOffset,
        WideConstant.extractBits(32, ByteOffset * 8).getZExtValue()));
  if (Bytes - ByteOffset >= 2) {
    add(fillStackSpace(
        X86::MOV16mi, ByteOffset,
        WideConstant.extractBits(16, ByteOffset * 8).getZExtValue()));
    ByteOffset += 2;
  }
  if (Bytes - ByteOffset >= 1)
    add(fillStackSpace(
        X86::MOV8mi, ByteOffset,
        WideConstant.extractBits(8, ByteOffset * 8).getZExtValue()));
}

#include "X86GenExegesis.inc"

namespace {
class ExegesisX86Target : public ExegesisTarget {
public:
  ExegesisX86Target() : ExegesisTarget(X86CpuPfmCounters) {}

private:
  void addTargetSpecificPasses(PassManagerBase &PM) const override;

  unsigned getScratchMemoryRegister(const Triple &TT) const override;

  unsigned getLoopCounterRegister(const Triple &) const override;
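
  // 64 bytes covers the widest x86 vector memory access (a 512-bit ZMM load
  // or store).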
  unsigned getMaxMemoryAccessSize() const override { return 64; }

  void randomizeMCOperand(const Instruction &Instr, const Variable &Var,
                          MCOperand &AssignedValue,
                          const BitVector &ForbiddenRegs) const override;

  void fillMemoryOperands(InstructionTemplate &IT, unsigned Reg,
                          unsigned Offset) const override;

  void decrementLoopCounterAndJump(MachineBasicBlock &MBB,
                                   MachineBasicBlock &TargetMBB,
                                   const MCInstrInfo &MII) const override;

  std::vector<MCInst> setRegTo(const MCSubtargetInfo &STI, unsigned Reg,
                               const APInt &Value) const override;

  ArrayRef<unsigned> getUnavailableRegisters() const override {
    return makeArrayRef(kUnavailableRegisters,
                        sizeof(kUnavailableRegisters) /
                            sizeof(kUnavailableRegisters[0]));
  }

  std::unique_ptr<SnippetGenerator> createLatencySnippetGenerator(
      const LLVMState &State,
      const SnippetGenerator::Options &Opts) const override {
    return std::make_unique<X86LatencySnippetGenerator>(State, Opts);
  }

  std::unique_ptr<SnippetGenerator> createUopsSnippetGenerator(
      const LLVMState &State,
      const SnippetGenerator::Options &Opts) const override {
    return std::make_unique<X86UopsSnippetGenerator>(State, Opts);
  }

  bool matchesArch(Triple::ArchType Arch) const override {
    return Arch == Triple::x86_64 || Arch == Triple::x86;
  }

  static const unsigned kUnavailableRegisters[4];
};

// We disable a few registers that cannot be encoded on instructions with a REX
// prefix.
const unsigned ExegesisX86Target::kUnavailableRegisters[4] = {X86::AH, X86::BH,
                                                              X86::CH, X86::DH};

// We're using one of R8-R15 because these registers are never hardcoded in
// instructions (e.g. MOVS writes to EDI, ESI, EDX), so they have fewer
// conflicts.
constexpr const unsigned kLoopCounterReg = X86::R8;

} // namespace

void ExegesisX86Target::addTargetSpecificPasses(PassManagerBase &PM) const {
  // Lowers FP pseudo-instructions, e.g. ABS_Fp32 -> ABS_F.
  PM.add(createX86FloatingPointStackifierPass());
}

unsigned ExegesisX86Target::getScratchMemoryRegister(const Triple &TT) const {
  if (!TT.isArch64Bit()) {
    // FIXME: This would require popping from the stack, so we would have to
    // add some additional setup code.
    return 0;
  }
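  // RDI (SysV) / RCX (Win64) is the first integer argument register of the
  // respective calling convention, so the generated snippet can receive the
  // scratch-memory pointer as its argument.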
  return TT.isOSWindows() ? X86::RCX : X86::RDI;
}

unsigned ExegesisX86Target::getLoopCounterRegister(const Triple &TT) const {
  if (!TT.isArch64Bit()) {
    return 0;
  }
  return kLoopCounterReg;
}

void ExegesisX86Target::randomizeMCOperand(
    const Instruction &Instr, const Variable &Var, MCOperand &AssignedValue,
    const BitVector &ForbiddenRegs) const {
  ExegesisTarget::randomizeMCOperand(Instr, Var, AssignedValue, ForbiddenRegs);

  const Operand &Op = Instr.getPrimaryOperand(Var);
  switch (Op.getExplicitOperandInfo().OperandType) {
  case X86::OperandType::OPERAND_COND_CODE:
    AssignedValue =
        MCOperand::createImm(randomIndex(X86::CondCode::LAST_VALID_COND));
    break;
  default:
    break;
  }
}

void ExegesisX86Target::fillMemoryOperands(InstructionTemplate &IT,
                                           unsigned Reg,
                                           unsigned Offset) const {
  assert(!isInvalidMemoryInstr(IT.Instr) &&
         "fillMemoryOperands requires a valid memory instruction");
  int MemOpIdx = X86II::getMemoryOperandNo(IT.Instr.Description->TSFlags);
  assert(MemOpIdx >= 0 && "invalid memory operand index");
  // getMemoryOperandNo() ignores tied operands, so we have to add them back.
  for (unsigned I = 0; I <= static_cast<unsigned>(MemOpIdx); ++I) {
    const auto &Op = IT.Instr.Operands[I];
    if (Op.isTied() && Op.getTiedToIndex() < I) {
      ++MemOpIdx;
    }
  }
  setMemOp(IT, MemOpIdx + 0, MCOperand::createReg(Reg));    // BaseReg
  setMemOp(IT, MemOpIdx + 1, MCOperand::createImm(1));      // ScaleAmt
  setMemOp(IT, MemOpIdx + 2, MCOperand::createReg(0));      // IndexReg
  setMemOp(IT, MemOpIdx + 3, MCOperand::createImm(Offset)); // Disp
  setMemOp(IT, MemOpIdx + 4, MCOperand::createReg(0));      // Segment
}

void ExegesisX86Target::decrementLoopCounterAndJump(
    MachineBasicBlock &MBB, MachineBasicBlock &TargetMBB,
    const MCInstrInfo &MII) const {
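  // Roughly `addq $-1, %r8` followed by `jne <TargetMBB>` (illustrative AT&T
  // rendering): the ADD sets ZF only once the counter reaches zero, so the
  // jump back to TargetMBB is taken until then.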
  BuildMI(&MBB, DebugLoc(), MII.get(X86::ADD64ri8))
      .addDef(kLoopCounterReg)
      .addUse(kLoopCounterReg)
      .addImm(-1);
  BuildMI(&MBB, DebugLoc(), MII.get(X86::JCC_1))
      .addMBB(&TargetMBB)
      .addImm(X86::COND_NE);
}

std::vector<MCInst> ExegesisX86Target::setRegTo(const MCSubtargetInfo &STI,
                                                unsigned Reg,
                                                const APInt &Value) const {
  if (X86::GR8RegClass.contains(Reg))
    return {loadImmediate(Reg, 8, Value)};
  if (X86::GR16RegClass.contains(Reg))
    return {loadImmediate(Reg, 16, Value)};
  if (X86::GR32RegClass.contains(Reg))
    return {loadImmediate(Reg, 32, Value)};
  if (X86::GR64RegClass.contains(Reg))
    return {loadImmediate(Reg, 64, Value)};
  ConstantInliner CI(Value);
  if (X86::VR64RegClass.contains(Reg))
    return CI.loadAndFinalize(Reg, 64, X86::MMX_MOVQ64rm);
  if (X86::VR128XRegClass.contains(Reg)) {
    if (STI.getFeatureBits()[X86::FeatureAVX512])
      return CI.loadAndFinalize(Reg, 128, X86::VMOVDQU32Z128rm);
    if (STI.getFeatureBits()[X86::FeatureAVX])
      return CI.loadAndFinalize(Reg, 128, X86::VMOVDQUrm);
    return CI.loadAndFinalize(Reg, 128, X86::MOVDQUrm);
  }
  if (X86::VR256XRegClass.contains(Reg)) {
    if (STI.getFeatureBits()[X86::FeatureAVX512])
      return CI.loadAndFinalize(Reg, 256, X86::VMOVDQU32Z256rm);
    if (STI.getFeatureBits()[X86::FeatureAVX])
      return CI.loadAndFinalize(Reg, 256, X86::VMOVDQUYrm);
  }
  if (X86::VR512RegClass.contains(Reg))
    if (STI.getFeatureBits()[X86::FeatureAVX512])
      return CI.loadAndFinalize(Reg, 512, X86::VMOVDQU32Zrm);
  if (X86::RSTRegClass.contains(Reg)) {
    return CI.loadX87STAndFinalize(Reg);
  }
  if (X86::RFP32RegClass.contains(Reg) || X86::RFP64RegClass.contains(Reg) ||
      X86::RFP80RegClass.contains(Reg)) {
    return CI.loadX87FPAndFinalize(Reg);
  }
  if (Reg == X86::EFLAGS)
    return CI.popFlagAndFinalize();
  return {}; // Not yet implemented.
}

static ExegesisTarget *getTheExegesisX86Target() {
  static ExegesisX86Target Target;
  return &Target;
}

void InitializeX86ExegesisTarget() {
  ExegesisTarget::registerTarget(getTheExegesisX86Target());
}

} // namespace exegesis
} // namespace llvm