//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectSDiv(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // emit extract subreg instruction and insert it before MachineInstr &I
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace
#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}
static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform a truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
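
// Illustration (sketch; virtual register names are illustrative) of the
// widening case handled by selectCopy() above: an ABI-lowered copy such as
//   $rax = COPY %0(s32)
// is rewritten to
//   %ext:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit
//   $rax = COPY %ext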
bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  case TargetOpcode::G_SDIV:
    return selectSDiv(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
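
// Illustration (sketch; register names are illustrative) of what
// X86SelectAddress() above computes: for
//   %c:gpr(s64) = G_CONSTANT i64 16
//   %p:gpr(p0) = G_GEP %base, %c
// it fills AM with Base.Reg = %base and Disp = 16, letting the consumer fold
// the whole address into a single [%base + 16] memory operand.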
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
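
// Illustration (sketch) of the operand reordering in selectLoadStoreOp()
// above: a store such as
//   G_STORE %val(s32), %addr(p0)
// becomes the five-operand x86 memory form (base, scale, index, disp,
// segment) with the stored value last, roughly
//   MOV32mr %addr, 1, $noreg, 0, $noreg, %val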
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
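
// Illustration (sketch) of the LEA forms built by selectFrameIndexOrGep()
// above:
//   %p:gpr(p0) = G_FRAME_INDEX %stack.0  ==>  %p = LEA64r %stack.0, 1, $noreg, 0, $noreg
//   %p:gpr(p0) = G_GEP %a, %b            ==>  %p = LEA64r %a, 1, %b, 0, $noreg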
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
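
// Illustration (sketch) of the 64-bit immediate split in selectConstant()
// above: G_CONSTANT i64 -1 fits a sign-extended 32-bit field and selects to
// MOV64ri32, while G_CONSTANT i64 0x100000000 does not and needs the full
// MOV64ri encoding with a 64-bit immediate.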
// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating-point register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}
bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector register class
  // and goes into a floating-point class, just replace it with a copy, as we
  // are able to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done.
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
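
// Illustration (sketch) of the subregister copy emitted by
// selectTruncOrPtrToInt() above: a GPR truncation such as
//   %1:gpr(s32) = G_TRUNC %0(s64)
// is selected to, roughly,
//   %1:gr32 = COPY %0.sub_32bit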
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }

    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }

    I.eraseFromParent();
    return true;
  }

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
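
// Illustration (sketch) of the s1 path in selectZext() above: zero-extending
// an s1 into s32 emits, roughly,
//   %t:gr32 = SUBREG_TO_REG 0, %src, %subreg.sub_8bit
//   %dst:gr32 = AND32ri8 %t, 1, implicit-def $eflags
// so that only the low bit of the boolean survives.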
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point class
  // and goes into a vector class, just replace it with a copy, as we are
  // able to select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
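
// Illustration (sketch): %c:gpr(s8) = G_ICMP intpred(eq), %a(s32), %b(s32)
// is selected by selectCmp() above into, roughly,
//   CMP32rr %a, %b, implicit-def $eflags
//   %c:gr8 = SETEr implicit $eflags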
bool X86InstructionSelector::selectFCmp(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCMP) && "unexpected instruction");

  unsigned LhsReg = I.getOperand(2).getReg();
  unsigned RhsReg = I.getOperand(3).getReg();
  CmpInst::Predicate Predicate =
      (CmpInst::Predicate)I.getOperand(1).getPredicate();

  // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
  static const uint16_t SETFOpcTable[2][3] = {
      {X86::SETEr, X86::SETNPr, X86::AND8rr},
      {X86::SETNEr, X86::SETPr, X86::OR8rr}};
  const uint16_t *SETFOpc = nullptr;
  switch (Predicate) {
  default:
    break;
  case CmpInst::FCMP_OEQ:
    SETFOpc = &SETFOpcTable[0][0];
    break;
  case CmpInst::FCMP_UNE:
    SETFOpc = &SETFOpcTable[1][0];
    break;
  }

  // Compute the opcode for the CMP instruction.
  unsigned OpCmp;
  LLT Ty = MRI.getType(LhsReg);
  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 32:
    OpCmp = X86::UCOMISSrr;
    break;
  case 64:
    OpCmp = X86::UCOMISDrr;
    break;
  }

  unsigned ResultReg = I.getOperand(0).getReg();
  RBI.constrainGenericRegister(
      ResultReg,
      *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);
  if (SETFOpc) {
    MachineInstr &CmpInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
             .addReg(LhsReg)
             .addReg(RhsReg);

    unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
    unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
    MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[0]), FlagReg1);
    MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[1]), FlagReg2);
    MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                  TII.get(SETFOpc[2]), ResultReg)
                              .addReg(FlagReg1)
                              .addReg(FlagReg2);
    constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set1, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set2, TII, TRI, RBI);
    constrainSelectedInstRegOperands(Set3, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
  assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
  unsigned Opc = X86::getSETFromCond(CC);

  if (SwapArgs)
    std::swap(LhsReg, RhsReg);

  // Emit a compare of LHS/RHS.
  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LhsReg)
           .addReg(RhsReg);

  MachineInstr &Set =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc), ResultReg);
  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
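
// Illustration (sketch) of the two-flag case in selectFCmp() above: on x86,
// ucomiss sets ZF on equality but also PF on an unordered result, so
// fcmp oeq needs both checks, roughly
//   UCOMISSrr %lhs, %rhs, implicit-def $eflags
//   %f1:gr8 = SETEr implicit $eflags
//   %f2:gr8 = SETNPr implicit $eflags
//   %res:gr8 = AND8rr %f1, %f2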
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the def instruction for the carry-in operand.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // Carry set by the previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // Carry is constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
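
// Illustration (sketch; register names are illustrative) of a G_UADDE chain
// as handled by selectUadde() above, e.g. a 64-bit add split into two 32-bit
// halves:
//   %lo:gr32 = ADD32rr %a_lo, %b_lo, implicit-def $eflags
//   %carry:gr32 = COPY $eflags
//   $eflags = COPY %carry
//   %hi:gr32 = ADC32rr %a_hi, %b_hi, implicit-def $eflags, implicit $eflags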
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not an extract of a subvector.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to an X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}
bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not an insert of a subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to an X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split to inserts.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                                .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
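
// Illustration (sketch): G_BRCOND %cond(s1), %bb.2 is selected by
// selectCondBranch() above into, roughly,
//   TEST8ri %cond, 1, implicit-def $eflags
//   JNE_1 %bb.2, implicit $eflags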
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}
// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
// shift-by-1, but with an i8 shift count. In G_LSHR/G_ASHR/G_SHL, as in
// LLVM IR, both arguments have the same type, so for now only i8 shifts can
// use the auto-generated TableGen patterns.
bool X86InstructionSelector::selectShift(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {

  assert((I.getOpcode() == TargetOpcode::G_SHL ||
          I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

  const static struct ShiftEntry {
    unsigned SizeInBits;
    unsigned CReg;
    unsigned OpLSHR;
    unsigned OpASHR;
    unsigned OpSHL;
  } OpTable[] = {
      {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},      // i8
      {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},  // i16
      {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
      {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
  };

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  auto ShiftEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
        return El.SizeInBits == DstTy.getSizeInBits();
      });
  if (ShiftEntryIt == std::end(OpTable))
    return false;

  unsigned CReg = ShiftEntryIt->CReg;
  unsigned Opcode = 0;
  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
    Opcode = ShiftEntryIt->OpSHL;
    break;
  case TargetOpcode::G_ASHR:
    Opcode = ShiftEntryIt->OpASHR;
    break;
  case TargetOpcode::G_LSHR:
    Opcode = ShiftEntryIt->OpLSHR;
    break;
  default:
    return false;
  }

  unsigned Op0Reg = I.getOperand(1).getReg();
  unsigned Op1Reg = I.getOperand(2).getReg();

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          ShiftEntryIt->CReg)
      .addReg(Op1Reg);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
  if (CReg != X86::CL)
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL),
            X86::CL)
        .addReg(CReg, RegState::Kill);

  MachineInstr &ShiftInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg);

  constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}
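
// Illustration (sketch; register names are illustrative) of a variable shift
// as selected by selectShift() above, e.g. %d:gpr(s32) = G_LSHR %x, %amt:
//   $ecx = COPY %amt
//   $cl = KILL killed $ecx         ; narrow to the subregister actually read
//   %d:gr32 = SHR32rCL %x, implicit $cl, implicit-def $eflags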
bool X86InstructionSelector::selectSDiv(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {

  assert(I.getOpcode() == TargetOpcode::G_SDIV && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DividentReg = I.getOperand(1).getReg();
  const unsigned DiviserReg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(DividentReg) &&
         RegTy == MRI.getType(DiviserReg) &&
         "Arguments and return value types must match");

  const RegisterBank &RegRB = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct SDivEntry {
    unsigned SizeInBits;
    unsigned QuotientReg;
    unsigned DividentRegUpper;
    unsigned DividentRegLower;
    unsigned OpSignExtend;
    unsigned OpCopy;
    unsigned OpDiv;
  } OpTable[] = {
      {8, X86::AL, X86::NoRegister, X86::AX, 0, X86::MOVSX16rr8,
       X86::IDIV8r}, // i8
      {16, X86::AX, X86::DX, X86::AX, X86::CWD, TargetOpcode::COPY,
       X86::IDIV16r}, // i16
      {32, X86::EAX, X86::EDX, X86::EAX, X86::CDQ, TargetOpcode::COPY,
       X86::IDIV32r}, // i32
      {64, X86::RAX, X86::RDX, X86::RAX, X86::CQO, TargetOpcode::COPY,
       X86::IDIV64r} // i64
  };

  if (RegRB.getID() != X86::GPRRegBankID)
    return false;

  auto SDivEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [RegTy](const SDivEntry &El) {
        return El.SizeInBits == RegTy.getSizeInBits();
      });

  if (SDivEntryIt == std::end(OpTable))
    return false;

  const TargetRegisterClass *RegRC = getRegClass(RegTy, RegRB);
  if (!RBI.constrainGenericRegister(DividentReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DiviserReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpCopy),
          SDivEntryIt->DividentRegLower)
      .addReg(DividentReg);
  if (SDivEntryIt->DividentRegUpper != X86::NoRegister)
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(SDivEntryIt->OpSignExtend));
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpDiv))
      .addReg(DiviserReg);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          DstReg)
      .addReg(SDivEntryIt->QuotientReg);

  I.eraseFromParent();
  return true;
}
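
// Illustration (sketch) of the i32 expansion performed by selectSDiv() above,
// for %q:gpr(s32) = G_SDIV %num, %den:
//   $eax = COPY %num
//   CDQ                            ; sign-extend EAX into EDX:EAX
//   IDIV32r %den                   ; divide EDX:EAX, quotient in EAX
//   %q:gr32 = COPY $eax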
bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {

  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}