//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          Align Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF);
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF);
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectDivRem(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectIntrinsicWSideEffects(MachineInstr &I, MachineRegisterInfo &MRI,
                                   MachineFunction &MF) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // emit extract subreg instruction and insert it before MachineInstr &I
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}

static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
  assert(Reg.isPhysical());
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}

// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  Register DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  Register SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstReg.isPhysical()) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        Register ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!SrcReg.isPhysical() || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (SrcReg.isPhysical() &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      SrcReg.isPhysical()) {
    // Change the physical register to perform the truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

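// For example (illustrative MIR, not verbatim compiler output): a widening
// copy created by ABI lowering, such as
//   $rax = COPY %0:gpr(s32)
// is rewritten by the physical-register path above into
//   %ext:gr64 = SUBREG_TO_REG 0, %0, %subreg.sub_32bit
//   $rax = COPY %ext
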
bool X86InstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, *CoverageInfo))
    return true;

  LLVM_DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_FCMP:
    return selectFCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF);
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_CONCAT_VECTORS:
    return selectMergeValues(I, MRI, MF);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_UREM:
    return selectDivRem(I, MRI, MF);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWSideEffects(I, MRI, MF);
  }

  return false;
}

unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                Align Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm_alt
                       : HasAVX  ? X86::VMOVSSrm_alt
                                 : X86::MOVSSrm_alt)
                    : (HasAVX512 ? X86::VMOVSSZmr
                       : HasAVX  ? X86::VMOVSSmr
                                 : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm_alt
                       : HasAVX  ? X86::VMOVSDrm_alt
                                 : X86::MOVSDrm_alt)
                    : (HasAVX512 ? X86::VMOVSDZmr
                       : HasAVX  ? X86::VMOVSDmr
                                 : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= Align(16))
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= Align(32))
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= Align(64))
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}

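// A few illustrative mappings (not exhaustive): a G_LOAD of s32 on the GPR
// bank selects to MOV32rm; an aligned 128-bit vector load selects to
// MOVAPSrm, VMOVAPSrm, VMOVAPSZ128rm_NOVLX or VMOVAPSZ128rm depending on the
// SSE/AVX/AVX512/VLX feature set, with the unaligned MOVUPS-family forms
// used otherwise.
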
// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_PTR_ADD) {
    if (auto COff = getConstantVRegSExtVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}

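// For example (illustrative MIR): given
//   %addr:gpr(p0) = G_PTR_ADD %base, %cst
// where %cst is G_CONSTANT i64 16, the G_PTR_ADD case above folds the offset
// into the addressing mode (AM.Base.Reg = %base, AM.Disp = 16). A
// non-constant or overflowing offset falls through to the default case,
// which simply uses the defined pointer itself as the base register.
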
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  assert(I.hasOneMemOperand());
  auto &MemOp = **I.memoperands_begin();
  if (MemOp.isAtomic()) {
    // Note: for unordered operations, we rely on the fact the appropriate MMO
    // is already on the instruction we're mutating, and thus we don't need to
    // make any changes. So long as we select an opcode which is capable of
    // loading or storing the appropriate size atomically, the rest of the
    // backend is required to respect the MMO state.
    if (!MemOp.isUnordered()) {
      LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
      return false;
    }
    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
      LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
      return false;
    }
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}

bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_PTR_ADD) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

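// For example (illustrative MIR): %p:gpr(p0) = G_FRAME_INDEX %stack.0 is
// mutated in place into LEA64r %stack.0, 1, $noreg, 0, $noreg on x86-64,
// materializing the stack address with an LEA instead of an explicit add.
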
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load; not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base; not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const Register DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

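// For example (illustrative): G_CONSTANT i64 42 selects to MOV64ri32, since
// the value fits in a sign-extended 32-bit immediate, while G_CONSTANT i64
// 0x100000000 requires the full 64-bit MOV64ri encoding.
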
// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    LLVM_DEBUG(dbgs() << TII.getName(I.getOpcode())
                      << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If that's truncation of the value that lives on the vector class and goes
  // into the floating class, just replace it with copy, as we are able to
  // select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(16)) &&
         "8=>16 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(64)) &&
         "8=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(64)) &&
         "16=>64 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(32) && DstTy == LLT::scalar(64)) &&
         "32=>64 Zext is handled by tablegen");

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  Register DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    Register ImpDefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg);

    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::INSERT_SUBREG), DefReg)
        .addReg(ImpDefReg)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

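// For example (illustrative MIR): %d:gpr(s32) = G_ZEXT %b:gpr(s1) expands
// above into
//   %imp:gr32 = IMPLICIT_DEF
//   %ins:gr32 = INSERT_SUBREG %imp, %b, %subreg.sub_8bit
//   %d:gr32 = AND32ri8 %ins, 1
// masking the result down to bit 0, since only the low bit of an s1 value is
// defined.
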
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If that's ANY_EXT of the value that lives on the floating class and goes
  // into the vector class, just replace it with copy, as we are able to select
  // it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());

  Register LHS = I.getOperand(2).getReg();
  Register RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(X86::SETCCr),
                                   I.getOperand(0).getReg()).addImm(CC);

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

&I
,
944 MachineRegisterInfo
&MRI
,
945 MachineFunction
&MF
) const {
946 assert((I
.getOpcode() == TargetOpcode::G_FCMP
) && "unexpected instruction");
948 Register LhsReg
= I
.getOperand(2).getReg();
949 Register RhsReg
= I
.getOperand(3).getReg();
950 CmpInst::Predicate Predicate
=
951 (CmpInst::Predicate
)I
.getOperand(1).getPredicate();
953 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
954 static const uint16_t SETFOpcTable
[2][3] = {
955 {X86::COND_E
, X86::COND_NP
, X86::AND8rr
},
956 {X86::COND_NE
, X86::COND_P
, X86::OR8rr
}};
957 const uint16_t *SETFOpc
= nullptr;
961 case CmpInst::FCMP_OEQ
:
962 SETFOpc
= &SETFOpcTable
[0][0];
964 case CmpInst::FCMP_UNE
:
965 SETFOpc
= &SETFOpcTable
[1][0];
969 // Compute the opcode for the CMP instruction.
971 LLT Ty
= MRI
.getType(LhsReg
);
972 switch (Ty
.getSizeInBits()) {
976 OpCmp
= X86::UCOMISSrr
;
979 OpCmp
= X86::UCOMISDrr
;
983 Register ResultReg
= I
.getOperand(0).getReg();
984 RBI
.constrainGenericRegister(
986 *getRegClass(LLT::scalar(8), *RBI
.getRegBank(ResultReg
, MRI
, TRI
)), MRI
);
988 MachineInstr
&CmpInst
=
989 *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(OpCmp
))
993 Register FlagReg1
= MRI
.createVirtualRegister(&X86::GR8RegClass
);
994 Register FlagReg2
= MRI
.createVirtualRegister(&X86::GR8RegClass
);
995 MachineInstr
&Set1
= *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(),
996 TII
.get(X86::SETCCr
), FlagReg1
).addImm(SETFOpc
[0]);
997 MachineInstr
&Set2
= *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(),
998 TII
.get(X86::SETCCr
), FlagReg2
).addImm(SETFOpc
[1]);
999 MachineInstr
&Set3
= *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(),
1000 TII
.get(SETFOpc
[2]), ResultReg
)
1003 constrainSelectedInstRegOperands(CmpInst
, TII
, TRI
, RBI
);
1004 constrainSelectedInstRegOperands(Set1
, TII
, TRI
, RBI
);
1005 constrainSelectedInstRegOperands(Set2
, TII
, TRI
, RBI
);
1006 constrainSelectedInstRegOperands(Set3
, TII
, TRI
, RBI
);
1008 I
.eraseFromParent();
1014 std::tie(CC
, SwapArgs
) = X86::getX86ConditionCode(Predicate
);
1015 assert(CC
<= X86::LAST_VALID_COND
&& "Unexpected condition code.");
1018 std::swap(LhsReg
, RhsReg
);
1020 // Emit a compare of LHS/RHS.
1021 MachineInstr
&CmpInst
=
1022 *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(OpCmp
))
1027 *BuildMI(*I
.getParent(), I
, I
.getDebugLoc(), TII
.get(X86::SETCCr
), ResultReg
).addImm(CC
);
1028 constrainSelectedInstRegOperands(CmpInst
, TII
, TRI
, RBI
);
1029 constrainSelectedInstRegOperands(Set
, TII
, TRI
, RBI
);
1030 I
.eraseFromParent();
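// For example (illustrative): fcmp oeq cannot use a single SETCC because
// UCOMISS reports "unordered" with ZF=1 and PF=1; the sequence selected
// above,
//   UCOMISSrr %lhs, %rhs; %f1 = SETCCr COND_E; %f2 = SETCCr COND_NP;
//   %res = AND8rr %f1, %f2
// yields 1 only when the operands are both equal and ordered.
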
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register CarryOutReg = I.getOperand(1).getReg();
  const Register Op0Reg = I.getOperand(2).getReg();
  const Register Op1Reg = I.getOperand(3).getReg();
  Register CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the def instruction of the carry-in, looking through truncates.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // Carry set by a previous ADD.

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
            X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // Carry is constant; support only 0.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

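// For example (illustrative MIR): in a 64-bit addition split into 32-bit
// halves,
//   %lo:gpr(s32), %c0:gpr(s32) = G_UADDE %a0, %b0, %zero
//   %hi:gpr(s32), %c1:gpr(s32) = G_UADDE %a1, %b1, %c0
// the first G_UADDE sees a constant-zero carry-in and selects to ADD32rr,
// while the second copies %c0 into $eflags and selects to ADC32rr.
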
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not extract subvector.

  if (Index == 0) {
    // Replace by extract subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VEXTRACT immediate.
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register SrcReg = I.getOperand(1).getReg();
  const Register InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not insert subvector.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to X86 VINSERT immediate.
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split to extracts.
  unsigned NumDefs = I.getNumOperands() - 1;
  Register SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst))
      return false;
  }

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES ||
          I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS) &&
         "unexpected instruction");

  // Split to inserts.
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first src use insertSubReg.
  Register DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    Register Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                               .addReg(DefReg);

  if (!select(CopyInst))
    return false;

  I.eraseFromParent();
  return true;
}

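// For example (illustrative MIR): %d:vecr(<8 x s32>) = G_CONCAT_VECTORS
// %a(<4 x s32>), %b(<4 x s32>) is rewritten above into a subregister copy of
// %a into a fresh 256-bit register followed by a G_INSERT of %b at bit
// offset 128, and the newly built instructions are then selected recursively.
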
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const Register CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JCC_1))
      .addMBB(DestMBB).addImm(X86::COND_NE);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const Register DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  Align Alignment = Align(DstTy.getSizeInBytes());
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc =
      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
    // they cannot be folded into immediate fields.

    Register AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Alignment);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);

  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAG ISel the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO support the mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  Register DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                        << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

bool X86InstructionSelector::selectDivRem(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  // The implementation of this function is taken from X86FastISel.
  assert((I.getOpcode() == TargetOpcode::G_SDIV ||
          I.getOpcode() == TargetOpcode::G_SREM ||
          I.getOpcode() == TargetOpcode::G_UDIV ||
          I.getOpcode() == TargetOpcode::G_UREM) &&
         "unexpected instruction");

  const Register DstReg = I.getOperand(0).getReg();
  const Register Op1Reg = I.getOperand(1).getReg();
  const Register Op2Reg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(Op1Reg) && RegTy == MRI.getType(Op2Reg) &&
         "Arguments and return value types must match");

  const RegisterBank *RegRB = RBI.getRegBank(DstReg, MRI, TRI);
  if (!RegRB || RegRB->getID() != X86::GPRRegBankID)
    return false;

  const static unsigned NumTypes = 4; // i8, i16, i32, i64
  const static unsigned NumOps = 4;   // SDiv, SRem, UDiv, URem
  const static bool S = true;         // IsSigned
  const static bool U = false;        // !IsSigned
  const static unsigned Copy = TargetOpcode::COPY;
  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct DivRemEntry {
    // The following portion depends only on the data type.
    unsigned SizeInBits;
    unsigned LowInReg;  // low part of the register pair
    unsigned HighInReg; // high part of the register pair
    // The following portion depends on both the data type and the operation.
    struct DivRemResult {
      unsigned OpDivRem;        // The specific DIV/IDIV opcode to use.
      unsigned OpSignExtend;    // Opcode for sign-extending lowreg into
                                // highreg, or copying a zero into highreg.
      unsigned OpCopy;          // Opcode for copying dividend into lowreg, or
                                // zero/sign-extending into lowreg for i8.
      unsigned DivRemResultReg; // Register containing the desired result.
      bool IsOpSigned;          // Whether to use signed or unsigned form.
    } ResultTable[NumOps];
  } OpTable[NumTypes] = {
      {8,
       X86::AX,
       0,
       {
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S}, // SDiv
           {X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S}, // SRem
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U},  // UDiv
           {X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U},  // URem
       }},
      {16,
       X86::AX,
       X86::DX,
       {
           {X86::IDIV16r, X86::CWD, Copy, X86::AX, S},    // SDiv
           {X86::IDIV16r, X86::CWD, Copy, X86::DX, S},    // SRem
           {X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U}, // UDiv
           {X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U}, // URem
       }},
      {32,
       X86::EAX,
       X86::EDX,
       {
           {X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S},    // SDiv
           {X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S},    // SRem
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U}, // UDiv
           {X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U}, // URem
       }},
      {64,
       X86::RAX,
       X86::RDX,
       {
           {X86::IDIV64r, X86::CQO, Copy, X86::RAX, S},    // SDiv
           {X86::IDIV64r, X86::CQO, Copy, X86::RDX, S},    // SRem
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U}, // UDiv
           {X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U}, // URem
       }},
  };

  auto OpEntryIt = llvm::find_if(OpTable, [RegTy](const DivRemEntry &El) {
    return El.SizeInBits == RegTy.getSizeInBits();
  });
  if (OpEntryIt == std::end(OpTable))
    return false;

  unsigned OpIndex;
  switch (I.getOpcode()) {
  default:
    llvm_unreachable("Unexpected div/rem opcode");
  case TargetOpcode::G_SDIV:
    OpIndex = 0;
    break;
  case TargetOpcode::G_SREM:
    OpIndex = 1;
    break;
  case TargetOpcode::G_UDIV:
    OpIndex = 2;
    break;
  case TargetOpcode::G_UREM:
    OpIndex = 3;
    break;
  }

  const DivRemEntry &TypeEntry = *OpEntryIt;
  const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];

  const TargetRegisterClass *RegRC = getRegClass(RegTy, *RegRB);
  if (!RBI.constrainGenericRegister(Op1Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(Op2Reg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }

  // Move op1 into low-order input register.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpCopy),
          TypeEntry.LowInReg)
      .addReg(Op1Reg);
  // Zero-extend or sign-extend into high-order input register.
  if (OpEntry.OpSignExtend) {
    if (OpEntry.IsOpSigned)
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(OpEntry.OpSignExtend));
    else {
      Register Zero32 = MRI.createVirtualRegister(&X86::GR32RegClass);
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::MOV32r0),
              Zero32);

      // Copy the zero into the appropriate sub/super/identical physical
      // register. Unfortunately the operations needed are not uniform enough
      // to fit neatly into the table above.
      if (RegTy.getSizeInBits() == 16) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32, 0, X86::sub_16bit);
      } else if (RegTy.getSizeInBits() == 32) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy),
                TypeEntry.HighInReg)
            .addReg(Zero32);
      } else if (RegTy.getSizeInBits() == 64) {
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
            .addImm(0)
            .addReg(Zero32)
            .addImm(X86::sub_32bit);
      }
    }
  }
  // Generate the DIV/IDIV instruction.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpEntry.OpDivRem))
      .addReg(TypeEntry.LowInReg);
  // For i8 remainder, we can't reference ah directly, as we'll end
  // up with bogus copies like %r9b = COPY %ah. Reference ax
  // instead to prevent ah references in a rex instruction.
  //
  // The current assumption of the fast register allocator is that isel
  // won't generate explicit references to the GR8_NOREX registers. If
  // the allocator and/or the backend get enhanced to be more robust in
  // that regard, this can be, and should be, removed.
  if ((I.getOpcode() == TargetOpcode::G_SREM ||
       I.getOpcode() == TargetOpcode::G_UREM) &&
      OpEntry.DivRemResultReg == X86::AH && STI.is64Bit()) {
    Register SourceSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    Register ResultSuperReg = MRI.createVirtualRegister(&X86::GR16RegClass);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Copy), SourceSuperReg)
        .addReg(X86::AX);

    // Shift AX right by 8 bits instead of using AH.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SHR16ri),
            ResultSuperReg)
        .addReg(SourceSuperReg)
        .addImm(8);

    // Now reference the 8-bit subreg of the result.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG))
        .addDef(DstReg)
        .addImm(0)
        .addReg(ResultSuperReg)
        .addImm(X86::sub_8bit);
  } else {
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
            DstReg)
        .addReg(OpEntry.DivRemResultReg);
  }
  I.eraseFromParent();
  return true;
}

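// For example (illustrative MIR): %q:gpr(s32) = G_SDIV %a, %b selects above
// into
//   $eax = COPY %a
//   CDQ                  ; sign-extend $eax into $edx
//   IDIV32r %b           ; quotient in $eax, remainder in $edx
//   %q:gr32 = COPY $eax
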
bool X86InstructionSelector::selectIntrinsicWSideEffects(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const {

  assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
         "unexpected instruction");

  if (I.getOperand(0).getIntrinsicID() != Intrinsic::trap)
    return false;

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TRAP));

  I.eraseFromParent();
  return true;
}

InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}