//===-- X86MCInstLower.cpp - Convert X86 MachineInstr to an MCInst --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to lower X86 MachineInstrs to their corresponding
// MCInst records.
//
//===----------------------------------------------------------------------===//

#include "InstPrinter/X86ATTInstPrinter.h"
#include "X86MCInstLower.h"
#include "X86AsmPrinter.h"
#include "X86COFFMachineModuleInfo.h"
#include "X86MCAsmInfo.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Type.h"
using namespace llvm;

X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf,
                               X86AsmPrinter &asmprinter)
  : Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()),
    MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {}

MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const {
  return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>();
}

MCSymbol *X86MCInstLower::GetPICBaseSymbol() const {
  return static_cast<const X86TargetLowering*>(TM.getTargetLowering())->
    getPICBaseSymbol(&MF, Ctx);
}

/// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol
/// operand to an MCSymbol.
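/// For example, on Darwin a reference to "foo" carrying the MO_DARWIN_NONLAZY
/// flag ends up as a non-lazy pointer stub symbol, roughly "L_foo$non_lazy_ptr"
/// (illustrative name; the exact prefixes come from the mangler and MCAsmInfo).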
MCSymbol *X86MCInstLower::
GetSymbolFromOperand(const MachineOperand &MO) const {
  assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference");

  SmallString<128> Name;

  if (!MO.isGlobal()) {
    assert(MO.isSymbol());
    Name += MAI.getGlobalPrefix();
    Name += MO.getSymbolName();
  } else {
    const GlobalValue *GV = MO.getGlobal();
    bool isImplicitlyPrivate = false;
    if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
        MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE ||
        MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
      isImplicitlyPrivate = true;

    Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
  }

  // If the target flags on the operand change the name of the symbol, do that
  // before we return the symbol.
  switch (MO.getTargetFlags()) {
  default: break;
  case X86II::MO_DLLIMPORT: {
    // Handle dllimport linkage.
    const char *Prefix = "__imp_";
    Name.insert(Name.begin(), Prefix, Prefix+strlen(Prefix));
    break;
  }
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());

    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
    Name += "$non_lazy_ptr";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getHiddenGVStubEntry(Sym);
    if (StubSym.getPointer() == 0) {
      assert(MO.isGlobal() && "Extern symbol not handled yet");
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    }
    return Sym;
  }
  case X86II::MO_DARWIN_STUB: {
    Name += "$stub";
    MCSymbol *Sym = Ctx.GetOrCreateSymbol(Name.str());
    MachineModuleInfoImpl::StubValueTy &StubSym =
      getMachOMMI().getFnStubEntry(Sym);
    if (StubSym.getPointer())
      return Sym;

    if (MO.isGlobal()) {
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Mang->getSymbol(MO.getGlobal()),
                    !MO.getGlobal()->hasInternalLinkage());
    } else {
      Name.erase(Name.end()-5, Name.end());
      StubSym =
        MachineModuleInfoImpl::
        StubValueTy(Ctx.GetOrCreateSymbol(Name.str()), false);
    }
    return Sym;
  }
  }

  return Ctx.GetOrCreateSymbol(Name.str());
}

MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
                                             MCSymbol *Sym) const {
  // FIXME: We would like an efficient form for this, so we don't have to do a
  // lot of extra uniquing.
  const MCExpr *Expr = 0;
  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;

  switch (MO.getTargetFlags()) {
  default: llvm_unreachable("Unknown target flag on GV operand");
  case X86II::MO_NO_FLAG:    // No flag.
  // These affect the name of the symbol, not any suffix.
  case X86II::MO_DARWIN_NONLAZY:
  case X86II::MO_DLLIMPORT:
  case X86II::MO_DARWIN_STUB:
    break;

  case X86II::MO_TLVP:      RefKind = MCSymbolRefExpr::VK_TLVP; break;
  case X86II::MO_TLVP_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                                   MCSymbolRefExpr::Create(GetPICBaseSymbol(),
                                                           Ctx),
                                   Ctx);
    break;
  case X86II::MO_TLSGD:     RefKind = MCSymbolRefExpr::VK_TLSGD; break;
  case X86II::MO_GOTTPOFF:  RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
  case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
  case X86II::MO_TPOFF:     RefKind = MCSymbolRefExpr::VK_TPOFF; break;
  case X86II::MO_NTPOFF:    RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
  case X86II::MO_GOTPCREL:  RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
  case X86II::MO_GOT:       RefKind = MCSymbolRefExpr::VK_GOT; break;
  case X86II::MO_GOTOFF:    RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
  case X86II::MO_PLT:       RefKind = MCSymbolRefExpr::VK_PLT; break;
  case X86II::MO_PIC_BASE_OFFSET:
  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
    Expr = MCSymbolRefExpr::Create(Sym, Ctx);
    // Subtract the pic base.
    Expr = MCBinaryExpr::CreateSub(Expr,
                            MCSymbolRefExpr::Create(GetPICBaseSymbol(), Ctx),
                                   Ctx);
    if (MO.isJTI() && MAI.hasSetDirective()) {
      // If .set directive is supported, use it to reduce the number of
      // relocations the assembler will generate for differences between
      // local labels. This is only safe when the symbols are in the same
      // section so we are restricting it to jumptable references.
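      // For a jump-table reference this typically emits an assignment along
      // the lines of ".set Ltmp0, LJTI0_0-L0$pb" (illustrative label names),
      // and the instruction then refers to the assigned temporary symbol.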
      MCSymbol *Label = Ctx.CreateTempSymbol();
      AsmPrinter.OutStreamer.EmitAssignment(Label, Expr);
      Expr = MCSymbolRefExpr::Create(Label, Ctx);
    }
    break;
  }

  if (Expr == 0)
    Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);

  if (!MO.isJTI() && MO.getOffset())
    Expr = MCBinaryExpr::CreateAdd(Expr,
                                   MCConstantExpr::Create(MO.getOffset(), Ctx),
                                   Ctx);
  return MCOperand::CreateExpr(Expr);
}

static void lower_subreg32(MCInst *MI, unsigned OpNo) {
  // Convert registers in the addr mode according to subreg32.
  unsigned Reg = MI->getOperand(OpNo).getReg();
  if (Reg != 0)
    MI->getOperand(OpNo).setReg(getX86SubSuperRegister(Reg, MVT::i32));
}

static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
  // Convert registers in the addr mode according to subreg64.
  for (unsigned i = 0; i != 4; ++i) {
    if (!MI->getOperand(OpNo+i).isReg()) continue;

    unsigned Reg = MI->getOperand(OpNo+i).getReg();
    if (Reg == 0) continue;

    MI->getOperand(OpNo+i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
  }
}

/// LowerSubReg32_Op0 - Things like MOVZX16rr8 -> MOVZX32rr8.
static void LowerSubReg32_Op0(MCInst &OutMI, unsigned NewOpc) {
  OutMI.setOpcode(NewOpc);
  lower_subreg32(&OutMI, 0);
}

/// LowerUnaryToTwoAddr - R = setb   -> R = sbb R, R
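/// (For example, SETB_C32r %eax is lowered below to SBB32rr %eax, %eax, which
/// leaves 0 or -1 in %eax depending on the carry flag.)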
static void LowerUnaryToTwoAddr(MCInst &OutMI, unsigned NewOpc) {
  OutMI.setOpcode(NewOpc);
  OutMI.addOperand(OutMI.getOperand(0));
  OutMI.addOperand(OutMI.getOperand(0));
}

/// \brief Simplify FOO $imm, %{al,ax,eax,rax} to FOO $imm, for instructions
/// with a short fixed-register form.
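/// (For instance, ADD32ri of an immediate into %eax can be re-encoded as
/// ADD32i32, the short form that implicitly targets %eax.)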
static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  unsigned ImmOp = Inst.getNumOperands() - 1;
  assert(Inst.getOperand(0).isReg() && Inst.getOperand(ImmOp).isImm() &&
         ((Inst.getNumOperands() == 3 && Inst.getOperand(1).isReg() &&
           Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) ||
          Inst.getNumOperands() == 2) && "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(0).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(ImmOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

/// \brief Simplify things like MOV32rm to MOV32o32a.
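/// (Loads and stores of %al/%ax/%eax at a plain absolute address have short
/// "moffs" encodings; e.g. a MOV32rm load of %eax from such an address can
/// use MOV32o32a, as the table at the end of Lower() below shows.)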
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
  // Don't make these simplifications in 64-bit mode; other assemblers don't
  // perform them because they make the code larger.
  if (Printer.getSubtarget().is64Bit())
    return;

  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
  unsigned AddrBase = IsStore;
  unsigned RegOp = IsStore ? 0 : 5;
  unsigned AddrOp = AddrBase + 3;
  assert(Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
         Inst.getOperand(AddrBase + 0).isReg() && // base
         Inst.getOperand(AddrBase + 1).isImm() && // scale
         Inst.getOperand(AddrBase + 2).isReg() && // index register
         (Inst.getOperand(AddrOp).isExpr() ||     // address
          Inst.getOperand(AddrOp).isImm()) &&
         Inst.getOperand(AddrBase + 4).isReg() && // segment
         "Unexpected instruction!");

  // Check whether the destination register can be fixed.
  unsigned Reg = Inst.getOperand(RegOp).getReg();
  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
    return;

  // Check whether this is an absolute address.
  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
  // to do this here.
  bool Absolute = true;
  if (Inst.getOperand(AddrOp).isExpr()) {
    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
        Absolute = false;
  }

  if (Absolute &&
      (Inst.getOperand(AddrBase + 0).getReg() != 0 ||
       Inst.getOperand(AddrBase + 2).getReg() != 0 ||
       Inst.getOperand(AddrBase + 4).getReg() != 0 ||
       Inst.getOperand(AddrBase + 1).getImm() != 1))
    return;

  // If so, rewrite the instruction.
  MCOperand Saved = Inst.getOperand(AddrOp);
  Inst = MCInst();
  Inst.setOpcode(Opcode);
  Inst.addOperand(Saved);
}

void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  OutMI.setOpcode(MI->getOpcode());

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    switch (MO.getType()) {
    default:
      MI->dump();
      llvm_unreachable("unknown operand type");
    case MachineOperand::MO_Register:
      // Ignore all implicit register operands.
      if (MO.isImplicit()) continue;
      MCOp = MCOperand::CreateReg(MO.getReg());
      break;
    case MachineOperand::MO_Immediate:
      MCOp = MCOperand::CreateImm(MO.getImm());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
                       MO.getMBB()->getSymbol(), Ctx));
      break;
    case MachineOperand::MO_GlobalAddress:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_ExternalSymbol:
      MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
      break;
    case MachineOperand::MO_JumpTableIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_ConstantPoolIndex:
      MCOp = LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
      break;
    case MachineOperand::MO_BlockAddress:
      MCOp = LowerSymbolOperand(MO,
                     AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
      break;
    }

    OutMI.addOperand(MCOp);
  }

  // Handle a few special cases to eliminate operand modifiers.
ReSimplify:
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
    lower_lea64_32mem(&OutMI, 1);
    // FALL THROUGH.
  case X86::LEA64r:
  case X86::LEA16r:
  case X86::LEA32r:
    // LEA should have a segment register, but it must be empty.
    assert(OutMI.getNumOperands() == 1+X86::AddrNumOperands &&
           "Unexpected # of LEA operands");
    assert(OutMI.getOperand(1+X86::AddrSegmentReg).getReg() == 0 &&
           "LEA has segment specified!");
    break;
  case X86::MOVZX16rr8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
  case X86::MOVZX16rm8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
  case X86::MOVSX16rr8:   LowerSubReg32_Op0(OutMI, X86::MOVSX32rr8); break;
  case X86::MOVSX16rm8:   LowerSubReg32_Op0(OutMI, X86::MOVSX32rm8); break;
  case X86::MOVZX64rr32:  LowerSubReg32_Op0(OutMI, X86::MOV32rr); break;
  case X86::MOVZX64rm32:  LowerSubReg32_Op0(OutMI, X86::MOV32rm); break;
  case X86::MOV64ri64i32: LowerSubReg32_Op0(OutMI, X86::MOV32ri); break;
  case X86::MOVZX64rr8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rr8); break;
  case X86::MOVZX64rm8:   LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break;
  case X86::MOVZX64rr16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break;
  case X86::MOVZX64rm16:  LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break;
  case X86::SETB_C8r:     LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break;
  case X86::SETB_C16r:    LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break;
  case X86::SETB_C32r:    LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break;
  case X86::SETB_C64r:    LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break;
  case X86::MOV8r0:       LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break;
  case X86::MOV32r0:      LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break;
  case X86::FsFLD0SS:     LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
  case X86::FsFLD0SD:     LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
  case X86::V_SET0PS:     LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break;
  case X86::V_SET0PD:     LowerUnaryToTwoAddr(OutMI, X86::XORPDrr); break;
  case X86::V_SET0PI:     LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break;
  case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break;
  case X86::AVX_SET0PS:   LowerUnaryToTwoAddr(OutMI, X86::VXORPSrr); break;
  case X86::AVX_SET0PSY:  LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break;
  case X86::AVX_SET0PD:   LowerUnaryToTwoAddr(OutMI, X86::VXORPDrr); break;
  case X86::AVX_SET0PDY:  LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break;
  case X86::AVX_SET0PI:   LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break;

  case X86::MOV16r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV16r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;
  case X86::MOV64r0:
    LowerSubReg32_Op0(OutMI, X86::MOV32r0);   // MOV64r0 -> MOV32r0
    LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
    break;

  // TAILJMPr64, [WIN]CALL64r, [WIN]CALL64pcrel32 - These instructions have
  // register inputs modeled as normal uses instead of implicit uses.  As such,
  // truncate off all but the first operand (the callee).  FIXME: Change isel.
  case X86::TAILJMPr64:
  case X86::CALL64r:
  case X86::CALL64pcrel32:
  case X86::WINCALL64r:
  case X86::WINCALL64pcrel32: {
    unsigned Opcode = OutMI.getOpcode();
    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    OutMI = MCInst();
    OutMI.setOpcode(X86::RET);
    break;
  }

  // TAILJMPd, TAILJMPd64 - Lower to the correct jump instructions.
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64: {
    unsigned Opcode;
    switch (OutMI.getOpcode()) {
    default: assert(0 && "Invalid opcode");
    case X86::TAILJMPr:   Opcode = X86::JMP32r; break;
    case X86::TAILJMPd:
    case X86::TAILJMPd64: Opcode = X86::JMP_1; break;
    }

    MCOperand Saved = OutMI.getOperand(0);
    OutMI = MCInst();
    OutMI.setOpcode(Opcode);
    OutMI.addOperand(Saved);
    break;
  }

  // These are pseudo-ops for OR to help with the OR->ADD transformation.  We do
  // this with an ugly goto in case the resultant OR uses EAX and needs the
  // short form.
  case X86::ADD16rr_DB:   OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
  case X86::ADD32rr_DB:   OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
  case X86::ADD64rr_DB:   OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
  case X86::ADD16ri_DB:   OutMI.setOpcode(X86::OR16ri); goto ReSimplify;
  case X86::ADD32ri_DB:   OutMI.setOpcode(X86::OR32ri); goto ReSimplify;
  case X86::ADD64ri32_DB: OutMI.setOpcode(X86::OR64ri32); goto ReSimplify;
  case X86::ADD16ri8_DB:  OutMI.setOpcode(X86::OR16ri8); goto ReSimplify;
  case X86::ADD32ri8_DB:  OutMI.setOpcode(X86::OR32ri8); goto ReSimplify;
  case X86::ADD64ri8_DB:  OutMI.setOpcode(X86::OR64ri8); goto ReSimplify;

  // The assembler backend wants to see branches in their small form and relax
  // them to their large form.  The JIT can only handle the large form because
  // it does not do relaxation.  For now, translate the large form to the
  // small one here.
  case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
  case X86::JO_4:  OutMI.setOpcode(X86::JO_1); break;
  case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
  case X86::JB_4:  OutMI.setOpcode(X86::JB_1); break;
  case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
  case X86::JE_4:  OutMI.setOpcode(X86::JE_1); break;
  case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
  case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
  case X86::JA_4:  OutMI.setOpcode(X86::JA_1); break;
  case X86::JS_4:  OutMI.setOpcode(X86::JS_1); break;
  case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
  case X86::JP_4:  OutMI.setOpcode(X86::JP_1); break;
  case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
  case X86::JL_4:  OutMI.setOpcode(X86::JL_1); break;
  case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
  case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
  case X86::JG_4:  OutMI.setOpcode(X86::JG_1); break;

  // We don't currently select the correct instruction form for instructions
  // which have a short %eax, etc. form.  Handle this by custom lowering, for
  // now.
  //
  // Note, we are currently not handling the following instructions:
  // MOV64ao8, MOV64o8a
  // XCHG16ar, XCHG32ar, XCHG64ar
  case X86::MOV8mr_NOREX:
  case X86::MOV8mr:   SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break;
  case X86::MOV8rm_NOREX:
  case X86::MOV8rm:   SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break;
  case X86::MOV16mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break;
  case X86::MOV16rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break;
  case X86::MOV32mr:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break;
  case X86::MOV32rm:  SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break;

  case X86::ADC8ri:     SimplifyShortImmForm(OutMI, X86::ADC8i8); break;
  case X86::ADC16ri:    SimplifyShortImmForm(OutMI, X86::ADC16i16); break;
  case X86::ADC32ri:    SimplifyShortImmForm(OutMI, X86::ADC32i32); break;
  case X86::ADC64ri32:  SimplifyShortImmForm(OutMI, X86::ADC64i32); break;
  case X86::ADD8ri:     SimplifyShortImmForm(OutMI, X86::ADD8i8); break;
  case X86::ADD16ri:    SimplifyShortImmForm(OutMI, X86::ADD16i16); break;
  case X86::ADD32ri:    SimplifyShortImmForm(OutMI, X86::ADD32i32); break;
  case X86::ADD64ri32:  SimplifyShortImmForm(OutMI, X86::ADD64i32); break;
  case X86::AND8ri:     SimplifyShortImmForm(OutMI, X86::AND8i8); break;
  case X86::AND16ri:    SimplifyShortImmForm(OutMI, X86::AND16i16); break;
  case X86::AND32ri:    SimplifyShortImmForm(OutMI, X86::AND32i32); break;
  case X86::AND64ri32:  SimplifyShortImmForm(OutMI, X86::AND64i32); break;
  case X86::CMP8ri:     SimplifyShortImmForm(OutMI, X86::CMP8i8); break;
  case X86::CMP16ri:    SimplifyShortImmForm(OutMI, X86::CMP16i16); break;
  case X86::CMP32ri:    SimplifyShortImmForm(OutMI, X86::CMP32i32); break;
  case X86::CMP64ri32:  SimplifyShortImmForm(OutMI, X86::CMP64i32); break;
  case X86::OR8ri:      SimplifyShortImmForm(OutMI, X86::OR8i8); break;
  case X86::OR16ri:     SimplifyShortImmForm(OutMI, X86::OR16i16); break;
  case X86::OR32ri:     SimplifyShortImmForm(OutMI, X86::OR32i32); break;
  case X86::OR64ri32:   SimplifyShortImmForm(OutMI, X86::OR64i32); break;
  case X86::SBB8ri:     SimplifyShortImmForm(OutMI, X86::SBB8i8); break;
  case X86::SBB16ri:    SimplifyShortImmForm(OutMI, X86::SBB16i16); break;
  case X86::SBB32ri:    SimplifyShortImmForm(OutMI, X86::SBB32i32); break;
  case X86::SBB64ri32:  SimplifyShortImmForm(OutMI, X86::SBB64i32); break;
  case X86::SUB8ri:     SimplifyShortImmForm(OutMI, X86::SUB8i8); break;
  case X86::SUB16ri:    SimplifyShortImmForm(OutMI, X86::SUB16i16); break;
  case X86::SUB32ri:    SimplifyShortImmForm(OutMI, X86::SUB32i32); break;
  case X86::SUB64ri32:  SimplifyShortImmForm(OutMI, X86::SUB64i32); break;
  case X86::TEST8ri:    SimplifyShortImmForm(OutMI, X86::TEST8i8); break;
  case X86::TEST16ri:   SimplifyShortImmForm(OutMI, X86::TEST16i16); break;
  case X86::TEST32ri:   SimplifyShortImmForm(OutMI, X86::TEST32i32); break;
  case X86::TEST64ri32: SimplifyShortImmForm(OutMI, X86::TEST64i32); break;
  case X86::XOR8ri:     SimplifyShortImmForm(OutMI, X86::XOR8i8); break;
  case X86::XOR16ri:    SimplifyShortImmForm(OutMI, X86::XOR16i16); break;
  case X86::XOR32ri:    SimplifyShortImmForm(OutMI, X86::XOR32i32); break;
  case X86::XOR64ri32:  SimplifyShortImmForm(OutMI, X86::XOR64i32); break;
  }
}

void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
  X86MCInstLower MCInstLowering(Mang, *MF, *this);
  switch (MI->getOpcode()) {
  case TargetOpcode::DBG_VALUE:
    if (isVerbose() && OutStreamer.hasRawTextSupport()) {
      std::string TmpStr;
      raw_string_ostream OS(TmpStr);
      PrintDebugValueComment(MI, OS);
      OutStreamer.EmitRawText(StringRef(OS.str()));
    }
    return;

  // Emit nothing here but a comment if we can.
  case X86::Int_MemBarrier:
    if (OutStreamer.hasRawTextSupport())
      OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER"));
    return;

  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    // Lower these as normal, but add some comments.
    unsigned Reg = MI->getOperand(0).getReg();
    OutStreamer.AddComment(StringRef("eh_return, addr: %") +
                           X86ATTInstPrinter::getRegisterName(Reg));
    break;
  }
  case X86::TAILJMPr:
  case X86::TAILJMPd:
  case X86::TAILJMPd64:
    // Lower these as normal, but add some comments.
    OutStreamer.AddComment("TAILCALL");
    break;

  case X86::MOVPC32r: {
    MCInst TmpInst;
    // This is a pseudo op for a two instruction sequence with a label, which
    // looks like:
    //     call "L1$pb"
    // "L1$pb":
    //     popl %esi

    // Emit the call.
    MCSymbol *PICBase = MCInstLowering.GetPICBaseSymbol();
    TmpInst.setOpcode(X86::CALLpcrel32);
    // FIXME: We would like an efficient form for this, so we don't have to do a
    // lot of extra uniquing.
    TmpInst.addOperand(MCOperand::CreateExpr(MCSymbolRefExpr::Create(PICBase,
                                                                 OutContext)));
    OutStreamer.EmitInstruction(TmpInst);

    // Emit the label.
    OutStreamer.EmitLabel(PICBase);

    // popl $reg
    TmpInst.setOpcode(X86::POP32r);
    TmpInst.getOperand(0) = MCOperand::CreateReg(MI->getOperand(0).getReg());
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }

  case X86::ADD32ri: {
    // Lower the MO_GOT_ABSOLUTE_ADDRESS form of ADD32ri.
    if (MI->getOperand(2).getTargetFlags() != X86II::MO_GOT_ABSOLUTE_ADDRESS)
      break;

    // Okay, we have something like:
    //  EAX = ADD32ri EAX, MO_GOT_ABSOLUTE_ADDRESS(@MYGLOBAL)

    // For this, we want to print something like:
    //   MYGLOBAL + (. - PICBASE)
    // However, we can't generate a ".", so just emit a new label here and refer
    // to it.
    MCSymbol *DotSym = OutContext.CreateTempSymbol();
    OutStreamer.EmitLabel(DotSym);

    // Now that we have emitted the label, lower the complex operand expression.
    MCSymbol *OpSym = MCInstLowering.GetSymbolFromOperand(MI->getOperand(2));

    const MCExpr *DotExpr = MCSymbolRefExpr::Create(DotSym, OutContext);
    const MCExpr *PICBase =
      MCSymbolRefExpr::Create(MCInstLowering.GetPICBaseSymbol(), OutContext);
    DotExpr = MCBinaryExpr::CreateSub(DotExpr, PICBase, OutContext);

    DotExpr = MCBinaryExpr::CreateAdd(MCSymbolRefExpr::Create(OpSym, OutContext),
                                      DotExpr, OutContext);

    MCInst TmpInst;
    TmpInst.setOpcode(X86::ADD32ri);
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(0).getReg()));
    TmpInst.addOperand(MCOperand::CreateReg(MI->getOperand(1).getReg()));
    TmpInst.addOperand(MCOperand::CreateExpr(DotExpr));
    OutStreamer.EmitInstruction(TmpInst);
    return;
  }
  }

  MCInst TmpInst;
  MCInstLowering.Lower(MI, TmpInst);
  OutStreamer.EmitInstruction(TmpInst);
}