//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetAsmBackend.h"
#include "X86.h"
#include "X86FixupKinds.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/ELFObjectWriter.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFormat.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MachObjectWriter.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegistry.h"

using namespace llvm;

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: assert(0 && "invalid fixup kind!");
  case X86::reloc_pcrel_1byte:
  case FK_Data_1: return 0;
  case X86::reloc_pcrel_2byte:
  case FK_Data_2: return 1;
  case X86::reloc_pcrel_4byte:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_Data_4: return 2;
  case FK_Data_8: return 3;
  }
}
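
// For example, X86::reloc_pcrel_4byte and FK_Data_4 both map to a log2 size
// of 2, so ApplyFixup below patches 1 << 2 == 4 bytes of fragment data.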

namespace {
class X86AsmBackend : public TargetAsmBackend {
public:
  X86AsmBackend(const Target &T)
    : TargetAsmBackend(T) {}

  void ApplyFixup(const MCFixup &Fixup, MCDataFragment &DF,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DF.getContents().size() &&
           "Invalid fixup offset!");
    for (unsigned i = 0; i != Size; ++i)
      DF.getContents()[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }
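
  // The loop above stores Value least-significant byte first, matching x86's
  // little-endian encoding of immediates and displacements; e.g. a 4-byte
  // fixup value of 0x12345678 lands in the fragment as 78 56 34 12.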

  bool MayNeedRelaxation(const MCInst &Inst) const;

  void RelaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool WriteNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}
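
// The *_1 forms encode a rel8 displacement and the *_4 forms a rel32, so the
// relaxed instruction is longer; e.g. "je target" grows from 2 bytes
// (0x74 + rel8) to 6 bytes (0x0F 0x84 + rel32).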

static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;
  }
}
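
// The ri8/mi8 forms take a sign-extended 8-bit immediate, while the plain
// ri/mi forms take a full-width one; e.g. "addl $127, %eax" fits ADD32ri8 but
// "addl $128, %eax" needs ADD32ri. The 64-bit forms relax to ri32/mi32
// because x86-64 ALU instructions have no 64-bit immediate encoding.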

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::MayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}
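
// For example, an ADD32ri8 whose immediate is a yet-unresolved MCExpr (a
// symbolic value such as "$foo") reports true here, since only layout can
// decide whether the value fits the imm8 encoding.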

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::RelaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // X86 only relaxes from a 1-byte pc-relative or immediate field to a
  // 4-byte one.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// WriteNopData - Write optimal nops to the output file to cover \arg Count
/// bytes, padding with single byte nops beyond the longest optimal sequence
/// (15 bytes). Returns true on success.
///
/// FIXME: this is X86 32-bit specific and should move to a better place.
bool X86AsmBackend::WriteNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[16][16] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00,
     0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
     0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    // nopl 0L(%[re]ax)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,
     0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
     0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00,
     0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}
  };

  // Write an optimal sequence for the first 15 bytes.
  uint64_t OptimalCount = (Count < 16) ? Count : 15;
  for (uint64_t i = 0, e = OptimalCount; i != e; ++i)
    OW->Write8(Nops[OptimalCount - 1][i]);

  // Finish with single byte nops.
  for (uint64_t i = OptimalCount, e = Count; i != e; ++i)
    OW->Write8(0x90);

  return true;
}
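
// For example, Count == 7 emits the single 7-byte "nopl 0L(%[re]ax)" form,
// while Count == 20 emits the 15-byte two-instruction sequence followed by
// five single-byte 0x90 nops.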

/* *** */

namespace {
class ELFX86AsmBackend : public X86AsmBackend {
  MCELFObjectFormat Format;

public:
  Triple::OSType OSType;
  ELFX86AsmBackend(const Target &T, Triple::OSType _OSType)
    : X86AsmBackend(T), OSType(_OSType) {
    HasScatteredSymbols = true;
    HasReliableSymbolDifference = true;
  }

  virtual const MCObjectFormat &getObjectFormat() const {
    return Format;
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
    return ES.getFlags() & MCSectionELF::SHF_MERGE;
  }

  bool isVirtualSection(const MCSection &Section) const {
    const MCSectionELF &SE = static_cast<const MCSectionELF&>(Section);
    return SE.getType() == MCSectionELF::SHT_NOBITS;
  }
};
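
// An SHT_NOBITS section such as .bss occupies no bytes in the object file, so
// the writer only records its size; the COFF and Mach-O backends below make
// the analogous checks for their formats.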

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, Triple::OSType OSType)
    : ELFX86AsmBackend(T, OSType) {}

  unsigned getPointerSize() const {
    return 4;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return new ELFObjectWriter(OS, /*Is64Bit=*/false,
                               OSType, ELF::EM_386,
                               /*IsLittleEndian=*/true,
                               /*HasRelocationAddend=*/false);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, Triple::OSType OSType)
    : ELFX86AsmBackend(T, OSType) {}

  unsigned getPointerSize() const {
    return 8;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return new ELFObjectWriter(OS, /*Is64Bit=*/true,
                               OSType, ELF::EM_X86_64,
                               /*IsLittleEndian=*/true,
                               /*HasRelocationAddend=*/true);
  }
};
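
// The HasRelocationAddend arguments mirror the ELF relocation formats: i386
// uses REL entries with the addend stored in the relocated field, while
// x86_64 uses RELA entries that carry an explicit addend.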

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;
  MCCOFFObjectFormat Format;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit)
    : X86AsmBackend(T)
    , Is64Bit(is64Bit) {
    HasScatteredSymbols = true;
  }

  virtual const MCObjectFormat &getObjectFormat() const {
    return Format;
  }

  unsigned getPointerSize() const {
    if (Is64Bit)
      return 8;
    else
      return 4;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createWinCOFFObjectWriter(OS, Is64Bit);
  }

  bool isVirtualSection(const MCSection &Section) const {
    const MCSectionCOFF &SE = static_cast<const MCSectionCOFF&>(Section);
    return SE.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
  }
};

class DarwinX86AsmBackend : public X86AsmBackend {
  MCMachOObjectFormat Format;

public:
  DarwinX86AsmBackend(const Target &T)
    : X86AsmBackend(T) {
    HasScatteredSymbols = true;
  }

  virtual const MCObjectFormat &getObjectFormat() const {
    return Format;
  }

  bool isVirtualSection(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return (SMO.getType() == MCSectionMachO::S_ZEROFILL ||
            SMO.getType() == MCSectionMachO::S_GB_ZEROFILL ||
            SMO.getType() == MCSectionMachO::S_THREAD_LOCAL_ZEROFILL);
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_32AsmBackend(const Target &T)
    : DarwinX86AsmBackend(T) {}

  unsigned getPointerSize() const {
    return 4;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return new MachObjectWriter(OS, /*Is64Bit=*/false);
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_64AsmBackend(const Target &T)
    : DarwinX86AsmBackend(T) {
    HasReliableSymbolDifference = true;
  }

  unsigned getPointerSize() const {
    return 8;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return new MachObjectWriter(OS, /*Is64Bit=*/true);
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve
    // the access to the appropriate atom unless an external relocation is
    // used. For non-cstring sections, we expect the compiler to use a
    // non-temporary label for anything that could have an addend pointing
    // outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
  }

  virtual bool isSectionAtomizable(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed-size data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MCSectionMachO::S_4BYTE_LITERALS:
    case MCSectionMachO::S_8BYTE_LITERALS:
    case MCSectionMachO::S_16BYTE_LITERALS:
    case MCSectionMachO::S_LITERAL_POINTERS:
    case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
    case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
    case MCSectionMachO::S_INTERPOSING:
      return false;
    }
  }
};

} // end anonymous namespace

TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                               const std::string &TT) {
  switch (Triple(TT).getOS()) {
  case Triple::Darwin:
    return new DarwinX86_32AsmBackend(T);
  case Triple::MinGW32:
  case Triple::Cygwin:
  case Triple::Win32:
    return new WindowsX86AsmBackend(T, false);
  default:
    return new ELFX86_32AsmBackend(T, Triple(TT).getOS());
  }
}

TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                               const std::string &TT) {
  switch (Triple(TT).getOS()) {
  case Triple::Darwin:
    return new DarwinX86_64AsmBackend(T);
  case Triple::MinGW64:
  case Triple::Cygwin:
  case Triple::Win32:
    return new WindowsX86AsmBackend(T, true);
  default:
    return new ELFX86_64AsmBackend(T, Triple(TT).getOS());
  }
}
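
// A minimal registration sketch (assuming the TargetRegistry hooks of this
// LLVM vintage; the initializer name below is hypothetical):
//
//   extern "C" void LLVMInitializeX86AsmBackends() {
//     TargetRegistry::RegisterAsmBackend(TheX86_32Target,
//                                        createX86_32AsmBackend);
//     TargetRegistry::RegisterAsmBackend(TheX86_64Target,
//                                        createX86_64AsmBackend);
//   }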