//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;
#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;
  const MCInstrInfo &MCII;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : Ctx(ctx), MCII(mcii) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

private:
  FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
  void
  verifyInstructionPredicates(const MCInst &MI,
                              const FeatureBitset &AvailableFeatures) const;
};

} // end anonymous namespace
/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
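// Register operands are encoded with their hardware encoding as recorded in
// MCRegisterInfo; plain immediates are returned unchanged. Symbolic operands
// are not expected to reach this generic hook on AArch64 (they go through the
// operand-specific encoders below), hence the assert.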
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}
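// When the load/store offset is still a symbolic expression, the 12-bit
// immediate field is left as zero and a fixup of the kind given by the
// template parameter is recorded, so a later pass (or the linker) can patch
// in the scaled offset.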
template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}
/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
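// Note: ADR and ADRP share this operand type but need different fixups: ADR's
// 21-bit immediate is a byte offset, while ADRP's is measured in 4KiB pages,
// so the opcode selects which pc-relative fixup kind is emitted below.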
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}
/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
/// return value.
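// Note: since ShiftVal can only be 0 or 12 here, "1 << ShiftVal" sets bit 12
// of the returned value to flag the LSL #12 form; the same bit is forced for
// the *_HI12 TLS/SECREL relocation variants handled at the end.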
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}
/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
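// The fixup is a 19-bit, 4-byte-scaled pc-relative offset (roughly +/-1MiB),
// shared by B.cond and the compare-and-branch forms.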
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}
/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
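// Also a 19-bit, 4-byte-scaled pc-relative offset, but it gets its own fixup
// kind because LDR (literal) relocates differently from the branch forms.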
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}
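// getMemExtendOpValue - see the declaration above: the two suboperands are
// folded into bit 1 (sign- vs zero-extend) and bit 0 (shift present).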
uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}
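// Symbolic MOVZ/MOVK immediates all use the single fixup_aarch64_movw kind;
// the AArch64MCExpr variant attached to the expression determines the exact
// :abs_gN:/TLS relocation later, and fixMOVZ below adjusts the opcode bits
// for the signed variants.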
uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}
/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
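// TBZ/TBNZ use a 14-bit, 4-byte-scaled pc-relative offset, so the reachable
// range is only about +/-32KiB.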
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}
/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
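// B and BL both take a 26-bit, 4-byte-scaled offset (+/-128MiB), but BL is
// given a distinct call26 fixup so the linker can tell calls from plain
// branches (e.g. when deciding about veneers or PLT entries).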
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}
/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
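// This two-bit code is the shift-amount field of the shifted-immediate vector
// forms (e.g. MOVI/MVNI/ORR/BIC with an optional LSL of 0, 8, 16 or 24).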
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}
/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}
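// The vector right-shift encoders below store the element size minus the
// shift amount (e.g. a 64-bit right shift by #n is encoded as 64 - n).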
uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}
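// SVE "imm8 with optional lsl #8": the low eight bits carry the immediate,
// and because ShiftVal is either 0 or 8, "1 << ShiftVal" sets bit 8 to mark
// the shifted form.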
uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}
uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}
unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
  switch (A64E->getKind()) {
  case AArch64MCExpr::VK_DTPREL_G2:
  case AArch64MCExpr::VK_DTPREL_G1:
  case AArch64MCExpr::VK_DTPREL_G0:
  case AArch64MCExpr::VK_GOTTPREL_G1:
  case AArch64MCExpr::VK_TPREL_G2:
  case AArch64MCExpr::VK_TPREL_G1:
  case AArch64MCExpr::VK_TPREL_G0:
    return EncodedValue & ~(1u << 30);
  default:
    // Nothing to do for an unsigned fixup.
    return EncodedValue;
  }

  return EncodedValue & ~(1u << 30);
}
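// Note: A64 instructions are always 4 bytes and are stored little-endian even
// when the target's data endianness is big-endian, which is why the encoded
// word below is written with support::little unconditionally.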
void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    MCFixupKind Fixup = MCFixupKind(AArch64::fixup_aarch64_tlsdesc_call);
    Fixups.push_back(MCFixup::create(0, MI.getOperand(0).getExpr(), Fixup));
    return;
  } else if (MI.getOpcode() == AArch64::CompilerBarrier) {
    // This just prevents the compiler from reordering accesses, no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(OS, Binary, support::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}
unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}
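// For exclusive loads/stores that have no status register (Rs) or second
// transfer register (Rt2) operand, those fields must still read as all-ones:
// 0x001F0000 fills Rs (bits 16-20) and 0x00007C00 fills Rt2 (bits 10-14).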
template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}
unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}
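// Defining ENABLE_INSTR_PREDICATE_VERIFIER before including the TableGen'd
// file pulls in computeAvailableFeatures() and verifyInstructionPredicates(),
// which encodeInstruction() above uses to reject instructions that are not
// legal for the current subtarget; getBinaryCodeForInstr() comes from the
// same generated file.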
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}