//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &, MCContext &ctx) : Ctx(ctx) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getCondCompBranchTargetOpValue - Return the encoded value for a
  /// conditional compare-and-branch target.
  uint32_t getCondCompBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  /// getPAuthPCRelOpValue - Return the encoded value for a pointer
  /// authentication pc-relative operand.
  uint32_t getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  // FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

  template <unsigned Multiple, unsigned Min, unsigned Max>
  uint32_t EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
  uint32_t EncodeZK(const MCInst &MI, unsigned OpIdx,
                    SmallVectorImpl<MCFixup> &Fixups,
                    const MCSubtargetInfo &STI) const;
  uint32_t EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  uint32_t EncodeZPR2StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;
  uint32_t EncodeZPR4StridedRegisterClass(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const;

  uint32_t EncodeMatrixTileListRegisterClass(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const;
  template <unsigned BaseReg>
  uint32_t encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 13-14 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));

  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 0;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

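// Worked example (illustrative, not part of the original source): for an
// immediate operand of 1 with an LSL #12 shifter, the immediate path above
// returns 1 | (1 << 12) = 0x1001, i.e. the raw 12-bit value with the shift
// indicator folded into the returned bits exactly as computed by the code.
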
/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getCondCompBranchTargetOpValue - Return the encoded value for a conditional
/// compare-and-branch target.
uint32_t AArch64MCCodeEmitter::getCondCompBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch9);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getPAuthPCRelOpValue - Return the encoded value for a pointer
/// authentication pc-relative operand.
uint32_t
AArch64MCCodeEmitter::getPAuthPCRelOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, invert sign as it's a negative value
  // that should be encoded as unsigned.
  if (MO.isImm())
    return -(MO.getImm());
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch16);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
  return (SignExtend << 1) | DoShift;
}

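// Illustrative reading of the encoding above (not from the original source):
// a sign-extending, shifted extend (SignExtend = 1, DoShift = 1) yields
// (1 << 1) | 1 = 0b11, while a plain zero-extend with no shift yields 0b00.
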
uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected ADR target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
  return MO.getImm() - 8;
}

template <unsigned Multiple, unsigned Min, unsigned Max>
uint32_t
AArch64MCCodeEmitter::EncodeRegMul_MinMax(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  assert(llvm::isPowerOf2_32(Multiple) && "Multiple is not a power of 2");
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  assert(RegVal >= Min && RegVal <= Max && (RegVal & (Multiple - 1)) == 0);
  return (RegVal - Min) / Multiple;
}

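// Illustrative example with hypothetical template arguments (not from the
// original source): with Multiple = 4, Min = 0 and Max = 28, a register whose
// hardware encoding is 12 passes the assertions and is emitted as
// (12 - 0) / 4 = 3.
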
// Zk is the name of the control vector register Z20-Z23 or Z28-Z31, encoded in
// the "K:Zk" fields. Z20-Z23 = 000, 001, 010, 011 and Z28-Z31 = 100, 101, 110,
// 111.
uint32_t AArch64MCCodeEmitter::EncodeZK(const MCInst &MI, unsigned OpIdx,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);

  // Z28-Z31 => encoded value is in 4..7 (offset 24).
  if (RegOpnd > AArch64::Z27)
    return (RegVal - 24);

  assert((RegOpnd > AArch64::Z19 && RegOpnd < AArch64::Z24) &&
         "Expected ZK in Z20..Z23 or Z28..Z31");
  // Z20-Z23 => encoded value is in 0..3 (offset 20).
  return (RegVal - 20);
}

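// Illustrative example (not from the original source): Z29 has hardware
// encoding 29, so the first branch returns 29 - 24 = 5 (binary 101), matching
// the K:Zk table in the comment above; Z21 takes the second path and returns
// 21 - 20 = 1 (binary 001).
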
uint32_t
AArch64MCCodeEmitter::EncodePNR_p8to15(const MCInst &MI, unsigned OpIdx,
                                       SmallVectorImpl<MCFixup> &Fixups,
                                       const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - AArch64::PN8;
}

uint32_t AArch64MCCodeEmitter::EncodeZPR2StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 1;
  unsigned Zt = RegVal & 0x7;
  return T | Zt;
}

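// Illustrative example (not from the original source): a strided pair based on
// Z18 (hardware encoding 18, i.e. 0b10010) gives T = 0b1000 and Zt = 0b010, so
// the encoder returns 0b1010; bit 4 of the register number becomes the top bit
// of the emitted field and the low three bits follow it.
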
uint32_t AArch64MCCodeEmitter::EncodeZPR4StridedRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  unsigned RegVal = Ctx.getRegisterInfo()->getEncodingValue(RegOpnd);
  unsigned T = (RegVal & 0x10) >> 2;
  unsigned Zt = RegVal & 0x3;
  return T | Zt;
}

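// Illustrative example (not from the original source): a strided quad based on
// Z17 (hardware encoding 17, i.e. 0b10001) gives T = 0b100 and Zt = 0b01, so
// the encoder returns 0b101.
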
uint32_t AArch64MCCodeEmitter::EncodeMatrixTileListRegisterClass(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  unsigned RegMask = MI.getOperand(OpIdx).getImm();
  assert(RegMask <= 0xFF && "Invalid register mask!");
  return RegMask;
}

template <unsigned BaseReg>
uint32_t
AArch64MCCodeEmitter::encodeMatrixIndexGPR32(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  auto RegOpnd = MI.getOperand(OpIdx).getReg();
  return RegOpnd - BaseReg;
}

uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift.
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate.
  auto Immediate = MI.getOperand(OpIdx).getImm();
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

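// Illustrative example (not from the original source): an operand pair of #3
// with LSL #8 encodes as (3 & 0xff) | (1 << 8) = 0x103, i.e. the 8-bit
// immediate plus the shift flag as computed above.
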
uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI,
                                             SmallVectorImpl<char> &CB,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::SPACE) {
    // SPACE just increases basic block size, in both cases no actual code.
    return;
  }

  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(CB, Binary, llvm::endianness::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}
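
// Note: this factory is not called from within this file; the target's
// MCTargetDesc initialization registers it with the MC layer, conceptually
// along the lines of the simplified sketch below (the real registration
// covers every AArch64/ARM64 target flavour):
//
//   TargetRegistry::RegisterMCCodeEmitter(getTheAArch64leTarget(),
//                                         createAArch64MCCodeEmitter);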