//===- bolt/Target/AArch64/AArch64MCPlusBuilder.cpp -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AArch64-specific MCPlus builder.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Core/MCPlusBuilder.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "mcplus"

using namespace llvm;
using namespace bolt;

namespace {
static void getSystemFlag(MCInst &Inst, MCPhysReg RegName) {
  Inst.setOpcode(AArch64::MRS);
  Inst.clear();
  Inst.addOperand(MCOperand::createReg(RegName));
  Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV));
}

static void setSystemFlag(MCInst &Inst, MCPhysReg RegName) {
  Inst.setOpcode(AArch64::MSR);
  Inst.clear();
  Inst.addOperand(MCOperand::createImm(AArch64SysReg::NZCV));
  Inst.addOperand(MCOperand::createReg(RegName));
}
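
// The two helpers above spill and restore the NZCV condition flags through a
// scratch register ("mrs Reg, nzcv" / "msr nzcv, Reg") so that the
// instrumentation sequences emitted below do not clobber the flags of the
// original program.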

static void createPushRegisters(MCInst &Inst, MCPhysReg Reg1, MCPhysReg Reg2) {
  Inst.clear();
  unsigned NewOpcode = AArch64::STPXpre;
  Inst.setOpcode(NewOpcode);
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createReg(Reg1));
  Inst.addOperand(MCOperand::createReg(Reg2));
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createImm(-2));
}

static void createPopRegisters(MCInst &Inst, MCPhysReg Reg1, MCPhysReg Reg2) {
  Inst.clear();
  unsigned NewOpcode = AArch64::LDPXpost;
  Inst.setOpcode(NewOpcode);
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createReg(Reg1));
  Inst.addOperand(MCOperand::createReg(Reg2));
  Inst.addOperand(MCOperand::createReg(AArch64::SP));
  Inst.addOperand(MCOperand::createImm(2));
}
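
// createPushRegisters/createPopRegisters build pre/post-indexed register-pair
// accesses ("stp Reg1, Reg2, [sp, #-16]!" / "ldp Reg1, Reg2, [sp], #16"); the
// STP/LDP immediate operand is scaled by 8, hence -2/+2 for a 16-byte slot.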

static void loadReg(MCInst &Inst, MCPhysReg To, MCPhysReg From) {
  Inst.setOpcode(AArch64::LDRXui);
  Inst.clear();
  if (From == AArch64::SP) {
    Inst.setOpcode(AArch64::LDRXpost);
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createImm(16));
  } else {
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createImm(0));
  }
}

static void storeReg(MCInst &Inst, MCPhysReg From, MCPhysReg To) {
  Inst.setOpcode(AArch64::STRXui);
  Inst.clear();
  if (To == AArch64::SP) {
    Inst.setOpcode(AArch64::STRXpre);
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createImm(-16));
  } else {
    Inst.addOperand(MCOperand::createReg(From));
    Inst.addOperand(MCOperand::createReg(To));
    Inst.addOperand(MCOperand::createImm(0));
  }
}

static void atomicAdd(MCInst &Inst, MCPhysReg RegTo, MCPhysReg RegCnt) {
  // NOTE: Supports only ARM with LSE extension
  Inst.setOpcode(AArch64::LDADDX);
  Inst.clear();
  Inst.addOperand(MCOperand::createReg(AArch64::XZR));
  Inst.addOperand(MCOperand::createReg(RegCnt));
  Inst.addOperand(MCOperand::createReg(RegTo));
}
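
// atomicAdd builds "ldadd RegCnt, xzr, [RegTo]": the previous memory value is
// discarded into XZR. As the note above says, LDADD is only available with the
// LSE atomics extension (ARMv8.1-A and later).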

static void createMovz(MCInst &Inst, MCPhysReg Reg, uint64_t Imm) {
  assert(Imm <= UINT16_MAX && "Invalid Imm size");
  Inst.clear();
  Inst.setOpcode(AArch64::MOVZXi);
  Inst.addOperand(MCOperand::createReg(Reg));
  Inst.addOperand(MCOperand::createImm(Imm & 0xFFFF));
  Inst.addOperand(MCOperand::createImm(0));
}

static InstructionListType createIncMemory(MCPhysReg RegTo, MCPhysReg RegTmp) {
  InstructionListType Insts;
  Insts.emplace_back();
  createMovz(Insts.back(), RegTmp, 1);
  Insts.emplace_back();
  atomicAdd(Insts.back(), RegTo, RegTmp);
  return Insts;
}
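
// createIncMemory therefore expands to a two-instruction atomic counter
// increment (sketch of the emitted assembly):
//   movz  RegTmp, #1
//   ldadd RegTmp, xzr, [RegTo]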

class AArch64MCPlusBuilder : public MCPlusBuilder {
public:
  using MCPlusBuilder::MCPlusBuilder;

  bool equals(const MCTargetExpr &A, const MCTargetExpr &B,
              CompFuncTy Comp) const override {
    const auto &AArch64ExprA = cast<AArch64MCExpr>(A);
    const auto &AArch64ExprB = cast<AArch64MCExpr>(B);
    if (AArch64ExprA.getKind() != AArch64ExprB.getKind())
      return false;

    return MCPlusBuilder::equals(*AArch64ExprA.getSubExpr(),
                                 *AArch64ExprB.getSubExpr(), Comp);
  }

  bool shortenInstruction(MCInst &, const MCSubtargetInfo &) const override {
    return false;
  }
  bool isADRP(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADRP;
  }

  bool isADR(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::ADR;
  }

  bool isAddXri(const MCInst &Inst) const {
    return Inst.getOpcode() == AArch64::ADDXri;
  }

  void getADRReg(const MCInst &Inst, MCPhysReg &RegName) const override {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) != 0 &&
           "No operands for ADR instruction");
    assert(Inst.getOperand(0).isReg() &&
           "Unexpected operand in ADR instruction");
    RegName = Inst.getOperand(0).getReg();
  }

  bool isTB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::TBNZW ||
            Inst.getOpcode() == AArch64::TBNZX ||
            Inst.getOpcode() == AArch64::TBZW ||
            Inst.getOpcode() == AArch64::TBZX);
  }

  bool isCB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::CBNZW ||
            Inst.getOpcode() == AArch64::CBNZX ||
            Inst.getOpcode() == AArch64::CBZW ||
            Inst.getOpcode() == AArch64::CBZX);
  }

  bool isMOVW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::MOVKWi ||
            Inst.getOpcode() == AArch64::MOVKXi ||
            Inst.getOpcode() == AArch64::MOVNWi ||
            Inst.getOpcode() == AArch64::MOVNXi ||
            Inst.getOpcode() == AArch64::MOVZXi ||
            Inst.getOpcode() == AArch64::MOVZWi);
  }

  bool isADD(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::ADDSWri ||
            Inst.getOpcode() == AArch64::ADDSWrr ||
            Inst.getOpcode() == AArch64::ADDSWrs ||
            Inst.getOpcode() == AArch64::ADDSWrx ||
            Inst.getOpcode() == AArch64::ADDSXri ||
            Inst.getOpcode() == AArch64::ADDSXrr ||
            Inst.getOpcode() == AArch64::ADDSXrs ||
            Inst.getOpcode() == AArch64::ADDSXrx ||
            Inst.getOpcode() == AArch64::ADDSXrx64 ||
            Inst.getOpcode() == AArch64::ADDWri ||
            Inst.getOpcode() == AArch64::ADDWrr ||
            Inst.getOpcode() == AArch64::ADDWrs ||
            Inst.getOpcode() == AArch64::ADDWrx ||
            Inst.getOpcode() == AArch64::ADDXri ||
            Inst.getOpcode() == AArch64::ADDXrr ||
            Inst.getOpcode() == AArch64::ADDXrs ||
            Inst.getOpcode() == AArch64::ADDXrx ||
            Inst.getOpcode() == AArch64::ADDXrx64);
  }

  bool isLDRB(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRBBpost ||
            Inst.getOpcode() == AArch64::LDRBBpre ||
            Inst.getOpcode() == AArch64::LDRBBroW ||
            Inst.getOpcode() == AArch64::LDRBBroX ||
            Inst.getOpcode() == AArch64::LDRBBui ||
            Inst.getOpcode() == AArch64::LDRSBWpost ||
            Inst.getOpcode() == AArch64::LDRSBWpre ||
            Inst.getOpcode() == AArch64::LDRSBWroW ||
            Inst.getOpcode() == AArch64::LDRSBWroX ||
            Inst.getOpcode() == AArch64::LDRSBWui ||
            Inst.getOpcode() == AArch64::LDRSBXpost ||
            Inst.getOpcode() == AArch64::LDRSBXpre ||
            Inst.getOpcode() == AArch64::LDRSBXroW ||
            Inst.getOpcode() == AArch64::LDRSBXroX ||
            Inst.getOpcode() == AArch64::LDRSBXui);
  }

  bool isLDRH(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRHHpost ||
            Inst.getOpcode() == AArch64::LDRHHpre ||
            Inst.getOpcode() == AArch64::LDRHHroW ||
            Inst.getOpcode() == AArch64::LDRHHroX ||
            Inst.getOpcode() == AArch64::LDRHHui ||
            Inst.getOpcode() == AArch64::LDRSHWpost ||
            Inst.getOpcode() == AArch64::LDRSHWpre ||
            Inst.getOpcode() == AArch64::LDRSHWroW ||
            Inst.getOpcode() == AArch64::LDRSHWroX ||
            Inst.getOpcode() == AArch64::LDRSHWui ||
            Inst.getOpcode() == AArch64::LDRSHXpost ||
            Inst.getOpcode() == AArch64::LDRSHXpre ||
            Inst.getOpcode() == AArch64::LDRSHXroW ||
            Inst.getOpcode() == AArch64::LDRSHXroX ||
            Inst.getOpcode() == AArch64::LDRSHXui);
  }

  bool isLDRW(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRWpost ||
            Inst.getOpcode() == AArch64::LDRWpre ||
            Inst.getOpcode() == AArch64::LDRWroW ||
            Inst.getOpcode() == AArch64::LDRWroX ||
            Inst.getOpcode() == AArch64::LDRWui);
  }

  bool isLDRX(const MCInst &Inst) const {
    return (Inst.getOpcode() == AArch64::LDRXpost ||
            Inst.getOpcode() == AArch64::LDRXpre ||
            Inst.getOpcode() == AArch64::LDRXroW ||
            Inst.getOpcode() == AArch64::LDRXroX ||
            Inst.getOpcode() == AArch64::LDRXui);
  }

  bool mayLoad(const MCInst &Inst) const override {
    return isLDRB(Inst) || isLDRH(Inst) || isLDRW(Inst) || isLDRX(Inst);
  }

  bool isAArch64ExclusiveLoad(const MCInst &Inst) const override {
    return (Inst.getOpcode() == AArch64::LDXPX ||
            Inst.getOpcode() == AArch64::LDXPW ||
            Inst.getOpcode() == AArch64::LDXRX ||
            Inst.getOpcode() == AArch64::LDXRW ||
            Inst.getOpcode() == AArch64::LDXRH ||
            Inst.getOpcode() == AArch64::LDXRB ||
            Inst.getOpcode() == AArch64::LDAXPX ||
            Inst.getOpcode() == AArch64::LDAXPW ||
            Inst.getOpcode() == AArch64::LDAXRX ||
            Inst.getOpcode() == AArch64::LDAXRW ||
            Inst.getOpcode() == AArch64::LDAXRH ||
            Inst.getOpcode() == AArch64::LDAXRB);
  }

  bool isAArch64ExclusiveStore(const MCInst &Inst) const override {
    return (Inst.getOpcode() == AArch64::STXPX ||
            Inst.getOpcode() == AArch64::STXPW ||
            Inst.getOpcode() == AArch64::STXRX ||
            Inst.getOpcode() == AArch64::STXRW ||
            Inst.getOpcode() == AArch64::STXRH ||
            Inst.getOpcode() == AArch64::STXRB ||
            Inst.getOpcode() == AArch64::STLXPX ||
            Inst.getOpcode() == AArch64::STLXPW ||
            Inst.getOpcode() == AArch64::STLXRX ||
            Inst.getOpcode() == AArch64::STLXRW ||
            Inst.getOpcode() == AArch64::STLXRH ||
            Inst.getOpcode() == AArch64::STLXRB);
  }

  bool isAArch64ExclusiveClear(const MCInst &Inst) const override {
    return (Inst.getOpcode() == AArch64::CLREX);
  }
  bool isLoadFromStack(const MCInst &Inst) const {
    if (!mayLoad(Inst))
      return false;
    for (const MCOperand &Operand : useOperands(Inst)) {
      if (!Operand.isReg())
        continue;
      unsigned Reg = Operand.getReg();
      if (Reg == AArch64::SP || Reg == AArch64::WSP || Reg == AArch64::FP ||
          Reg == AArch64::W29)
        return true;
    }
    return false;
  }

  bool isRegToRegMove(const MCInst &Inst, MCPhysReg &From,
                      MCPhysReg &To) const override {
    if (Inst.getOpcode() == AArch64::FMOVDXr) {
      From = Inst.getOperand(1).getReg();
      To = Inst.getOperand(0).getReg();
      return true;
    }

    if (Inst.getOpcode() != AArch64::ORRXrs)
      return false;
    if (Inst.getOperand(1).getReg() != AArch64::XZR)
      return false;
    if (Inst.getOperand(3).getImm() != 0)
      return false;
    From = Inst.getOperand(2).getReg();
    To = Inst.getOperand(0).getReg();
    return true;
  }

  bool isIndirectCall(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::BLR;
  }
  MCPhysReg getSpRegister(int Size) const {
    switch (Size) {
    case 4:
      return AArch64::WSP;
    case 8:
      return AArch64::SP;
    default:
      llvm_unreachable("Unexpected size");
    }
  }

  MCPhysReg getIntArgRegister(unsigned ArgNo) const override {
    switch (ArgNo) {
    case 0:
      return AArch64::X0;
    case 1:
      return AArch64::X1;
    case 2:
      return AArch64::X2;
    case 3:
      return AArch64::X3;
    case 4:
      return AArch64::X4;
    case 5:
      return AArch64::X5;
    case 6:
      return AArch64::X6;
    case 7:
      return AArch64::X7;
    default:
      return getNoRegister();
    }
  }

  bool hasPCRelOperand(const MCInst &Inst) const override {
    // ADRP is blacklisted and is an exception. Even though it has a
    // PC-relative operand, this operand is not a complete symbol reference
    // and BOLT shouldn't try to process it in isolation.
    if (isADRP(Inst))
      return false;

    if (isADR(Inst))
      return true;

    // Look for literal addressing mode (see C1-143 ARM DDI 0487B.a)
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I)
      if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
        return true;

    return false;
  }
  bool evaluateADR(const MCInst &Inst, int64_t &Imm,
                   const MCExpr **DispExpr) const {
    assert((isADR(Inst) || isADRP(Inst)) && "Not an ADR instruction");

    const MCOperand &Label = Inst.getOperand(1);
    if (!Label.isImm()) {
      assert(Label.isExpr() && "Unexpected ADR operand");
      assert(DispExpr && "DispExpr must be set");
      *DispExpr = Label.getExpr();
      return false;
    }

    if (Inst.getOpcode() == AArch64::ADR) {
      Imm = Label.getImm();
      return true;
    }
    Imm = Label.getImm() << 12;
    return true;
  }

  bool evaluateAArch64MemoryOperand(const MCInst &Inst, int64_t &DispImm,
                                    const MCExpr **DispExpr = nullptr) const {
    if (isADR(Inst) || isADRP(Inst))
      return evaluateADR(Inst, DispImm, DispExpr);

    // Literal addressing mode
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.operands()[I].OperandType != MCOI::OPERAND_PCREL)
        continue;

      if (!Inst.getOperand(I).isImm()) {
        assert(Inst.getOperand(I).isExpr() && "Unexpected PCREL operand");
        assert(DispExpr && "DispExpr must be set");
        *DispExpr = Inst.getOperand(I).getExpr();
        return true;
      }

      DispImm = Inst.getOperand(I).getImm() * 4;
      return true;
    }
    return false;
  }

  bool evaluateMemOperandTarget(const MCInst &Inst, uint64_t &Target,
                                uint64_t Address,
                                uint64_t Size) const override {
    int64_t DispValue;
    const MCExpr *DispExpr = nullptr;
    if (!evaluateAArch64MemoryOperand(Inst, DispValue, &DispExpr))
      return false;

    // Make sure it's a well-formed addressing we can statically evaluate.
    if (DispExpr)
      return false;

    Target = DispValue;
    if (Inst.getOpcode() == AArch64::ADRP)
      Target += Address & ~0xFFFULL;
    else
      Target += Address;
    return true;
  }
  MCInst::iterator getMemOperandDisp(MCInst &Inst) const override {
    MCInst::iterator OI = Inst.begin();
    if (isADR(Inst) || isADRP(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Unexpected number of operands");
      return ++OI;
    }
    const MCInstrDesc &MCII = Info->get(Inst.getOpcode());
    for (unsigned I = 0, E = MCII.getNumOperands(); I != E; ++I) {
      if (MCII.operands()[I].OperandType == MCOI::OPERAND_PCREL)
        break;
      ++OI;
    }
    assert(OI != Inst.end() && "Literal operand not found");
    return OI;
  }

  bool replaceMemOperandDisp(MCInst &Inst, MCOperand Operand) const override {
    MCInst::iterator OI = getMemOperandDisp(Inst);
    *OI = Operand;
    return true;
  }

  void getCalleeSavedRegs(BitVector &Regs) const override {
    Regs |= getAliases(AArch64::X18);
    Regs |= getAliases(AArch64::X19);
    Regs |= getAliases(AArch64::X20);
    Regs |= getAliases(AArch64::X21);
    Regs |= getAliases(AArch64::X22);
    Regs |= getAliases(AArch64::X23);
    Regs |= getAliases(AArch64::X24);
    Regs |= getAliases(AArch64::X25);
    Regs |= getAliases(AArch64::X26);
    Regs |= getAliases(AArch64::X27);
    Regs |= getAliases(AArch64::X28);
    Regs |= getAliases(AArch64::LR);
    Regs |= getAliases(AArch64::FP);
  }
  const MCExpr *getTargetExprFor(MCInst &Inst, const MCExpr *Expr,
                                 MCContext &Ctx,
                                 uint64_t RelType) const override {
    if (isADR(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_LO21 ||
        RelType == ELF::R_AARCH64_TLSDESC_ADR_PREL21) {
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS, Ctx);
    } else if (isADRP(Inst) || RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21 ||
               RelType == ELF::R_AARCH64_ADR_PREL_PG_HI21_NC ||
               RelType == ELF::R_AARCH64_TLSDESC_ADR_PAGE21 ||
               RelType == ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 ||
               RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
      // Never emit a GOT reloc, we handled this in
      // RewriteInstance::readRelocations().
      return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, Ctx);
    } else {
      switch (RelType) {
      case ELF::R_AARCH64_ADD_ABS_LO12_NC:
      case ELF::R_AARCH64_LD64_GOT_LO12_NC:
      case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
      case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
      case ELF::R_AARCH64_TLSDESC_ADD_LO12:
      case ELF::R_AARCH64_TLSDESC_LD64_LO12:
      case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
      case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_LO12, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G3:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G3, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G2:
      case ELF::R_AARCH64_MOVW_UABS_G2_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G2_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G1:
      case ELF::R_AARCH64_MOVW_UABS_G1_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G1_NC, Ctx);
      case ELF::R_AARCH64_MOVW_UABS_G0:
      case ELF::R_AARCH64_MOVW_UABS_G0_NC:
        return AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_G0_NC, Ctx);
      default:
        break;
      }
    }
    return Expr;
  }
  bool getSymbolRefOperandNum(const MCInst &Inst, unsigned &OpNum) const {
    if (OpNum >= MCPlus::getNumPrimeOperands(Inst))
      return false;

    // Auto-select correct operand number
    if (OpNum == 0) {
      if (isConditionalBranch(Inst) || isADR(Inst) || isADRP(Inst) ||
          isMOVW(Inst))
        OpNum = 1;
      if (isTB(Inst) || isAddXri(Inst))
        OpNum = 2;
    }

    return true;
  }

  const MCSymbol *getTargetSymbol(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetSymbol(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr)
      return getTargetSymbol(BinExpr->getLHS());

    auto *SymExpr = dyn_cast<MCSymbolRefExpr>(Expr);
    if (SymExpr && SymExpr->getKind() == MCSymbolRefExpr::VK_None)
      return &SymExpr->getSymbol();

    return nullptr;
  }

  const MCSymbol *getTargetSymbol(const MCInst &Inst,
                                  unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return nullptr;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return nullptr;

    return getTargetSymbol(Op.getExpr());
  }

  int64_t getTargetAddend(const MCExpr *Expr) const override {
    auto *AArchExpr = dyn_cast<AArch64MCExpr>(Expr);
    if (AArchExpr && AArchExpr->getSubExpr())
      return getTargetAddend(AArchExpr->getSubExpr());

    auto *BinExpr = dyn_cast<MCBinaryExpr>(Expr);
    if (BinExpr && BinExpr->getOpcode() == MCBinaryExpr::Add)
      return getTargetAddend(BinExpr->getRHS());

    auto *ConstExpr = dyn_cast<MCConstantExpr>(Expr);
    if (ConstExpr)
      return ConstExpr->getValue();

    return 0;
  }

  int64_t getTargetAddend(const MCInst &Inst,
                          unsigned OpNum = 0) const override {
    if (!getSymbolRefOperandNum(Inst, OpNum))
      return 0;

    const MCOperand &Op = Inst.getOperand(OpNum);
    if (!Op.isExpr())
      return 0;

    return getTargetAddend(Op.getExpr());
  }
  void replaceBranchTarget(MCInst &Inst, const MCSymbol *TBB,
                           MCContext *Ctx) const override {
    assert((isCall(Inst) || isBranch(Inst)) && !isIndirectBranch(Inst) &&
           "Invalid instruction");
    assert(MCPlus::getNumPrimeOperands(Inst) >= 1 &&
           "Invalid number of operands");
    MCInst::iterator OI = Inst.begin();

    if (isConditionalBranch(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 2 &&
             "Invalid number of operands");
      ++OI;
    }

    if (isTB(Inst)) {
      assert(MCPlus::getNumPrimeOperands(Inst) >= 3 &&
             "Invalid number of operands");
      OI = Inst.begin() + 2;
    }

    *OI = MCOperand::createExpr(
        MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx));
  }

  /// Matches indirect branch patterns in AArch64 related to a jump table (JT),
  /// helping us to build the complete CFG. A typical indirect branch to
  /// a jump table entry in AArch64 looks like the following:
  ///
  ///   adrp    x1, #-7585792           # Get JT Page location
  ///   add     x1, x1, #692            # Complement with JT Page offset
  ///   ldrh    w0, [x1, w0, uxtw #1]   # Loads JT entry
  ///   adr     x1, #12                 # Get PC + 12 (end of this BB) used next
  ///   add     x0, x1, w0, sxth #2     # Finish building branch target
  ///                                   # (entries in JT are relative to the end
  ///                                   #  of this BB)
  ///   br      x0                      # Indirect jump instruction
  ///
  bool analyzeIndirectBranchFragment(
      const MCInst &Inst,
      DenseMap<const MCInst *, SmallVector<MCInst *, 4>> &UDChain,
      const MCExpr *&JumpTable, int64_t &Offset, int64_t &ScaleValue,
      MCInst *&PCRelBase) const {
    // Expect an AArch64 BR as the root of the pattern.
    assert(Inst.getOpcode() == AArch64::BR && "Unexpected opcode");

    // Match the indirect branch pattern for aarch64
    SmallVector<MCInst *, 4> &UsesRoot = UDChain[&Inst];
    if (UsesRoot.size() == 0 || UsesRoot[0] == nullptr)
      return false;

    const MCInst *DefAdd = UsesRoot[0];

    // Now we match an ADD
    if (!isADD(*DefAdd)) {
      // If the address is not broken up in two parts, this is not branching
      // according to a jump table entry. Fail.
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXri) {
      // This can happen when there is no offset, but a direct jump that was
      // transformed into an indirect one (indirect tail call):
      //   ADRP x2, Perl_re_compiler
      //   ADD x2, x2, :lo12:Perl_re_compiler
      //   BR x2
      return false;
    }
    if (DefAdd->getOpcode() == AArch64::ADDXrs) {
      // Covers the less common pattern where JT entries are relative to
      // the JT itself (like x86). Seems less efficient since we can't
      // assume the JT is aligned at 4B boundary and thus drop 2 bits from
      // JT values.
      // cde264:
      //    adrp    x12, #21544960  ; 216a000
      //    add     x12, x12, #1696 ; 216a6a0  (JT object in .rodata)
      //    ldrsw   x8, [x12, x8, lsl #2]   --> loads e.g. 0xfeb73bd8
      //  * add     x8, x8, x12   --> = cde278, next block
      //    br      x8
      // cde278:
      //
      // Parsed as ADDXrs reg:x8 reg:x8 reg:x12 imm:0
      return false;
    }
    assert(DefAdd->getOpcode() == AArch64::ADDXrx &&
           "Failed to match indirect branch!");

    // Validate ADD operands
    int64_t OperandExtension = DefAdd->getOperand(3).getImm();
    unsigned ShiftVal = AArch64_AM::getArithShiftValue(OperandExtension);
    AArch64_AM::ShiftExtendType ExtendType =
        AArch64_AM::getArithExtendType(OperandExtension);

    if (ShiftVal != 2) {
      // TODO: Handle the pattern where ShiftVal != 2.
      // The following code sequence below has no shift amount,
      // the range could be 0 to 4.
      // The pattern comes from libc, it occurs when the binary is static.
      //   adr     x6, 0x219fb0 <sigall_set+0x88>
      //   add     x6, x6, x14, lsl #2
      //   ldr     w7, [x6]
      //   add     x6, x6, w7, sxtw => no shift amount
      //   br      x6
      errs() << "BOLT-WARNING: "
                "Failed to match indirect branch: ShiftVAL != 2 \n";
      return false;
    }

    if (ExtendType == AArch64_AM::SXTB)
      ScaleValue = 1LL;
    else if (ExtendType == AArch64_AM::SXTH)
      ScaleValue = 2LL;
    else if (ExtendType == AArch64_AM::SXTW)
      ScaleValue = 4LL;
    else
      llvm_unreachable("Failed to match indirect branch! (fragment 3)");

    // Match an ADR to load base address to be used when addressing JT targets
    SmallVector<MCInst *, 4> &UsesAdd = UDChain[DefAdd];
    if (UsesAdd.size() <= 1 || UsesAdd[1] == nullptr || UsesAdd[2] == nullptr) {
      // This happens when we don't have enough context about this jump table
      // because the jumping code sequence was split in multiple basic blocks.
      // This was observed in the wild in HHVM code (dispatchImpl).
      return false;
    }
    MCInst *DefBaseAddr = UsesAdd[1];
    assert(DefBaseAddr->getOpcode() == AArch64::ADR &&
           "Failed to match indirect branch pattern! (fragment 3)");

    PCRelBase = DefBaseAddr;
    // Match LOAD to load the jump table (relative) target
    const MCInst *DefLoad = UsesAdd[2];
    assert(mayLoad(*DefLoad) &&
           "Failed to match indirect branch load pattern! (1)");
    assert((ScaleValue != 1LL || isLDRB(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (2)");
    assert((ScaleValue != 2LL || isLDRH(*DefLoad)) &&
           "Failed to match indirect branch load pattern! (3)");

    // Match ADD that calculates the JumpTable Base Address (not the offset)
    SmallVector<MCInst *, 4> &UsesLoad = UDChain[DefLoad];
    const MCInst *DefJTBaseAdd = UsesLoad[1];
    MCPhysReg From, To;
    if (DefJTBaseAdd == nullptr || isLoadFromStack(*DefJTBaseAdd) ||
        isRegToRegMove(*DefJTBaseAdd, From, To)) {
      // Sometimes base address may have been defined in another basic block
      // (hoisted). Return with no jump table info.
      JumpTable = nullptr;
      return true;
    }

    if (DefJTBaseAdd->getOpcode() == AArch64::ADR) {
      // TODO: Handle the pattern where there is no adrp/add pair.
      // It also occurs when the binary is static.
      //  adr     x13, 0x215a18 <_nl_value_type_LC_COLLATE+0x50>
      //  ldrh    w13, [x13, w12, uxtw #1]
      //  adr     x12, 0x247b30 <__gettextparse+0x5b0>
      //  add     x13, x12, w13, sxth #2
      //  br      x13
      errs() << "BOLT-WARNING: Failed to match indirect branch: "
                "nop/adr instead of adrp/add \n";
      return false;
    }
    assert(DefJTBaseAdd->getOpcode() == AArch64::ADDXri &&
           "Failed to match jump table base address pattern! (1)");

    if (DefJTBaseAdd->getOperand(2).isImm())
      Offset = DefJTBaseAdd->getOperand(2).getImm();
    SmallVector<MCInst *, 4> &UsesJTBaseAdd = UDChain[DefJTBaseAdd];
    const MCInst *DefJTBasePage = UsesJTBaseAdd[1];
    if (DefJTBasePage == nullptr || isLoadFromStack(*DefJTBasePage)) {
      JumpTable = nullptr;
      return true;
    }
    assert(DefJTBasePage->getOpcode() == AArch64::ADRP &&
           "Failed to match jump table base page pattern! (2)");
    if (DefJTBasePage->getOperand(1).isExpr())
      JumpTable = DefJTBasePage->getOperand(1).getExpr();
    return true;
  }
  DenseMap<const MCInst *, SmallVector<MCInst *, 4>>
  computeLocalUDChain(const MCInst *CurInstr, InstructionIterator Begin,
                      InstructionIterator End) const {
    DenseMap<int, MCInst *> RegAliasTable;
    DenseMap<const MCInst *, SmallVector<MCInst *, 4>> Uses;

    auto addInstrOperands = [&](const MCInst &Instr) {
      // Update Uses table
      for (const MCOperand &Operand : MCPlus::primeOperands(Instr)) {
        if (!Operand.isReg())
          continue;
        unsigned Reg = Operand.getReg();
        MCInst *AliasInst = RegAliasTable[Reg];
        Uses[&Instr].push_back(AliasInst);
        LLVM_DEBUG({
          dbgs() << "Adding reg operand " << Reg << " refs ";
          if (AliasInst != nullptr)
            AliasInst->dump();
          else
            dbgs() << "\n";
        });
      }
    };

    LLVM_DEBUG(dbgs() << "computeLocalUDChain\n");
    bool TerminatorSeen = false;
    for (auto II = Begin; II != End; ++II) {
      MCInst &Instr = *II;
      // Ignore nops and CFIs
      if (isPseudo(Instr) || isNoop(Instr))
        continue;
      if (TerminatorSeen) {
        RegAliasTable.clear();
        Uses.clear();
      }

      LLVM_DEBUG(dbgs() << "Now updating for:\n ");
      LLVM_DEBUG(Instr.dump());
      addInstrOperands(Instr);

      BitVector Regs = BitVector(RegInfo->getNumRegs(), false);
      getWrittenRegs(Instr, Regs);

      // Update register definitions after this point
      for (int Idx : Regs.set_bits()) {
        RegAliasTable[Idx] = &Instr;
        LLVM_DEBUG(dbgs() << "Setting reg " << Idx
                          << " def to current instr.\n");
      }

      TerminatorSeen = isTerminator(Instr);
    }

    // Process the last instruction, which is not currently added into the
    // instruction stream
    if (CurInstr)
      addInstrOperands(*CurInstr);

    return Uses;
  }
  IndirectBranchType
  analyzeIndirectBranch(MCInst &Instruction, InstructionIterator Begin,
                        InstructionIterator End, const unsigned PtrSize,
                        MCInst *&MemLocInstrOut, unsigned &BaseRegNumOut,
                        unsigned &IndexRegNumOut, int64_t &DispValueOut,
                        const MCExpr *&DispExprOut, MCInst *&PCRelBaseOut,
                        MCInst *&FixedEntryLoadInstr) const override {
    MemLocInstrOut = nullptr;
    BaseRegNumOut = AArch64::NoRegister;
    IndexRegNumOut = AArch64::NoRegister;
    DispValueOut = 0;
    DispExprOut = nullptr;
    FixedEntryLoadInstr = nullptr;

    // An instruction referencing memory used by jump instruction (directly or
    // via register). This location could be an array of function pointers
    // in case of indirect tail call, or a jump table.
    MCInst *MemLocInstr = nullptr;

    // Analyze the memory location.
    int64_t ScaleValue, DispValue;
    const MCExpr *DispExpr;

    DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
        computeLocalUDChain(&Instruction, Begin, End);
    MCInst *PCRelBase;
    if (!analyzeIndirectBranchFragment(Instruction, UDChain, DispExpr,
                                       DispValue, ScaleValue, PCRelBase))
      return IndirectBranchType::UNKNOWN;

    MemLocInstrOut = MemLocInstr;
    DispValueOut = DispValue;
    DispExprOut = DispExpr;
    PCRelBaseOut = PCRelBase;
    return IndirectBranchType::POSSIBLE_PIC_JUMP_TABLE;
  }
  /// Matches PLT entry pattern and returns the associated GOT entry address.
  /// Typical PLT entry looks like the following:
  ///
  ///   adrp    x16, <page of GOT entry>
  ///   ldr     x17, [x16, #3040]
  ///   add     x16, x16, #0xbe0
  ///   br      x17
  ///
  /// The other type of trampolines are located in .plt.got, that are used for
  /// non-lazy bindings so doesn't use x16 arg to transfer .got entry address:
  ///
  ///   adrp    x16, <page of GOT entry>
  ///   ldr     x17, [x16, #3040]
  ///   br      x17
  ///   nop
  ///
  uint64_t analyzePLTEntry(MCInst &Instruction, InstructionIterator Begin,
                           InstructionIterator End,
                           uint64_t BeginPC) const override {
    // Check branch instruction
    MCInst *Branch = &Instruction;
    assert(Branch->getOpcode() == AArch64::BR && "Unexpected opcode");

    DenseMap<const MCInst *, SmallVector<llvm::MCInst *, 4>> UDChain =
        computeLocalUDChain(Branch, Begin, End);

    // Match ldr instruction
    SmallVector<MCInst *, 4> &BranchUses = UDChain[Branch];
    if (BranchUses.size() < 1 || BranchUses[0] == nullptr)
      return 0;

    // Check ldr instruction
    const MCInst *Ldr = BranchUses[0];
    if (Ldr->getOpcode() != AArch64::LDRXui)
      return 0;

    // Get ldr value
    const unsigned ScaleLdr = 8; // LDRX operates on 8 bytes segments
    assert(Ldr->getOperand(2).isImm() && "Unexpected ldr operand");
    const uint64_t Offset = Ldr->getOperand(2).getImm() * ScaleLdr;

    // Match adrp instruction
    SmallVector<MCInst *, 4> &LdrUses = UDChain[Ldr];
    if (LdrUses.size() < 2 || LdrUses[1] == nullptr)
      return 0;

    // Check adrp instruction
    MCInst *Adrp = LdrUses[1];
    if (Adrp->getOpcode() != AArch64::ADRP)
      return 0;

    // Get adrp instruction PC
    const unsigned InstSize = 4;
    uint64_t AdrpPC = BeginPC;
    for (InstructionIterator It = Begin; It != End; ++It) {
      if (&(*It) == Adrp)
        break;
      AdrpPC += InstSize;
    }

    // Get adrp value
    uint64_t Base;
    assert(Adrp->getOperand(1).isImm() && "Unexpected adrp operand");
    bool Ret = evaluateMemOperandTarget(*Adrp, Base, AdrpPC, InstSize);
    assert(Ret && "Failed to evaluate adrp");
    (void)Ret;

    return Base + Offset;
  }
  unsigned getInvertedBranchOpcode(unsigned Opcode) const {
    switch (Opcode) {
    default:
      llvm_unreachable("Failed to invert branch opcode");
      return Opcode;
    case AArch64::TBZW:     return AArch64::TBNZW;
    case AArch64::TBZX:     return AArch64::TBNZX;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBZW:     return AArch64::CBNZW;
    case AArch64::CBZX:     return AArch64::CBNZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  unsigned getCondCode(const MCInst &Inst) const override {
    // AArch64 does not use conditional codes, so we just return the opcode
    // of the conditional branch here.
    return Inst.getOpcode();
  }

  unsigned getCanonicalBranchCondCode(unsigned Opcode) const override {
    switch (Opcode) {
    default:
      return Opcode;
    case AArch64::TBNZW:    return AArch64::TBZW;
    case AArch64::TBNZX:    return AArch64::TBZX;
    case AArch64::CBNZW:    return AArch64::CBZW;
    case AArch64::CBNZX:    return AArch64::CBZX;
    }
  }

  void reverseBranchCondition(MCInst &Inst, const MCSymbol *TBB,
                              MCContext *Ctx) const override {
    if (isTB(Inst) || isCB(Inst)) {
      Inst.setOpcode(getInvertedBranchOpcode(Inst.getOpcode()));
      assert(Inst.getOpcode() != 0 && "Invalid branch instruction");
    } else if (Inst.getOpcode() == AArch64::Bcc) {
      Inst.getOperand(0).setImm(AArch64CC::getInvertedCondCode(
          static_cast<AArch64CC::CondCode>(Inst.getOperand(0).getImm())));
      assert(Inst.getOperand(0).getImm() != AArch64CC::AL &&
             Inst.getOperand(0).getImm() != AArch64CC::NV &&
             "Can't reverse ALWAYS cond code");
    } else {
      LLVM_DEBUG(Inst.dump());
      llvm_unreachable("Unrecognized branch instruction");
    }
    replaceBranchTarget(Inst, TBB, Ctx);
  }

  int getPCRelEncodingSize(const MCInst &Inst) const override {
    switch (Inst.getOpcode()) {
    default:
      llvm_unreachable("Failed to get pcrel encoding size");
      return 0;
    case AArch64::TBZW:     return 16;
    case AArch64::TBZX:     return 16;
    case AArch64::TBNZW:    return 16;
    case AArch64::TBNZX:    return 16;
    case AArch64::CBZW:     return 21;
    case AArch64::CBZX:     return 21;
    case AArch64::CBNZW:    return 21;
    case AArch64::CBNZX:    return 21;
    case AArch64::B:        return 28;
    case AArch64::BL:       return 28;
    case AArch64::Bcc:      return 21;
    }
  }
  int getShortJmpEncodingSize() const override { return 33; }

  int getUncondBranchEncodingSize() const override { return 28; }
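  // Note on the two values above: the short-jump sequence (adrp + add + br)
  // can reach targets within +/-4 GiB of PC, i.e. a 33-bit displacement, while
  // an unconditional B encodes a 26-bit word offset, i.e. 28 bits of byte
  // displacement.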
  InstructionListType createCmpJE(MCPhysReg RegNo, int64_t Imm,
                                  const MCSymbol *Target,
                                  MCContext *Ctx) const override {
    InstructionListType Code;
    Code.emplace_back(MCInstBuilder(AArch64::SUBSXri)
                          .addReg(RegNo)
                          .addReg(RegNo)
                          .addImm(Imm)
                          .addImm(0));
    Code.emplace_back(MCInstBuilder(AArch64::Bcc)
                          .addImm(Imm)
                          .addExpr(MCSymbolRefExpr::create(
                              Target, MCSymbolRefExpr::VK_None, *Ctx)));
    return Code;
  }
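  // Note: the condition-code operand of the Bcc above reuses Imm; the only
  // caller in this file passes Imm == 0, which encodes AArch64CC::EQ, so the
  // emitted sequence behaves as "subs xN, xN, #0; b.eq Target".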
  void createTailCall(MCInst &Inst, const MCSymbol *Target,
                      MCContext *Ctx) override {
    return createDirectCall(Inst, Target, Ctx, /*IsTailCall*/ true);
  }

  void createLongTailCall(InstructionListType &Seq, const MCSymbol *Target,
                          MCContext *Ctx) override {
    createShortJmp(Seq, Target, Ctx, /*IsTailCall*/ true);
  }

  void createTrap(MCInst &Inst) const override {
    Inst.clear();
    Inst.setOpcode(AArch64::BRK);
    Inst.addOperand(MCOperand::createImm(1));
  }

  bool convertJmpToTailCall(MCInst &Inst) override {
    setTailCall(Inst);
    return true;
  }

  bool convertTailCallToJmp(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }
  InstructionListType createIndirectPltCall(const MCInst &DirectCall,
                                            const MCSymbol *TargetLocation,
                                            MCContext *Ctx) override {
    const bool IsTailCall = isTailCall(DirectCall);
    assert((DirectCall.getOpcode() == AArch64::BL ||
            (DirectCall.getOpcode() == AArch64::B && IsTailCall)) &&
           "64-bit direct (tail) call instruction expected");

    InstructionListType Code;
    // Code sequence for indirect plt call:
    // adrp  x16 <symbol>
    // ldr   x17, [x16, #<offset>]
    // blr   x17  ; or 'br' for tail calls

    MCInst InstAdrp;
    InstAdrp.setOpcode(AArch64::ADRP);
    InstAdrp.addOperand(MCOperand::createReg(AArch64::X16));
    InstAdrp.addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(InstAdrp, /* OpNum */ 1, TargetLocation,
                          /* Addend */ 0, Ctx, ELF::R_AARCH64_ADR_GOT_PAGE);
    Code.emplace_back(InstAdrp);

    MCInst InstLoad;
    InstLoad.setOpcode(AArch64::LDRXui);
    InstLoad.addOperand(MCOperand::createReg(AArch64::X17));
    InstLoad.addOperand(MCOperand::createReg(AArch64::X16));
    InstLoad.addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(InstLoad, /* OpNum */ 2, TargetLocation,
                          /* Addend */ 0, Ctx, ELF::R_AARCH64_LD64_GOT_LO12_NC);
    Code.emplace_back(InstLoad);

    MCInst InstCall;
    InstCall.setOpcode(IsTailCall ? AArch64::BR : AArch64::BLR);
    InstCall.addOperand(MCOperand::createReg(AArch64::X17));
    if (IsTailCall)
      setTailCall(InstCall);
    Code.emplace_back(InstCall);

    return Code;
  }

  bool lowerTailCall(MCInst &Inst) override {
    removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (getConditionalTailCall(Inst))
      unsetConditionalTailCall(Inst);
    return true;
  }
  bool isNoop(const MCInst &Inst) const override {
    return Inst.getOpcode() == AArch64::HINT &&
           Inst.getOperand(0).getImm() == 0;
  }

  void createNoop(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::HINT);
    Inst.clear();
    Inst.addOperand(MCOperand::createImm(0));
  }

  bool mayStore(const MCInst &Inst) const override { return false; }

  void createDirectCall(MCInst &Inst, const MCSymbol *Target, MCContext *Ctx,
                        bool IsTailCall) override {
    Inst.setOpcode(IsTailCall ? AArch64::B : AArch64::BL);
    Inst.clear();
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
    if (IsTailCall)
      convertJmpToTailCall(Inst);
  }
  bool analyzeBranch(InstructionIterator Begin, InstructionIterator End,
                     const MCSymbol *&TBB, const MCSymbol *&FBB,
                     MCInst *&CondBranch,
                     MCInst *&UncondBranch) const override {
    auto I = End;

    while (I != Begin) {
      --I;

      // Ignore nops and CFIs
      if (isPseudo(*I) || isNoop(*I))
        continue;

      // Stop when we find the first non-terminator
      if (!isTerminator(*I) || isTailCall(*I) || !isBranch(*I))
        break;

      // Handle unconditional branches.
      if (isUnconditionalBranch(*I)) {
        // If any code was seen after this unconditional branch, we've seen
        // unreachable code. Ignore them.
        CondBranch = nullptr;
        UncondBranch = &*I;
        const MCSymbol *Sym = getTargetSymbol(*I);
        assert(Sym != nullptr &&
               "Couldn't extract BB symbol from jump operand");
        TBB = Sym;
        continue;
      }

      // Handle conditional branches and ignore indirect branches
      if (isIndirectBranch(*I))
        return false;

      if (CondBranch == nullptr) {
        const MCSymbol *TargetBB = getTargetSymbol(*I);
        if (TargetBB == nullptr) {
          // Unrecognized branch target
          return false;
        }
        FBB = TBB;
        TBB = TargetBB;
        CondBranch = &*I;
        continue;
      }

      llvm_unreachable("multiple conditional branches in one BB");
    }
    return true;
  }
  void createLongJmp(InstructionListType &Seq, const MCSymbol *Target,
                     MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of "Procedure Call
    //   Standard for the ARM 64-bit Architecture (AArch64)".
    // The sequence of instructions we create here is the following:
    //  movz ip0, #:abs_g3:<addr>
    //  movk ip0, #:abs_g2_nc:<addr>
    //  movk ip0, #:abs_g1_nc:<addr>
    //  movk ip0, #:abs_g0_nc:<addr>
    //  br ip0
    MCInst Inst;
    Inst.setOpcode(AArch64::MOVZXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G3, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x30));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G2_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x20));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G1_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0x10));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::MOVKXi);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    Inst.addOperand(MCOperand::createExpr(AArch64MCExpr::create(
        MCSymbolRefExpr::create(Target, MCSymbolRefExpr::VK_None, *Ctx),
        AArch64MCExpr::VK_ABS_G0_NC, *Ctx)));
    Inst.addOperand(MCOperand::createImm(0));
    Seq.emplace_back(Inst);

    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(AArch64::X16));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.emplace_back(Inst);
  }
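  // Each MOVZ/MOVK above fills one 16-bit slice of the absolute target
  // address; the trailing immediate (0x30/0x20/0x10/0) is the left-shift
  // amount in bits, so the four instructions together materialize the full
  // 64-bit address in x16 before the final "br x16".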
  void createShortJmp(InstructionListType &Seq, const MCSymbol *Target,
                      MCContext *Ctx, bool IsTailCall) override {
    // ip0 (r16) is reserved to the linker (refer to 5.3.1.1 of "Procedure Call
    //   Standard for the ARM 64-bit Architecture (AArch64)".
    // The sequence of instructions we create here is the following:
    //  adrp ip0, imm
    //  add ip0, ip0, imm
    //  br ip0
    MCPhysReg Reg = AArch64::X16;
    InstructionListType Insts = materializeAddress(Target, Ctx, Reg);
    Insts.emplace_back();
    MCInst &Inst = Insts.back();
    Inst.clear();
    Inst.setOpcode(AArch64::BR);
    Inst.addOperand(MCOperand::createReg(Reg));
    if (IsTailCall)
      setTailCall(Inst);
    Seq.swap(Insts);
  }

  /// Matching pattern here is
  ///
  /// ADRP  x16, imm
  /// ADD   x16, x16, imm
  /// BR    x16
  ///
  uint64_t matchLinkerVeneer(InstructionIterator Begin, InstructionIterator End,
                             uint64_t Address, const MCInst &CurInst,
                             MCInst *&TargetHiBits, MCInst *&TargetLowBits,
                             uint64_t &Target) const override {
    if (CurInst.getOpcode() != AArch64::BR || !CurInst.getOperand(0).isReg() ||
        CurInst.getOperand(0).getReg() != AArch64::X16)
      return 0;

    auto I = End;
    if (I == Begin)
      return 0;

    --I;
    Address -= 4;
    if (I == Begin || I->getOpcode() != AArch64::ADDXri ||
        MCPlus::getNumPrimeOperands(*I) < 3 || !I->getOperand(0).isReg() ||
        !I->getOperand(1).isReg() ||
        I->getOperand(0).getReg() != AArch64::X16 ||
        I->getOperand(1).getReg() != AArch64::X16 || !I->getOperand(2).isImm())
      return 0;
    TargetLowBits = &*I;
    uint64_t Addr = I->getOperand(2).getImm() & 0xFFF;

    --I;
    Address -= 4;
    if (I->getOpcode() != AArch64::ADRP ||
        MCPlus::getNumPrimeOperands(*I) < 2 || !I->getOperand(0).isReg() ||
        !I->getOperand(1).isImm() || I->getOperand(0).getReg() != AArch64::X16)
      return 0;
    TargetHiBits = &*I;
    Addr |= (Address + ((int64_t)I->getOperand(1).getImm() << 12)) &
            0xFFFFFFFFFFFFF000ULL;
    Target = Addr;
    return 3;
  }
  /// Match the following pattern:
  ///
  ///   LDR x16, .L1
  ///   BR  x16
  /// L1:
  ///   .quad Target
  ///
  /// Populate \p TargetAddress with the Target value on successful match.
  bool matchAbsLongVeneer(const BinaryFunction &BF,
                          uint64_t &TargetAddress) const override {
    if (BF.size() != 1 || BF.getMaxSize() < 16)
      return false;

    if (!BF.hasConstantIsland())
      return false;

    const BinaryBasicBlock &BB = BF.front();
    if (BB.size() != 2)
      return false;

    const MCInst &LDRInst = BB.getInstructionAtIndex(0);
    if (LDRInst.getOpcode() != AArch64::LDRXl)
      return false;

    if (!LDRInst.getOperand(0).isReg() ||
        LDRInst.getOperand(0).getReg() != AArch64::X16)
      return false;

    const MCSymbol *TargetSym = getTargetSymbol(LDRInst, 1);
    if (!TargetSym)
      return false;

    const MCInst &BRInst = BB.getInstructionAtIndex(1);
    if (BRInst.getOpcode() != AArch64::BR)
      return false;
    if (!BRInst.getOperand(0).isReg() ||
        BRInst.getOperand(0).getReg() != AArch64::X16)
      return false;

    const BinaryFunction::IslandInfo &IInfo = BF.getIslandInfo();
    if (IInfo.HasDynamicRelocations)
      return false;

    auto Iter = IInfo.Offsets.find(8);
    if (Iter == IInfo.Offsets.end() || Iter->second != TargetSym)
      return false;

    // Extract the absolute value stored inside the island.
    StringRef SectionContents = BF.getOriginSection()->getContents();
    StringRef FunctionContents = SectionContents.substr(
        BF.getAddress() - BF.getOriginSection()->getAddress(), BF.getMaxSize());

    const BinaryContext &BC = BF.getBinaryContext();
    DataExtractor DE(FunctionContents, BC.AsmInfo->isLittleEndian(),
                     BC.AsmInfo->getCodePointerSize());
    uint64_t Offset = 8;
    TargetAddress = DE.getAddress(&Offset);

    return true;
  }

  bool matchAdrpAddPair(const MCInst &Adrp, const MCInst &Add) const override {
    if (!isADRP(Adrp) || !isAddXri(Add))
      return false;

    assert(Adrp.getOperand(0).isReg() &&
           "Unexpected operand in ADRP instruction");
    MCPhysReg AdrpReg = Adrp.getOperand(0).getReg();
    assert(Add.getOperand(1).isReg() &&
           "Unexpected operand in ADDXri instruction");
    MCPhysReg AddReg = Add.getOperand(1).getReg();
    return AdrpReg == AddReg;
  }
  bool replaceImmWithSymbolRef(MCInst &Inst, const MCSymbol *Symbol,
                               int64_t Addend, MCContext *Ctx, int64_t &Value,
                               uint64_t RelType) const override {
    unsigned ImmOpNo = -1U;
    for (unsigned Index = 0; Index < MCPlus::getNumPrimeOperands(Inst);
         ++Index) {
      if (Inst.getOperand(Index).isImm()) {
        ImmOpNo = Index;
        break;
      }
    }
    if (ImmOpNo == -1U)
      return false;

    Value = Inst.getOperand(ImmOpNo).getImm();

    setOperandToSymbolRef(Inst, ImmOpNo, Symbol, Addend, Ctx, RelType);

    return true;
  }

  void createUncondBranch(MCInst &Inst, const MCSymbol *TBB,
                          MCContext *Ctx) const override {
    Inst.setOpcode(AArch64::B);
    Inst.clear();
    Inst.addOperand(MCOperand::createExpr(getTargetExprFor(
        Inst, MCSymbolRefExpr::create(TBB, MCSymbolRefExpr::VK_None, *Ctx),
        *Ctx, 0)));
  }
  bool shouldRecordCodeRelocation(uint64_t RelType) const override {
    switch (RelType) {
    case ELF::R_AARCH64_ABS64:
    case ELF::R_AARCH64_ABS32:
    case ELF::R_AARCH64_ABS16:
    case ELF::R_AARCH64_ADD_ABS_LO12_NC:
    case ELF::R_AARCH64_ADR_GOT_PAGE:
    case ELF::R_AARCH64_ADR_PREL_LO21:
    case ELF::R_AARCH64_ADR_PREL_PG_HI21:
    case ELF::R_AARCH64_ADR_PREL_PG_HI21_NC:
    case ELF::R_AARCH64_LD64_GOT_LO12_NC:
    case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
    case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
    case ELF::R_AARCH64_TLSDESC_ADD_LO12:
    case ELF::R_AARCH64_TLSDESC_ADR_PAGE21:
    case ELF::R_AARCH64_TLSDESC_ADR_PREL21:
    case ELF::R_AARCH64_TLSDESC_LD64_LO12:
    case ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case ELF::R_AARCH64_MOVW_UABS_G0:
    case ELF::R_AARCH64_MOVW_UABS_G0_NC:
    case ELF::R_AARCH64_MOVW_UABS_G1:
    case ELF::R_AARCH64_MOVW_UABS_G1_NC:
    case ELF::R_AARCH64_MOVW_UABS_G2:
    case ELF::R_AARCH64_MOVW_UABS_G2_NC:
    case ELF::R_AARCH64_MOVW_UABS_G3:
    case ELF::R_AARCH64_PREL16:
    case ELF::R_AARCH64_PREL32:
    case ELF::R_AARCH64_PREL64:
      return true;
    case ELF::R_AARCH64_CALL26:
    case ELF::R_AARCH64_JUMP26:
    case ELF::R_AARCH64_TSTBR14:
    case ELF::R_AARCH64_CONDBR19:
    case ELF::R_AARCH64_TLSDESC_CALL:
    case ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12:
    case ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      return false;
    default:
      llvm_unreachable("Unexpected AArch64 relocation type in code");
    }
  }

  StringRef getTrapFillValue() const override {
    return StringRef("\0\0\0\0", 4);
  }

  void createReturn(MCInst &Inst) const override {
    Inst.setOpcode(AArch64::RET);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::LR));
  }
  void createStackPointerIncrement(
      MCInst &Inst, int Size,
      bool NoFlagsClobber = false /*unused for AArch64*/) const override {
    Inst.setOpcode(AArch64::SUBXri);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createImm(Size));
    Inst.addOperand(MCOperand::createImm(0));
  }
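  // Note: "increment" here means growing the stack frame: the instruction
  // built above is "sub sp, sp, #Size". createStackPointerDecrement below
  // emits the matching "add sp, sp, #Size" to release the space.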
  void createStackPointerDecrement(
      MCInst &Inst, int Size,
      bool NoFlagsClobber = false /*unused for AArch64*/) const override {
    Inst.setOpcode(AArch64::ADDXri);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createReg(AArch64::SP));
    Inst.addOperand(MCOperand::createImm(Size));
    Inst.addOperand(MCOperand::createImm(0));
  }

  void createIndirectBranch(MCInst &Inst, MCPhysReg MemBaseReg,
                            int64_t Disp) const {
    Inst.setOpcode(AArch64::BR);
    Inst.clear();
    Inst.addOperand(MCOperand::createReg(MemBaseReg));
  }

  InstructionListType createInstrumentedIndCallHandlerExitBB() const override {
    InstructionListType Insts(5);
    // Code sequence for instrumented indirect call handler:
    //   msr  nzcv, x1
    //   ldp  x0, x1, [sp], #16
    //   ldr  x16, [sp], #16
    //   ldp  x0, x1, [sp], #16
    //   br   x16
    setSystemFlag(Insts[0], AArch64::X1);
    createPopRegisters(Insts[1], AArch64::X0, AArch64::X1);
    // Here we load address of the next function which should be called in the
    // original binary to X16 register. Writing to X16 is permitted without
    // needing to restore.
    loadReg(Insts[2], AArch64::X16, AArch64::SP);
    createPopRegisters(Insts[3], AArch64::X0, AArch64::X1);
    createIndirectBranch(Insts[4], AArch64::X16, 0);
    return Insts;
  }

  InstructionListType
  createInstrumentedIndTailCallHandlerExitBB() const override {
    return createInstrumentedIndCallHandlerExitBB();
  }

  InstructionListType createGetter(MCContext *Ctx, const char *name) const {
    InstructionListType Insts(4);
    MCSymbol *Locs = Ctx->getOrCreateSymbol(name);
    InstructionListType Addr = materializeAddress(Locs, Ctx, AArch64::X0);
    std::copy(Addr.begin(), Addr.end(), Insts.begin());
    assert(Addr.size() == 2 && "Invalid Addr size");
    loadReg(Insts[2], AArch64::X0, AArch64::X0);
    createReturn(Insts[3]);
    return Insts;
  }

  InstructionListType createNumCountersGetter(MCContext *Ctx) const override {
    return createGetter(Ctx, "__bolt_num_counters");
  }

  InstructionListType
  createInstrLocationsGetter(MCContext *Ctx) const override {
    return createGetter(Ctx, "__bolt_instr_locations");
  }

  InstructionListType createInstrTablesGetter(MCContext *Ctx) const override {
    return createGetter(Ctx, "__bolt_instr_tables");
  }

  InstructionListType createInstrNumFuncsGetter(MCContext *Ctx) const override {
    return createGetter(Ctx, "__bolt_instr_num_funcs");
  }
  void convertIndirectCallToLoad(MCInst &Inst, MCPhysReg Reg) override {
    bool IsTailCall = isTailCall(Inst);
    if (IsTailCall)
      removeAnnotation(Inst, MCPlus::MCAnnotation::kTailCall);
    if (Inst.getOpcode() == AArch64::BR || Inst.getOpcode() == AArch64::BLR) {
      Inst.setOpcode(AArch64::ORRXrs);
      Inst.insert(Inst.begin(), MCOperand::createReg(Reg));
      Inst.insert(Inst.begin() + 1, MCOperand::createReg(AArch64::XZR));
      Inst.insert(Inst.begin() + 3, MCOperand::createImm(0));
      return;
    }
    llvm_unreachable("not implemented");
  }
  InstructionListType createLoadImmediate(const MCPhysReg Dest,
                                          uint64_t Imm) const override {
    InstructionListType Insts(4);
    int Shift = 48;
    for (int I = 0; I < 4; I++, Shift -= 16) {
      Insts[I].setOpcode(AArch64::MOVKXi);
      Insts[I].addOperand(MCOperand::createReg(Dest));
      Insts[I].addOperand(MCOperand::createReg(Dest));
      Insts[I].addOperand(MCOperand::createImm((Imm >> Shift) & 0xFFFF));
      Insts[I].addOperand(MCOperand::createImm(Shift));
    }
    return Insts;
  }
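  // The loop above emits four MOVKs writing the 16-bit halfwords at shifts
  // 48/32/16/0, so every bit of Dest is overwritten and its previous contents
  // do not matter.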
  void createIndirectCallInst(MCInst &Inst, bool IsTailCall,
                              MCPhysReg Reg) const {
    Inst.clear();
    Inst.setOpcode(IsTailCall ? AArch64::BR : AArch64::BLR);
    Inst.addOperand(MCOperand::createReg(Reg));
  }

  InstructionListType createInstrumentedIndirectCall(MCInst &&CallInst,
                                                     MCSymbol *HandlerFuncAddr,
                                                     int CallSiteID,
                                                     MCContext *Ctx) override {
    InstructionListType Insts;
    // Code sequence used to enter indirect call instrumentation helper:
    //   stp x0, x1, [sp, #-16]! createPushRegisters
    //   mov target x0  convertIndirectCallToLoad -> orr x0 target xzr
    //   mov x1 CallSiteID createLoadImmediate ->
    //   movk    x1, #0x0, lsl #48
    //   movk    x1, #0x0, lsl #32
    //   movk    x1, #0x0, lsl #16
    //   movk    x1, #0x0
    //   stp x0, x1, [sp, #-16]!
    //   bl *HandlerFuncAddr createIndirectCall ->
    //   adr x0 *HandlerFuncAddr -> adrp + add
    //   blr x0
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.emplace_back(CallInst);
    convertIndirectCallToLoad(Insts.back(), AArch64::X0);
    InstructionListType LoadImm =
        createLoadImmediate(getIntArgRegister(1), CallSiteID);
    Insts.insert(Insts.end(), LoadImm.begin(), LoadImm.end());
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.resize(Insts.size() + 2);
    InstructionListType Addr =
        materializeAddress(HandlerFuncAddr, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size());
    Insts.emplace_back();
    createIndirectCallInst(Insts.back(), isTailCall(CallInst), AArch64::X0);

    // Carry over metadata including tail call marker if present.
    stripAnnotations(Insts.back());
    moveAnnotations(std::move(CallInst), Insts.back());

    return Insts;
  }

  InstructionListType
  createInstrumentedIndCallHandlerEntryBB(const MCSymbol *InstrTrampoline,
                                          const MCSymbol *IndCallHandler,
                                          MCContext *Ctx) override {
    // Code sequence used to check whether InstrTampoline was initialized
    // and call it if so, returns via IndCallHandler
    //   stp     x0, x1, [sp, #-16]!
    //   mrs     x1, nzcv
    //   adr     x0, InstrTrampoline -> adrp + add
    //   ldr     x0, [x0]
    //   subs    x0, x0, #0x0
    //   b.eq    IndCallHandler
    //   str     x30, [sp, #-16]!
    //   blr     x0
    //   ldr     x30, [sp], #16
    //   b       IndCallHandler
    InstructionListType Insts;
    Insts.emplace_back();
    createPushRegisters(Insts.back(), AArch64::X0, AArch64::X1);
    Insts.emplace_back();
    getSystemFlag(Insts.back(), getIntArgRegister(1));
    Insts.emplace_back();
    Insts.emplace_back();
    InstructionListType Addr =
        materializeAddress(InstrTrampoline, Ctx, AArch64::X0);
    std::copy(Addr.begin(), Addr.end(), Insts.end() - Addr.size());
    assert(Addr.size() == 2 && "Invalid Addr size");
    Insts.emplace_back();
    loadReg(Insts.back(), AArch64::X0, AArch64::X0);
    InstructionListType cmpJmp =
        createCmpJE(AArch64::X0, 0, IndCallHandler, Ctx);
    Insts.insert(Insts.end(), cmpJmp.begin(), cmpJmp.end());
    Insts.emplace_back();
    storeReg(Insts.back(), AArch64::LR, AArch64::SP);
    Insts.emplace_back();
    Insts.back().setOpcode(AArch64::BLR);
    Insts.back().addOperand(MCOperand::createReg(AArch64::X0));
    Insts.emplace_back();
    loadReg(Insts.back(), AArch64::LR, AArch64::SP);
    Insts.emplace_back();
    createDirectCall(Insts.back(), IndCallHandler, Ctx, /*IsTailCall*/ true);

    return Insts;
  }
  InstructionListType
  createInstrIncMemory(const MCSymbol *Target, MCContext *Ctx, bool IsLeaf,
                       unsigned CodePointerSize) const override {
    unsigned int I = 0;
    InstructionListType Instrs(IsLeaf ? 12 : 10);

    if (IsLeaf)
      createStackPointerIncrement(Instrs[I++], 128);
    createPushRegisters(Instrs[I++], AArch64::X0, AArch64::X1);
    getSystemFlag(Instrs[I++], AArch64::X1);
    InstructionListType Addr = materializeAddress(Target, Ctx, AArch64::X0);
    assert(Addr.size() == 2 && "Invalid Addr size");
    std::copy(Addr.begin(), Addr.end(), Instrs.begin() + I);
    I += Addr.size();
    storeReg(Instrs[I++], AArch64::X2, AArch64::SP);
    InstructionListType Insts = createIncMemory(AArch64::X0, AArch64::X2);
    assert(Insts.size() == 2 && "Invalid Insts size");
    std::copy(Insts.begin(), Insts.end(), Instrs.begin() + I);
    I += Insts.size();
    loadReg(Instrs[I++], AArch64::X2, AArch64::SP);
    setSystemFlag(Instrs[I++], AArch64::X1);
    createPopRegisters(Instrs[I++], AArch64::X0, AArch64::X1);
    if (IsLeaf)
      createStackPointerDecrement(Instrs[I++], 128);
    return Instrs;
  }

  std::vector<MCInst> createSymbolTrampoline(const MCSymbol *TgtSym,
                                             MCContext *Ctx) override {
    std::vector<MCInst> Insts;
    createShortJmp(Insts, TgtSym, Ctx, /*IsTailCall*/ true);
    return Insts;
  }

  InstructionListType materializeAddress(const MCSymbol *Target, MCContext *Ctx,
                                         MCPhysReg RegName,
                                         int64_t Addend = 0) const override {
    // Get page-aligned address and add page offset
    InstructionListType Insts(2);
    Insts[0].setOpcode(AArch64::ADRP);
    Insts[0].clear();
    Insts[0].addOperand(MCOperand::createReg(RegName));
    Insts[0].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[0], /* OpNum */ 1, Target, Addend, Ctx,
                          ELF::R_AARCH64_NONE);
    Insts[1].setOpcode(AArch64::ADDXri);
    Insts[1].clear();
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createReg(RegName));
    Insts[1].addOperand(MCOperand::createImm(0));
    Insts[1].addOperand(MCOperand::createImm(0));
    setOperandToSymbolRef(Insts[1], /* OpNum */ 2, Target, Addend, Ctx,
                          ELF::R_AARCH64_ADD_ABS_LO12_NC);
    return Insts;
  }
  std::optional<Relocation>
  createRelocation(const MCFixup &Fixup,
                   const MCAsmBackend &MAB) const override {
    const MCFixupKindInfo &FKI = MAB.getFixupKindInfo(Fixup.getKind());

    assert(FKI.TargetOffset == 0 && "0-bit relocation offset expected");
    const uint64_t RelOffset = Fixup.getOffset();

    uint64_t RelType;
    if (Fixup.getKind() == MCFixupKind(AArch64::fixup_aarch64_pcrel_call26))
      RelType = ELF::R_AARCH64_CALL26;
    else if (Fixup.getKind() ==
             MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26))
      RelType = ELF::R_AARCH64_JUMP26;
    else if (FKI.Flags & MCFixupKindInfo::FKF_IsPCRel) {
      switch (FKI.TargetSize) {
      default:
        return std::nullopt;
      case 16:
        RelType = ELF::R_AARCH64_PREL16;
        break;
      case 32:
        RelType = ELF::R_AARCH64_PREL32;
        break;
      case 64:
        RelType = ELF::R_AARCH64_PREL64;
        break;
      }
    } else {
      switch (FKI.TargetSize) {
      default:
        return std::nullopt;
      case 16:
        RelType = ELF::R_AARCH64_ABS16;
        break;
      case 32:
        RelType = ELF::R_AARCH64_ABS32;
        break;
      case 64:
        RelType = ELF::R_AARCH64_ABS64;
        break;
      }
    }

    auto [RelSymbol, RelAddend] = extractFixupExpr(Fixup);

    return Relocation({RelOffset, RelSymbol, RelType, RelAddend, 0});
  }

  uint16_t getMinFunctionAlignment() const override { return 4; }
};

} // end anonymous namespace

namespace llvm {
namespace bolt {

MCPlusBuilder *createAArch64MCPlusBuilder(const MCInstrAnalysis *Analysis,
                                          const MCInstrInfo *Info,
                                          const MCRegisterInfo *RegInfo,
                                          const MCSubtargetInfo *STI) {
  return new AArch64MCPlusBuilder(Analysis, Info, RegInfo, STI);
}

} // namespace bolt
} // namespace llvm