//===----- x86_64.cpp - Generic JITLink x86-64 edge kinds, utilities -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing x86-64 objects.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/JITLink/x86_64.h"

#define DEBUG_TYPE "jitlink"
namespace llvm {
namespace jitlink {
namespace x86_64 {

const char *getEdgeKindName(Edge::Kind K) {
  switch (K) {
28 return "Pointer32Signed";
  case Delta64FromGOT:
    return "Delta64FromGOT";
  case BranchPCRel32:
    return "BranchPCRel32";
  case BranchPCRel32ToPtrJumpStub:
    return "BranchPCRel32ToPtrJumpStub";
  case BranchPCRel32ToPtrJumpStubBypassable:
    return "BranchPCRel32ToPtrJumpStubBypassable";
  case RequestGOTAndTransformToDelta32:
    return "RequestGOTAndTransformToDelta32";
  case RequestGOTAndTransformToDelta64:
    return "RequestGOTAndTransformToDelta64";
  case RequestGOTAndTransformToDelta64FromGOT:
    return "RequestGOTAndTransformToDelta64FromGOT";
  case PCRel32GOTLoadREXRelaxable:
    return "PCRel32GOTLoadREXRelaxable";
  case RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable:
    return "RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable";
  case PCRel32GOTLoadRelaxable:
    return "PCRel32GOTLoadRelaxable";
  case RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
    return "RequestGOTAndTransformToPCRel32GOTLoadRelaxable";
  case PCRel32TLVPLoadREXRelaxable:
    return "PCRel32TLVPLoadREXRelaxable";
  case RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable:
    return "RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable";
  default:
    return getGenericEdgeKindName(static_cast<Edge::Kind>(K));
  }
}
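
// Content for a null (zero-initialized) pointer-sized block. Blocks created
// with this content typically receive a Pointer64 edge that writes the real
// target address at fixup time.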
const char NullPointerContent[PointerSize] = {0x00, 0x00, 0x00, 0x00,
                                              0x00, 0x00, 0x00, 0x00};
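
// Content for a pointer jump stub: 0xff 0x25 <disp32> encodes
// "jmpq *disp32(%rip)". The zero displacement is expected to be fixed up to
// point at a pointer-sized entry holding the real target address.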
const char PointerJumpStubContent[6] = {
    static_cast<char>(0xFFu), 0x25, 0x00, 0x00, 0x00, 0x00};
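
// Post-allocation pass: walk all edges and, where the final target turns out
// to be directly reachable, rewrite GOT-indirect loads and bypassable stub
// branches into direct forms (broadly analogous to the GOTPCRELX relaxations
// performed by static linkers).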
Error optimizeGOTAndStubAccesses(LinkGraph &G) {
  LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");

  for (auto *B : G.blocks())
    for (auto &E : B->edges()) {
      if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable ||
          E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable) {
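        // Relaxable GOT-load edges cover the 32-bit displacement of an
        // instruction that references its operand through a GOT entry. If the
        // eventual target is reachable directly, the GOT indirection can be
        // rewritten away below.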
        bool REXPrefix = E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable;
        assert(E.getOffset() >= (REXPrefix ? 3u : 2u) &&
               "GOT edge occurs too early in block");

        auto *FixupData = reinterpret_cast<uint8_t *>(
                              const_cast<char *>(B->getContent().data())) +
                          E.getOffset();
        const uint8_t Op = FixupData[-2];
        const uint8_t ModRM = FixupData[-1];

98 auto &GOTEntryBlock
= E
.getTarget().getBlock();
99 assert(GOTEntryBlock
.getSize() == G
.getPointerSize() &&
100 "GOT entry block should be pointer sized");
101 assert(GOTEntryBlock
.edges_size() == 1 &&
102 "GOT entry should only have one outgoing edge");
103 auto &GOTTarget
= GOTEntryBlock
.edges().begin()->getTarget();
104 orc::ExecutorAddr TargetAddr
= GOTTarget
.getAddress();
105 orc::ExecutorAddr EdgeAddr
= B
->getFixupAddress(E
);
106 int64_t Displacement
= TargetAddr
- EdgeAddr
+ 4;
107 bool TargetInRangeForImmU32
= isUInt
<32>(TargetAddr
.getValue());
108 bool DisplacementInRangeForImmS32
= isInt
<32>(Displacement
);
        // If neither the target address nor the displacement fits in 32 bits,
        // there is no optimization opportunity.
        if (!(TargetInRangeForImmU32 || DisplacementInRangeForImmS32))
          continue;

        // Transform "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
        if (Op == 0x8b && DisplacementInRangeForImmS32) {
          FixupData[-2] = 0x8d;
          E.setKind(x86_64::Delta32);
          E.setTarget(GOTTarget);
          E.setAddend(E.getAddend() - 4);
          LLVM_DEBUG({
            dbgs() << "  Replaced GOT load with LEA:\n    ";
            printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
          continue;
        }

        // Transform call/jmp instructions.
        if (Op == 0xff && TargetInRangeForImmU32) {
          if (ModRM == 0x15) {
            // The ABI says we can convert "call *foo@GOTPCREL(%rip)" to
            // "nop; call foo", but lld converts it to "addr32 call foo"
            // because that keeps the result a single instruction.
            FixupData[-2] = 0x67;
            FixupData[-1] = 0xe8;
            LLVM_DEBUG({
              dbgs() << "  replaced call instruction's memory operand with imm "
                        "operand:\n    ";
              printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
              dbgs() << "\n";
            });
          } else {
            // Transform "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
            assert(ModRM == 0x25 && "Invalid ModRm for call/jmp instructions");
            FixupData[-2] = 0xe9;
            FixupData[3] = 0x90;
            E.setOffset(E.getOffset() - 1);
            LLVM_DEBUG({
              dbgs() << "  replaced jmp instruction's memory operand with imm "
                        "operand:\n    ";
              printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
              dbgs() << "\n";
            });
          }

          E.setKind(x86_64::Pointer32);
          E.setTarget(GOTTarget);
          continue;
        }
      } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubBypassable) {
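        // A bypassable stub branch targets a pointer jump stub, which in turn
        // loads the real target out of a pointer (GOT) entry. If that target
        // is within 32-bit branch range we can branch to it directly and skip
        // the stub.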
161 auto &StubBlock
= E
.getTarget().getBlock();
        assert(StubBlock.getSize() == sizeof(PointerJumpStubContent) &&
               "Stub block should be stub sized");
        assert(StubBlock.edges_size() == 1 &&
               "Stub block should only have one outgoing edge");

        auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
        assert(GOTBlock.getSize() == G.getPointerSize() &&
               "GOT block should be pointer sized");
        assert(GOTBlock.edges_size() == 1 &&
               "GOT block should only have one outgoing edge");

        auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
        orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
        orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();

        int64_t Displacement = TargetAddr - EdgeAddr + 4;
        if (isInt<32>(Displacement)) {
          E.setKind(x86_64::BranchPCRel32);
          E.setTarget(GOTTarget);
          LLVM_DEBUG({
            dbgs() << "  Replaced stub branch with direct branch:\n    ";
            printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
            dbgs() << "\n";
          });
        }
      }
    }

  return Error::success();
}

} // end namespace x86_64
} // end namespace jitlink
} // end namespace llvm