1 //===- Relocations.h -------------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #ifndef LLD_ELF_RELOCATIONS_H
10 #define LLD_ELF_RELOCATIONS_H
12 #include "lld/Common/LLVM.h"
13 #include "llvm/ADT/DenseMap.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/Object/ELFTypes.h"
// Forward declarations; full definitions live in other LLD headers.
class InputSectionBase;
class RelocationBaseSection;
// Represents a relocation type, such as R_X86_64_PC32 or R_ARM_THM_CALL.
// NOTE(review): the enclosing `class RelType {` header and its `uint32_t v`
// member are not visible in this chunk; the members below presumably belong
// to it — confirm against the full header.
// Implicit conversions both ways make the type interchangeable with the raw
// numeric relocation value.
/*implicit*/ constexpr RelType(uint32_t v = 0) : v(v) {}
/*implicit*/ operator uint32_t() const { return v; }

// Modifier value used when rewriting jump instructions (see JumpInstrMod
// below).
using JumpModType = uint32_t;
// List of target-independent relocation types. Relocations read
// from files are converted to these types so that the main code
// doesn't have to know about architecture-specific details.
// NOTE(review): the `enum RelExpr {` header and many enumerators are not
// visible in this chunk; only the values below survive.
R_RELAX_TLS_GD_TO_IE_ABS,
R_RELAX_TLS_GD_TO_IE_GOT_OFF,
R_RELAX_TLS_GD_TO_IE_GOTPLT,
R_RELAX_TLS_GD_TO_LE_NEG,
R_RELAX_TLS_LD_TO_LE_ABS,

// The following are abstract relocation types used for only one target.
//
// Even though RelExpr is intended to be a target-neutral representation
// of a relocation type, there are some relocations whose semantics are
// unique to a target. Such relocations are marked with RE_<TARGET_NAME>.
RE_AARCH64_GOT_PAGE_PC,
RE_AARCH64_AUTH_GOT_PAGE_PC,
RE_AARCH64_AUTH_GOT_PC,
RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC,
RE_AARCH64_TLSDESC_PAGE,
RE_MIPS_GOT_LOCAL_PAGE,
RE_PPC64_RELAX_GOT_PC,
RE_RISCV_PC_INDIRECT,
// Same as R_PC but with page-aligned semantics.
RE_LOONGARCH_PAGE_PC,
// Same as R_PLT_PC but with page-aligned semantics.
RE_LOONGARCH_PLT_PAGE_PC,
// In addition to having page-aligned semantics, LoongArch GOT relocs are
// also reused for TLS, making the semantics differ from other architectures.
RE_LOONGARCH_GOT_PAGE_PC,
RE_LOONGARCH_TLSGD_PAGE_PC,
RE_LOONGARCH_TLSDESC_PAGE_PC,
// Architecture-neutral representation of relocation.
// NOTE(review): the `struct Relocation { ... };` definition this comment
// introduces is not visible in this chunk.

// Manipulate jump instructions with these modifiers. These are used to relax
// jump instruction opcodes at basic block boundaries and are particularly
// useful when basic block sections are enabled.
struct JumpInstrMod {
  // NOTE(review): presumably the jump instruction's pre-relaxation modifier
  // value — confirm. The struct's remaining members and closing brace are
  // missing from this chunk.
  JumpModType original;
// This function writes undefined symbol diagnostics to an internal buffer.
// Call reportUndefinedSymbols() after calling scanRelocations() to emit
// them.
template <class ELFT> void scanRelocations(Ctx &ctx);
// NOTE(review): inferred from the name — verifies that sections do not
// reference each other across NOCROSSREFS boundaries; confirm against the
// implementation.
template <class ELFT> void checkNoCrossRefs(Ctx &ctx);
// Emit the undefined-symbol diagnostics buffered by scanRelocations().
void reportUndefinedSymbols(Ctx &);
void postScanRelocations(Ctx &ctx);
void addGotEntry(Ctx &ctx, Symbol &sym);
// Hexagon-specific TLS symbol handling (declarations only; defined
// elsewhere).
void hexagonTLSSymbolUpdate(Ctx &ctx);
bool hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections);
class InputSectionDescription;

// NOTE(review): the `class ThunkCreator {` header and its access specifiers
// are not visible in this chunk; the members below belong to it. Several
// declarations are also truncated mid-parameter-list (marked below).

// Thunk may be incomplete. Avoid inline ctor/dtor.
ThunkCreator(Ctx &ctx);

// Return true if Thunks have been added to OutputSections
bool createThunks(uint32_t pass, ArrayRef<OutputSection *> outputSections);

void mergeThunks(ArrayRef<OutputSection *> outputSections);

ThunkSection *getISDThunkSec(OutputSection *os, InputSection *isec,
                             InputSectionDescription *isd,
                             const Relocation &rel, uint64_t src);

ThunkSection *getISThunkSec(InputSection *isec);

void createInitialThunkSections(ArrayRef<OutputSection *> outputSections);

// NOTE(review): the remaining parameters of this declaration are missing
// from this chunk.
std::pair<Thunk *, bool> getThunk(InputSection *isec, Relocation &rel,

std::pair<Thunk *, bool> getSyntheticLandingPad(Defined &d, int64_t a);

// NOTE(review): the remaining parameters of this declaration are missing
// from this chunk.
ThunkSection *addThunkSection(OutputSection *os, InputSectionDescription *,

bool normalizeExistingThunk(Relocation &rel, uint64_t src);

bool addSyntheticLandingPads();

// Record all the available Thunks for a (Symbol, addend) pair, where Symbol
// is represented as a (section, offset) pair. There may be multiple
// relocations sharing the same (section, offset + addend) pair. We may revert
// a relocation back to its original non-Thunk target, and restore the
// original addend, so we cannot fold offset + addend. A nested pair is used
// because DenseMapInfo is not specialized for std::tuple.
llvm::DenseMap<std::pair<std::pair<SectionBase *, uint64_t>, int64_t>,
               SmallVector<std::unique_ptr<Thunk>, 0>>
    thunkedSymbolsBySectionAndAddend;

// NOTE(review): the member name of this map is missing from this chunk
// (presumably the (Symbol *, addend) counterpart of the map above —
// confirm).
llvm::DenseMap<std::pair<Symbol *, int64_t>,
               SmallVector<std::unique_ptr<Thunk>, 0>>

// Find a Thunk from the Thunks symbol definition, we can use this to find
// the Thunk from a relocation to the Thunks symbol definition.
llvm::DenseMap<Symbol *, Thunk *> thunks;

// Track InputSections that have an inline ThunkSection placed in front
// an inline ThunkSection may have control fall through to the section below
// so we need to make sure that there is only one of them.
// The Mips LA25 Thunk is an example of an inline ThunkSection, as is
// the AArch64BTLandingPadThunk.
llvm::DenseMap<InputSection *, ThunkSection *> thunkedSections;

// Record landing pads, generated for a section + offset destination.
// Landing pads are alternative entry points for destinations that need
// to be reached via thunks that use indirect branches. A destination
// needs at most one landing pad as that can be reused by all callers.
llvm::DenseMap<std::pair<std::pair<SectionBase *, uint64_t>, int64_t>,
               std::unique_ptr<Thunk>>
    landingPadsBySectionAndAddend;

// All the nonLandingPad thunks that have been created, in order of creation.
std::vector<Thunk *> allThunks;

// The number of completed passes of createThunks this permits us
// to do one time initialization on Pass 0 and put a limit on the
// number of times it can be called to prevent infinite loops.
// NOTE(review): the member declaration this comment describes is missing
// from this chunk.
// Decode LEB128 without error checking. Only used by performance critical
// code.
// NOTE(review): several lines of this function are missing from this chunk
// (the `do {` opening, the byte fetch into `byte`, the shift update, and the
// final return), so the fragment below does not parse on its own.
inline uint64_t readLEB128(const uint8_t *&p, uint64_t leb) {
  uint64_t acc = 0, shift = 0, byte;
  // `leb` selects the continuation threshold: 128 for unsigned decoding and
  // 64 for signed decoding, where bytes at or above the threshold have the
  // high (continuation/sign) contribution of 128 removed before merging.
  acc |= (byte - 128 * (byte >= leb)) << shift;
  } while (byte >= 128);
253 inline uint64_t readULEB128(const uint8_t *&p
) { return readLEB128(p
, 128); }
254 inline int64_t readSLEB128(const uint8_t *&p
) { return readLEB128(p
, 64); }
// This class implements a CREL iterator that does not allocate extra memory.
// NOTE(review): several declarations and closing braces are missing from this
// chunk (e.g. the iterator's `count` member, parts of the constructor and
// operator bodies, and the struct's `hdr` member), so the fragment below does
// not parse on its own.
template <bool is64> struct RelocsCrel {
  // Addend width matches the ELF class: 64-bit for ELF64, 32-bit for ELF32.
  using uint = std::conditional_t<is64, uint64_t, uint32_t>;
  struct const_iterator {
    using iterator_category = std::forward_iterator_tag;
    using value_type = llvm::object::Elf_Crel_Impl<is64>;
    using difference_type = ptrdiff_t;
    using pointer = value_type *;
    using reference = const value_type &;
    // From the CREL header: flagBits is 3 when bit 2 of hdr is set, else 2;
    // shift is hdr % 4 and scales the decoded offset deltas.
    uint8_t flagBits, shift;
    // Current decoded relocation; updated incrementally as the iterator
    // advances (CREL entries are delta-encoded).
    llvm::object::Elf_Crel_Impl<is64> crel{};
    const_iterator(size_t hdr, const uint8_t *p)
        : count(hdr / 8), flagBits(hdr & 4 ? 3 : 2), shift(hdr % 4), p(p) {
    // See object::decodeCrel.
    const uint8_t b = *p++;
    // Offset delta: high bits of the first byte, scaled by `shift`.
    crel.r_offset += b >> flagBits << shift;
    ((readULEB128(p) << (7 - flagBits)) - (0x80 >> flagBits)) << shift;
    crel.r_symidx += readSLEB128(p);
    crel.r_type += readSLEB128(p);
    // The addend delta is present only when the per-entry bit is set and the
    // table carries addends at all (flagBits == 3).
    if (b & 4 && flagBits == 3)
      crel.r_addend += static_cast<uint>(readSLEB128(p));
    llvm::object::Elf_Crel_Impl<is64> operator*() const { return crel; };
    const llvm::object::Elf_Crel_Impl<is64> *operator->() const {
    // For llvm::enumerate.
    bool operator==(const const_iterator &r) const { return count == r.count; }
    bool operator!=(const const_iterator &r) const { return count != r.count; }
    const_iterator &operator++() {
    // For RelocationScanner::scanOne.
    void operator+=(size_t n) {
  // Start of the encoded entries (just past the ULEB128 header).
  const uint8_t *p = nullptr;
  constexpr RelocsCrel() = default;
  // readULEB128 advances its argument, so after decoding the header `p`
  // points at the first encoded entry.
  RelocsCrel(const uint8_t *p) : hdr(readULEB128(p)) { this->p = p; }
  // The relocation count lives in the high bits of the header (hdr / 8).
  size_t size() const { return hdr / 8; }
  const_iterator begin() const { return {hdr, p}; }
  // Sentinel with count == 0; iterator comparison only inspects `count`, so
  // the null pointer is never dereferenced.
  const_iterator end() const { return {0, nullptr}; }
// Thin wrapper over ArrayRef<RelTy> giving REL/RELA relocation arrays the
// same interface shape as the CREL specialization below.
// NOTE(review): the struct's remaining members (if any) and closing brace are
// missing from this chunk.
template <class RelTy> struct Relocs : ArrayRef<RelTy> {
  Relocs(ArrayRef<RelTy> a) : ArrayRef<RelTy>(a) {}
// Specialization for CREL relocations: iterate via the streaming RelocsCrel
// decoder instead of an ArrayRef.
// NOTE(review): the `template <bool is64>` prefix and the closing brace are
// missing from this chunk.
struct Relocs<llvm::object::Elf_Crel_Impl<is64>> : RelocsCrel<is64> {
  using RelocsCrel<is64>::RelocsCrel;
// Return an int64_t to make sure we get the sign extension out of the way as
// early as possible.
// NOTE(review): the function body is missing from this chunk.
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Rel &rel) {
// getAddend overload for RELA relocations (explicit addend field).
// NOTE(review): the function body is missing from this chunk.
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Rela &rel) {
// getAddend overload for CREL relocations.
// NOTE(review): the function body is missing from this chunk.
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Crel &rel) {
// Return `rels` sorted by r_offset. If the input is already sorted it is
// returned as-is; otherwise the entries are copied into `storage`, stably
// sorted there, and a view over `storage` is returned — the copy is only
// paid for when actually needed.
// NOTE(review): the lambda's closing `};`, the `if` block's closing brace,
// and the final return are missing from this chunk.
template <typename RelTy>
inline Relocs<RelTy> sortRels(Relocs<RelTy> rels,
                              SmallVector<RelTy, 0> &storage) {
  // Order by offset only; stable_sort preserves the input order of
  // relocations that share an r_offset.
  auto cmp = [](const RelTy &a, const RelTy &b) {
    return a.r_offset < b.r_offset;
  if (!llvm::is_sorted(rels, cmp)) {
    storage.assign(rels.begin(), rels.end());
    llvm::stable_sort(storage, cmp);
    rels = Relocs<RelTy>(storage);
// sortRels overload for CREL relocation streams.
// NOTE(review): the `template <bool is64>` prefix and the function body are
// missing from this chunk, so the behavior for CREL inputs cannot be
// confirmed from this view.
inline Relocs<llvm::object::Elf_Crel_Impl<is64>>
sortRels(Relocs<llvm::object::Elf_Crel_Impl<is64>> rels,
         SmallVector<llvm::object::Elf_Crel_Impl<is64>, 0> &storage) {
// NOTE(review): inferred from the name — accessor for the relocation section
// holding IRELATIVE relocations; confirm against the implementation.
RelocationBaseSection &getIRelativeSection(Ctx &ctx);

// Returns true if Expr refers to a GOT entry. Note that this function
// returns false for TLS variables even though they need GOT, because TLS
// variables use GOT differently than regular variables.
bool needsGot(RelExpr expr);
} // namespace lld::elf