1 //===- Relocations.h -------------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #ifndef LLD_ELF_RELOCATIONS_H
10 #define LLD_ELF_RELOCATIONS_H
12 #include "lld/Common/LLVM.h"
13 #include "llvm/ADT/DenseMap.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/Object/ELFTypes.h"
23 class InputSectionBase
;
27 // Represents a relocation type, such as R_X86_64_PC32 or R_ARM_THM_CALL.
30 /*implicit*/ constexpr RelType(uint32_t v
= 0) : v(v
) {}
31 /*implicit*/ operator uint32_t() const { return v
; }
34 using JumpModType
= uint32_t;
36 // List of target-independent relocation types. Relocations read
37 // from files are converted to these types so that the main code
38 // doesn't have to know about architecture-specific details.
63 R_RELAX_TLS_GD_TO_IE_ABS
,
64 R_RELAX_TLS_GD_TO_IE_GOT_OFF
,
65 R_RELAX_TLS_GD_TO_IE_GOTPLT
,
67 R_RELAX_TLS_GD_TO_LE_NEG
,
70 R_RELAX_TLS_LD_TO_LE_ABS
,
// The following are abstract relocation types used for only one target.
90 // Even though RelExpr is intended to be a target-neutral representation
91 // of a relocation type, there are some relocations whose semantics are
// unique to a target. Such relocations are marked with R_<TARGET_NAME>.
93 R_AARCH64_GOT_PAGE_PC
,
96 R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC
,
97 R_AARCH64_TLSDESC_PAGE
,
104 R_MIPS_GOT_LOCAL_PAGE
,
114 R_PPC64_RELAX_GOT_PC
,
118 // Same as R_PC but with page-aligned semantics.
120 // Same as R_PLT_PC but with page-aligned semantics.
121 R_LOONGARCH_PLT_PAGE_PC
,
122 // In addition to having page-aligned semantics, LoongArch GOT relocs are
123 // also reused for TLS, making the semantics differ from other architectures.
125 R_LOONGARCH_GOT_PAGE_PC
,
126 R_LOONGARCH_TLSGD_PAGE_PC
,
127 R_LOONGARCH_TLSDESC_PAGE_PC
,
130 // Architecture-neutral representation of relocation.
139 // Manipulate jump instructions with these modifiers. These are used to relax
140 // jump instruction opcodes at basic block boundaries and are particularly
141 // useful when basic block sections are enabled.
142 struct JumpInstrMod
{
144 JumpModType original
;
148 // This function writes undefined symbol diagnostics to an internal buffer.
149 // Call reportUndefinedSymbols() after calling scanRelocations() to emit
151 template <class ELFT
> void scanRelocations(Ctx
&ctx
);
152 template <class ELFT
> void checkNoCrossRefs(Ctx
&ctx
);
153 void reportUndefinedSymbols(Ctx
&);
154 void postScanRelocations(Ctx
&ctx
);
155 void addGotEntry(Ctx
&ctx
, Symbol
&sym
);
157 void hexagonTLSSymbolUpdate(Ctx
&ctx
);
158 bool hexagonNeedsTLSSymbol(ArrayRef
<OutputSection
*> outputSections
);
162 class InputSectionDescription
;
166 // Thunk may be incomplete. Avoid inline ctor/dtor.
167 ThunkCreator(Ctx
&ctx
);
169 // Return true if Thunks have been added to OutputSections
170 bool createThunks(uint32_t pass
, ArrayRef
<OutputSection
*> outputSections
);
173 void mergeThunks(ArrayRef
<OutputSection
*> outputSections
);
175 ThunkSection
*getISDThunkSec(OutputSection
*os
, InputSection
*isec
,
176 InputSectionDescription
*isd
,
177 const Relocation
&rel
, uint64_t src
);
179 ThunkSection
*getISThunkSec(InputSection
*isec
);
181 void createInitialThunkSections(ArrayRef
<OutputSection
*> outputSections
);
183 std::pair
<Thunk
*, bool> getThunk(InputSection
*isec
, Relocation
&rel
,
186 std::pair
<Thunk
*, bool> getSyntheticLandingPad(Defined
&d
, int64_t a
);
188 ThunkSection
*addThunkSection(OutputSection
*os
, InputSectionDescription
*,
191 bool normalizeExistingThunk(Relocation
&rel
, uint64_t src
);
193 bool addSyntheticLandingPads();
197 // Record all the available Thunks for a (Symbol, addend) pair, where Symbol
198 // is represented as a (section, offset) pair. There may be multiple
199 // relocations sharing the same (section, offset + addend) pair. We may revert
200 // a relocation back to its original non-Thunk target, and restore the
201 // original addend, so we cannot fold offset + addend. A nested pair is used
202 // because DenseMapInfo is not specialized for std::tuple.
203 llvm::DenseMap
<std::pair
<std::pair
<SectionBase
*, uint64_t>, int64_t>,
204 SmallVector
<std::unique_ptr
<Thunk
>, 0>>
205 thunkedSymbolsBySectionAndAddend
;
206 llvm::DenseMap
<std::pair
<Symbol
*, int64_t>,
207 SmallVector
<std::unique_ptr
<Thunk
>, 0>>
210 // Find a Thunk from the Thunks symbol definition, we can use this to find
211 // the Thunk from a relocation to the Thunks symbol definition.
212 llvm::DenseMap
<Symbol
*, Thunk
*> thunks
;
// Track InputSections that have an inline ThunkSection placed in front of
// them; an inline ThunkSection may have control fall through to the section
// below, so we need to make sure that there is only one of them.
217 // The Mips LA25 Thunk is an example of an inline ThunkSection, as is
218 // the AArch64BTLandingPadThunk.
219 llvm::DenseMap
<InputSection
*, ThunkSection
*> thunkedSections
;
221 // Record landing pads, generated for a section + offset destination.
// Landing pads are alternative entry points for destinations that need
223 // to be reached via thunks that use indirect branches. A destination
224 // needs at most one landing pad as that can be reused by all callers.
225 llvm::DenseMap
<std::pair
<std::pair
<SectionBase
*, uint64_t>, int64_t>,
226 std::unique_ptr
<Thunk
>>
227 landingPadsBySectionAndAddend
;
229 // All the nonLandingPad thunks that have been created, in order of creation.
230 std::vector
<Thunk
*> allThunks
;
// The number of completed passes of createThunks; this permits us
233 // to do one time initialization on Pass 0 and put a limit on the
234 // number of times it can be called to prevent infinite loops.
// Decode LEB128 without error checking. Only used by performance-critical code
240 inline uint64_t readLEB128(const uint8_t *&p
, uint64_t leb
) {
241 uint64_t acc
= 0, shift
= 0, byte
;
244 acc
|= (byte
- 128 * (byte
>= leb
)) << shift
;
246 } while (byte
>= 128);
249 inline uint64_t readULEB128(const uint8_t *&p
) { return readLEB128(p
, 128); }
250 inline int64_t readSLEB128(const uint8_t *&p
) { return readLEB128(p
, 64); }
252 // This class implements a CREL iterator that does not allocate extra memory.
253 template <bool is64
> struct RelocsCrel
{
254 using uint
= std::conditional_t
<is64
, uint64_t, uint32_t>;
255 struct const_iterator
{
256 using iterator_category
= std::forward_iterator_tag
;
257 using value_type
= llvm::object::Elf_Crel_Impl
<is64
>;
258 using difference_type
= ptrdiff_t;
259 using pointer
= value_type
*;
260 using reference
= const value_type
&;
262 uint8_t flagBits
, shift
;
264 llvm::object::Elf_Crel_Impl
<is64
> crel
{};
265 const_iterator(size_t hdr
, const uint8_t *p
)
266 : count(hdr
/ 8), flagBits(hdr
& 4 ? 3 : 2), shift(hdr
% 4), p(p
) {
271 // See object::decodeCrel.
272 const uint8_t b
= *p
++;
273 crel
.r_offset
+= b
>> flagBits
<< shift
;
276 ((readULEB128(p
) << (7 - flagBits
)) - (0x80 >> flagBits
)) << shift
;
278 crel
.r_symidx
+= readSLEB128(p
);
280 crel
.r_type
+= readSLEB128(p
);
281 if (b
& 4 && flagBits
== 3)
282 crel
.r_addend
+= static_cast<uint
>(readSLEB128(p
));
284 llvm::object::Elf_Crel_Impl
<is64
> operator*() const { return crel
; };
285 const llvm::object::Elf_Crel_Impl
<is64
> *operator->() const {
288 // For llvm::enumerate.
289 bool operator==(const const_iterator
&r
) const { return count
== r
.count
; }
290 bool operator!=(const const_iterator
&r
) const { return count
!= r
.count
; }
291 const_iterator
&operator++() {
296 // For RelocationScanner::scanOne.
297 void operator+=(size_t n
) {
304 const uint8_t *p
= nullptr;
306 constexpr RelocsCrel() = default;
307 RelocsCrel(const uint8_t *p
) : hdr(readULEB128(p
)) { this->p
= p
; }
308 size_t size() const { return hdr
/ 8; }
309 const_iterator
begin() const { return {hdr
, p
}; }
310 const_iterator
end() const { return {0, nullptr}; }
313 template <class RelTy
> struct Relocs
: ArrayRef
<RelTy
> {
315 Relocs(ArrayRef
<RelTy
> a
) : ArrayRef
<RelTy
>(a
) {}
319 struct Relocs
<llvm::object::Elf_Crel_Impl
<is64
>> : RelocsCrel
<is64
> {
320 using RelocsCrel
<is64
>::RelocsCrel
;
323 // Return a int64_t to make sure we get the sign extension out of the way as
324 // early as possible.
325 template <class ELFT
>
326 static inline int64_t getAddend(const typename
ELFT::Rel
&rel
) {
329 template <class ELFT
>
330 static inline int64_t getAddend(const typename
ELFT::Rela
&rel
) {
333 template <class ELFT
>
334 static inline int64_t getAddend(const typename
ELFT::Crel
&rel
) {
338 template <typename RelTy
>
339 inline Relocs
<RelTy
> sortRels(Relocs
<RelTy
> rels
,
340 SmallVector
<RelTy
, 0> &storage
) {
341 auto cmp
= [](const RelTy
&a
, const RelTy
&b
) {
342 return a
.r_offset
< b
.r_offset
;
344 if (!llvm::is_sorted(rels
, cmp
)) {
345 storage
.assign(rels
.begin(), rels
.end());
346 llvm::stable_sort(storage
, cmp
);
347 rels
= Relocs
<RelTy
>(storage
);
353 inline Relocs
<llvm::object::Elf_Crel_Impl
<is64
>>
354 sortRels(Relocs
<llvm::object::Elf_Crel_Impl
<is64
>> rels
,
355 SmallVector
<llvm::object::Elf_Crel_Impl
<is64
>, 0> &storage
) {
// Returns true if Expr refers to a GOT entry. Note that this function returns
// false for TLS variables even though they need GOT, because TLS variables use
// GOT differently than regular variables.
362 bool needsGot(RelExpr expr
);
363 } // namespace lld::elf