//===- Relocations.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLD_ELF_RELOCATIONS_H
#define LLD_ELF_RELOCATIONS_H

#include "lld/Common/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Object/ELFTypes.h"
#include <vector>

namespace lld::elf {
struct Ctx;
class Defined;
class Symbol;
class InputSection;
class InputSectionBase;
class OutputSection;
class RelocationBaseSection;
class SectionBase;

// Represents a relocation type, such as R_X86_64_PC32 or R_ARM_THM_CALL.
struct RelType {
  uint32_t v = 0;
  /*implicit*/ constexpr RelType(uint32_t v = 0) : v(v) {}
  /*implicit*/ operator uint32_t() const { return v; }
};

using JumpModType = uint32_t;

// List of target-independent relocation types. Relocations read
// from files are converted to these types so that the main code
// doesn't have to know about architecture-specific details.
enum RelExpr {
  R_ABS,
  R_ADDEND,
  R_DTPREL,
  R_GOT,
  R_GOT_OFF,
  R_GOT_PC,
  R_GOTONLY_PC,
  R_GOTPLTONLY_PC,
  R_GOTPLT,
  R_GOTPLTREL,
  R_GOTREL,
  R_GOTPLT_GOTREL,
  R_GOTPLT_PC,
  R_NONE,
  R_PC,
  R_PLT,
  R_PLT_PC,
  R_PLT_GOTPLT,
  R_PLT_GOTREL,
  R_RELAX_HINT,
  R_RELAX_GOT_PC,
  R_RELAX_GOT_PC_NOPIC,
  R_RELAX_TLS_GD_TO_IE,
  R_RELAX_TLS_GD_TO_IE_ABS,
  R_RELAX_TLS_GD_TO_IE_GOT_OFF,
  R_RELAX_TLS_GD_TO_IE_GOTPLT,
  R_RELAX_TLS_GD_TO_LE,
  R_RELAX_TLS_GD_TO_LE_NEG,
  R_RELAX_TLS_IE_TO_LE,
  R_RELAX_TLS_LD_TO_LE,
  R_RELAX_TLS_LD_TO_LE_ABS,
  R_SIZE,
  R_TPREL,
  R_TPREL_NEG,
  R_TLSDESC,
  R_TLSDESC_CALL,
  R_TLSDESC_PC,
  R_TLSDESC_GOTPLT,
  R_TLSGD_GOT,
  R_TLSGD_GOTPLT,
  R_TLSGD_PC,
  R_TLSIE_HINT,
  R_TLSLD_GOT,
  R_TLSLD_GOTPLT,
  R_TLSLD_GOT_OFF,
  R_TLSLD_HINT,
  R_TLSLD_PC,

  // The following are abstract relocation types used for only one target.
  //
  // Even though RelExpr is intended to be a target-neutral representation
  // of a relocation type, there are some relocations whose semantics are
  // unique to a target. Such relocations are marked with RE_<TARGET_NAME>.
  RE_AARCH64_GOT_PAGE_PC,
  RE_AARCH64_AUTH_GOT_PAGE_PC,
  RE_AARCH64_GOT_PAGE,
  RE_AARCH64_AUTH_GOT,
  RE_AARCH64_AUTH_GOT_PC,
  RE_AARCH64_PAGE_PC,
  RE_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC,
  RE_AARCH64_TLSDESC_PAGE,
  RE_AARCH64_AUTH,
  RE_ARM_PCA,
  RE_ARM_SBREL,
  RE_MIPS_GOTREL,
  RE_MIPS_GOT_GP,
  RE_MIPS_GOT_GP_PC,
  RE_MIPS_GOT_LOCAL_PAGE,
  RE_MIPS_GOT_OFF,
  RE_MIPS_GOT_OFF32,
  RE_MIPS_TLSGD,
  RE_MIPS_TLSLD,
  RE_PPC32_PLTREL,
  RE_PPC64_CALL,
  RE_PPC64_CALL_PLT,
  RE_PPC64_RELAX_TOC,
  RE_PPC64_TOCBASE,
  RE_PPC64_RELAX_GOT_PC,
  RE_RISCV_ADD,
  RE_RISCV_LEB128,
  RE_RISCV_PC_INDIRECT,
  // Same as R_PC but with page-aligned semantics.
  RE_LOONGARCH_PAGE_PC,
  // Same as R_PLT_PC but with page-aligned semantics.
  RE_LOONGARCH_PLT_PAGE_PC,
  // In addition to having page-aligned semantics, LoongArch GOT relocs are
  // also reused for TLS, making the semantics differ from other architectures.
  RE_LOONGARCH_GOT,
  RE_LOONGARCH_GOT_PAGE_PC,
  RE_LOONGARCH_TLSGD_PAGE_PC,
  RE_LOONGARCH_TLSDESC_PAGE_PC,
};

// Architecture-neutral representation of relocation.
struct Relocation {
  RelExpr expr;
  RelType type;
  uint64_t offset;
  int64_t addend;
  Symbol *sym;
};

// Manipulate jump instructions with these modifiers. These are used to relax
// jump instruction opcodes at basic block boundaries and are particularly
// useful when basic block sections are enabled.
struct JumpInstrMod {
  uint64_t offset;
  JumpModType original;
  unsigned size;
};

// This function writes undefined symbol diagnostics to an internal buffer.
// Call reportUndefinedSymbols() after calling scanRelocations() to emit
// the diagnostics.
template <class ELFT> void scanRelocations(Ctx &ctx);
template <class ELFT> void checkNoCrossRefs(Ctx &ctx);
void reportUndefinedSymbols(Ctx &);

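// A minimal sketch of the scan-then-report protocol above. ELF64LE is chosen
// purely for illustration; the driver dispatches on the actual object format:
//   scanRelocations<llvm::object::ELF64LE>(ctx);
//   reportUndefinedSymbols(ctx);
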
void postScanRelocations(Ctx &ctx);
void addGotEntry(Ctx &ctx, Symbol &sym);

void hexagonTLSSymbolUpdate(Ctx &ctx);
bool hexagonNeedsTLSSymbol(ArrayRef<OutputSection *> outputSections);

class ThunkSection;
class Thunk;
class InputSectionDescription;

class ThunkCreator {
public:
  // Thunk may be incomplete. Avoid inline ctor/dtor.
  ThunkCreator(Ctx &ctx);
  ~ThunkCreator();
  // Return true if Thunks have been added to OutputSections.
  bool createThunks(uint32_t pass, ArrayRef<OutputSection *> outputSections);

private:
  void mergeThunks(ArrayRef<OutputSection *> outputSections);

  ThunkSection *getISDThunkSec(OutputSection *os, InputSection *isec,
                               InputSectionDescription *isd,
                               const Relocation &rel, uint64_t src);

  ThunkSection *getISThunkSec(InputSection *isec);

  void createInitialThunkSections(ArrayRef<OutputSection *> outputSections);

  std::pair<Thunk *, bool> getThunk(InputSection *isec, Relocation &rel,
                                    uint64_t src);

  std::pair<Thunk *, bool> getSyntheticLandingPad(Defined &d, int64_t a);

  ThunkSection *addThunkSection(OutputSection *os, InputSectionDescription *,
                                uint64_t off);

  bool normalizeExistingThunk(Relocation &rel, uint64_t src);

  bool addSyntheticLandingPads();

  Ctx &ctx;

  // Record all the available Thunks for a (Symbol, addend) pair, where Symbol
  // is represented as a (section, offset) pair. There may be multiple
  // relocations sharing the same (section, offset + addend) pair. We may revert
  // a relocation back to its original non-Thunk target, and restore the
  // original addend, so we cannot fold offset + addend. A nested pair is used
  // because DenseMapInfo is not specialized for std::tuple.
  llvm::DenseMap<std::pair<std::pair<SectionBase *, uint64_t>, int64_t>,
                 SmallVector<std::unique_ptr<Thunk>, 0>>
      thunkedSymbolsBySectionAndAddend;
  llvm::DenseMap<std::pair<Symbol *, int64_t>,
                 SmallVector<std::unique_ptr<Thunk>, 0>>
      thunkedSymbols;

  // Find a Thunk from the Thunk's symbol definition; we can use this to find
  // the Thunk from a relocation to the Thunk's symbol definition.
  llvm::DenseMap<Symbol *, Thunk *> thunks;

  // Track InputSections that have an inline ThunkSection placed in front.
  // An inline ThunkSection may have control fall through to the section below,
  // so we need to make sure that there is only one of them.
  // The Mips LA25 Thunk is an example of an inline ThunkSection, as is
  // the AArch64BTLandingPadThunk.
  llvm::DenseMap<InputSection *, ThunkSection *> thunkedSections;

  // Record landing pads, generated for a section + offset destination.
  // Landing pads are alternative entry points for destinations that need
  // to be reached via thunks that use indirect branches. A destination
  // needs at most one landing pad, as it can be reused by all callers.
  llvm::DenseMap<std::pair<std::pair<SectionBase *, uint64_t>, int64_t>,
                 std::unique_ptr<Thunk>>
      landingPadsBySectionAndAddend;

  // All the non-landing-pad thunks that have been created, in order of
  // creation.
  std::vector<Thunk *> allThunks;

  // The number of completed passes of createThunks. This permits us
  // to do one-time initialization on pass 0 and puts a limit on the
  // number of times it can be called to prevent infinite loops.
  uint32_t pass = 0;
};

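// A simplified sketch of the intended driver loop; the real writer also
// reassigns addresses between passes:
//   ThunkCreator tc(ctx);
//   for (uint32_t pass = 0; tc.createThunks(pass, outputSections); ++pass)
//     ; // repeat until no new thunks are added
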
// Decode LEB128 without error checking. Only used by performance-critical code
// like RelocsCrel.
inline uint64_t readLEB128(const uint8_t *&p, uint64_t leb) {
  uint64_t acc = 0, shift = 0, byte;
  do {
    byte = *p++;
    // Strip the continuation bit from intermediate bytes. With leb == 64
    // (SLEB128), a final byte with the sign bit (0x40) set is also reduced
    // by 128, sign-extending the accumulated result.
    acc |= (byte - 128 * (byte >= leb)) << shift;
    shift += 7;
  } while (byte >= 128);
  return acc;
}
inline uint64_t readULEB128(const uint8_t *&p) { return readLEB128(p, 128); }
inline int64_t readSLEB128(const uint8_t *&p) { return readLEB128(p, 64); }

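// A worked example, using the classic LEB128 test vector: the ULEB128 bytes
// {0xe5, 0x8e, 0x26} decode to 0x65 | (0x0e << 7) | (0x26 << 14) == 624485,
// advancing p past the terminating byte:
//   const uint8_t buf[] = {0xe5, 0x8e, 0x26};
//   const uint8_t *p = buf;
//   uint64_t v = readULEB128(p); // v == 624485, p == buf + 3
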
// This class implements a CREL iterator that does not allocate extra memory.
template <bool is64> struct RelocsCrel {
  using uint = std::conditional_t<is64, uint64_t, uint32_t>;
  struct const_iterator {
    using iterator_category = std::forward_iterator_tag;
    using value_type = llvm::object::Elf_Crel_Impl<is64>;
    using difference_type = ptrdiff_t;
    using pointer = value_type *;
    using reference = const value_type &;
    uint32_t count;
    uint8_t flagBits, shift;
    const uint8_t *p;
    llvm::object::Elf_Crel_Impl<is64> crel{};
    const_iterator(size_t hdr, const uint8_t *p)
        : count(hdr / 8), flagBits(hdr & 4 ? 3 : 2), shift(hdr % 4), p(p) {
      if (count)
        step();
    }
    void step() {
      // See object::decodeCrel.
      const uint8_t b = *p++;
      crel.r_offset += b >> flagBits << shift;
      if (b >= 0x80)
        crel.r_offset +=
            ((readULEB128(p) << (7 - flagBits)) - (0x80 >> flagBits)) << shift;
      if (b & 1)
        crel.r_symidx += readSLEB128(p);
      if (b & 2)
        crel.r_type += readSLEB128(p);
      if (b & 4 && flagBits == 3)
        crel.r_addend += static_cast<uint>(readSLEB128(p));
    }
    llvm::object::Elf_Crel_Impl<is64> operator*() const { return crel; }
    const llvm::object::Elf_Crel_Impl<is64> *operator->() const {
      return &crel;
    }
    // For llvm::enumerate.
    bool operator==(const const_iterator &r) const { return count == r.count; }
    bool operator!=(const const_iterator &r) const { return count != r.count; }
    const_iterator &operator++() {
      if (--count)
        step();
      return *this;
    }
    // For RelocationScanner::scanOne.
    void operator+=(size_t n) {
      for (; n; --n)
        operator++();
    }
  };

  size_t hdr = 0;
  const uint8_t *p = nullptr;

  constexpr RelocsCrel() = default;
  RelocsCrel(const uint8_t *p) : hdr(readULEB128(p)) { this->p = p; }
  size_t size() const { return hdr / 8; }
  const_iterator begin() const { return {hdr, p}; }
  const_iterator end() const { return {0, nullptr}; }
};

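// A minimal iteration sketch, assuming `content` points at the raw bytes of a
// SHT_CREL section (see object::decodeCrel for the encoding; use() is a
// hypothetical consumer):
//   RelocsCrel<true> crels(content); // ELF64; RelocsCrel<false> for ELF32
//   for (auto r : crels)
//     use(r.r_offset, r.r_symidx, r.r_type, r.r_addend);
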
template <class RelTy> struct Relocs : ArrayRef<RelTy> {
  Relocs() = default;
  Relocs(ArrayRef<RelTy> a) : ArrayRef<RelTy>(a) {}
};

template <bool is64>
struct Relocs<llvm::object::Elf_Crel_Impl<is64>> : RelocsCrel<is64> {
  using RelocsCrel<is64>::RelocsCrel;
};

// Return an int64_t to make sure we get the sign extension out of the way as
// early as possible.
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Rel &rel) {
  return 0;
}
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Rela &rel) {
  return rel.r_addend;
}
template <class ELFT>
static inline int64_t getAddend(const typename ELFT::Crel &rel) {
  return rel.r_addend;
}

template <typename RelTy>
inline Relocs<RelTy> sortRels(Relocs<RelTy> rels,
                              SmallVector<RelTy, 0> &storage) {
  auto cmp = [](const RelTy &a, const RelTy &b) {
    return a.r_offset < b.r_offset;
  };
  if (!llvm::is_sorted(rels, cmp)) {
    storage.assign(rels.begin(), rels.end());
    llvm::stable_sort(storage, cmp);
    rels = Relocs<RelTy>(storage);
  }
  return rels;
}

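// Illustrative use: `storage` must outlive the returned view, which aliases
// storage whenever a sorted copy had to be made:
//   SmallVector<typename ELFT::Rela, 0> storage;
//   Relocs<typename ELFT::Rela> sorted = sortRels(rels, storage);
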
template <bool is64>
inline Relocs<llvm::object::Elf_Crel_Impl<is64>>
sortRels(Relocs<llvm::object::Elf_Crel_Impl<is64>> rels,
         SmallVector<llvm::object::Elf_Crel_Impl<is64>, 0> &storage) {
  return {};
}

RelocationBaseSection &getIRelativeSection(Ctx &ctx);

// Returns true if Expr refers to a GOT entry. Note that this function returns
// false for TLS variables even though they need the GOT, because TLS variables
// use the GOT differently than regular variables.
bool needsGot(RelExpr expr);
} // namespace lld::elf

#endif