//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "Symbols.h"
12 #include "SyntheticSections.h"
13 #include "Target.h"
14 #include "llvm/Support/ELFAttributes.h"
15 #include "llvm/Support/LEB128.h"
16 #include "llvm/Support/RISCVAttributeParser.h"
17 #include "llvm/Support/RISCVAttributes.h"
18 #include "llvm/Support/RISCVISAInfo.h"
19 #include "llvm/Support/TimeProfiler.h"
using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {

class RISCV final : public TargetInfo {
public:
  RISCV();
  uint32_t calcEFlags() const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotHeader(uint8_t *buf) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  RelType getDynRel(RelType type) const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  bool relaxOnce(int pass) const override;
};

} // end anonymous namespace

// These are internal relocation numbers for GP relaxation. They aren't part
// of the psABI spec.
#define INTERNAL_R_RISCV_GPREL_I 256
#define INTERNAL_R_RISCV_GPREL_S 257

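// Bias applied to DTP-relative TLS offsets: R_RISCV_TLS_DTPREL32/64 store
// S + A - 0x800 (the psABI's TLS_DTV_OFFSET).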
const uint64_t dtpOffset = 0x800;

enum Op {
  ADDI = 0x13,
  AUIPC = 0x17,
  JALR = 0x67,
  LD = 0x3003,
  LW = 0x2003,
  SRLI = 0x5013,
  SUB = 0x40000033,
};

enum Reg {
  X_RA = 1,
  X_GP = 3,
  X_TP = 4,
  X_T0 = 5,
  X_T1 = 6,
  X_T2 = 7,
  X_T3 = 28,
};

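// Split an offset into the 20-bit high part used by lui/auipc and the 12-bit
// low part used by the paired addi/load/store. Adding 0x800 first compensates
// for the sign extension of the low part.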
static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
static uint32_t lo12(uint32_t val) { return val & 4095; }

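// Assemble I-type, R-type, and U-type instructions from an opcode/funct
// constant (see enum Op) and register/immediate operands.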
static uint32_t itype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t imm) {
  return op | (rd << 7) | (rs1 << 15) | (imm << 20);
}
static uint32_t rtype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t rs2) {
  return op | (rd << 7) | (rs1 << 15) | (rs2 << 20);
}
static uint32_t utype(uint32_t op, uint32_t rd, uint32_t imm) {
  return op | (rd << 7) | (imm << 12);
}

// Extract bits v[begin:end], where range is inclusive, and begin must be < 63.
static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
  return (v & ((1ULL << (begin + 1)) - 1)) >> end;
}

static uint32_t setLO12_I(uint32_t insn, uint32_t imm) {
  return (insn & 0xfffff) | (imm << 20);
}
static uint32_t setLO12_S(uint32_t insn, uint32_t imm) {
  return (insn & 0x1fff07f) | (extractBits(imm, 11, 5) << 25) |
         (extractBits(imm, 4, 0) << 7);
}

RISCV::RISCV() {
  copyRel = R_RISCV_COPY;
  pltRel = R_RISCV_JUMP_SLOT;
  relativeRel = R_RISCV_RELATIVE;
  iRelativeRel = R_RISCV_IRELATIVE;
  if (config->is64) {
    symbolicRel = R_RISCV_64;
    tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64;
    tlsOffsetRel = R_RISCV_TLS_DTPREL64;
    tlsGotRel = R_RISCV_TLS_TPREL64;
  } else {
    symbolicRel = R_RISCV_32;
    tlsModuleIndexRel = R_RISCV_TLS_DTPMOD32;
    tlsOffsetRel = R_RISCV_TLS_DTPREL32;
    tlsGotRel = R_RISCV_TLS_TPREL32;
  }
  gotRel = symbolicRel;

  // .got[0] = _DYNAMIC
  gotHeaderEntriesNum = 1;

  // .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
  gotPltHeaderEntriesNum = 2;

  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
}

static uint32_t getEFlags(InputFile *f) {
  if (config->is64)
    return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
  return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
}

uint32_t RISCV::calcEFlags() const {
  // If there are only binary input files (from -b binary), use a
  // value of 0 for the ELF header flags.
  if (ctx.objectFiles.empty())
    return 0;

  uint32_t target = getEFlags(ctx.objectFiles.front());

  for (InputFile *f : ctx.objectFiles) {
    uint32_t eflags = getEFlags(f);
    if (eflags & EF_RISCV_RVC)
      target |= EF_RISCV_RVC;

    if ((eflags & EF_RISCV_FLOAT_ABI) != (target & EF_RISCV_FLOAT_ABI))
      error(
          toString(f) +
          ": cannot link object files with different floating-point ABI from " +
          toString(ctx.objectFiles[0]));

    if ((eflags & EF_RISCV_RVE) != (target & EF_RISCV_RVE))
      error(toString(f) +
            ": cannot link object files with different EF_RISCV_RVE");
  }

  return target;
}

int64_t RISCV::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    internalLinkerError(getErrorLocation(buf),
                        "cannot read addend for relocation " + toString(type));
    return 0;
  case R_RISCV_32:
  case R_RISCV_TLS_DTPMOD32:
  case R_RISCV_TLS_DTPREL32:
  case R_RISCV_TLS_TPREL32:
    return SignExtend64<32>(read32le(buf));
  case R_RISCV_64:
  case R_RISCV_TLS_DTPMOD64:
  case R_RISCV_TLS_DTPREL64:
  case R_RISCV_TLS_TPREL64:
    return read64le(buf);
  case R_RISCV_RELATIVE:
  case R_RISCV_IRELATIVE:
    return config->is64 ? read64le(buf) : read32le(buf);
  case R_RISCV_NONE:
  case R_RISCV_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  }
}

void RISCV::writeGotHeader(uint8_t *buf) const {
  if (config->is64)
    write64le(buf, mainPart->dynamic->getVA());
  else
    write32le(buf, mainPart->dynamic->getVA());
}

void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  if (config->is64)
    write64le(buf, in.plt->getVA());
  else
    write32le(buf, in.plt->getVA());
}

void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (config->writeAddends) {
    if (config->is64)
      write64le(buf, s.getVA());
    else
      write32le(buf, s.getVA());
  }
}

void RISCV::writePltHeader(uint8_t *buf) const {
  // 1: auipc t2, %pcrel_hi(.got.plt)
  //    sub t1, t1, t3
  //    l[wd] t3, %pcrel_lo(1b)(t2); t3 = _dl_runtime_resolve
  //    addi t1, t1, -pltHeaderSize-12; t1 = &.plt[i] - &.plt[0]
  //    addi t0, t2, %pcrel_lo(1b)
  //    srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0]
  //    l[wd] t0, Wordsize(t0); t0 = link_map
  //    jr t3
  uint32_t offset = in.gotPlt->getVA() - in.plt->getVA();
  uint32_t load = config->is64 ? LD : LW;
  write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
  write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
  write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
  write32le(buf + 12, itype(ADDI, X_T1, X_T1, -target->pltHeaderSize - 12));
  write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
  write32le(buf + 20, itype(SRLI, X_T1, X_T1, config->is64 ? 1 : 2));
  write32le(buf + 24, itype(load, X_T0, X_T0, config->wordsize));
  write32le(buf + 28, itype(JALR, 0, X_T3, 0));
}

void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
                     uint64_t pltEntryAddr) const {
  // 1: auipc t3, %pcrel_hi(f@.got.plt)
  //    l[wd] t3, %pcrel_lo(1b)(t3)
  //    jalr t1, t3
  //    nop
  uint32_t offset = sym.getGotPltVA() - pltEntryAddr;
  write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
  write32le(buf + 4, itype(config->is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
  write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
  write32le(buf + 12, itype(ADDI, 0, 0, 0));
}

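// Only the word-sized absolute relocation (symbolicRel) can be converted into
// a dynamic relocation here; any other type falls back to R_RISCV_NONE.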
RelType RISCV::getDynRel(RelType type) const {
  return type == target->symbolicRel ? type
                                     : static_cast<RelType>(R_RISCV_NONE);
}

RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
                          const uint8_t *loc) const {
  switch (type) {
  case R_RISCV_NONE:
    return R_NONE;
  case R_RISCV_32:
  case R_RISCV_64:
  case R_RISCV_HI20:
  case R_RISCV_LO12_I:
  case R_RISCV_LO12_S:
  case R_RISCV_RVC_LUI:
    return R_ABS;
  case R_RISCV_ADD8:
  case R_RISCV_ADD16:
  case R_RISCV_ADD32:
  case R_RISCV_ADD64:
  case R_RISCV_SET6:
  case R_RISCV_SET8:
  case R_RISCV_SET16:
  case R_RISCV_SET32:
  case R_RISCV_SUB6:
  case R_RISCV_SUB8:
  case R_RISCV_SUB16:
  case R_RISCV_SUB32:
  case R_RISCV_SUB64:
    return R_RISCV_ADD;
  case R_RISCV_JAL:
  case R_RISCV_BRANCH:
  case R_RISCV_PCREL_HI20:
  case R_RISCV_RVC_BRANCH:
  case R_RISCV_RVC_JUMP:
  case R_RISCV_32_PCREL:
    return R_PC;
  case R_RISCV_CALL:
  case R_RISCV_CALL_PLT:
  case R_RISCV_PLT32:
    return R_PLT_PC;
  case R_RISCV_GOT_HI20:
    return R_GOT_PC;
  case R_RISCV_PCREL_LO12_I:
  case R_RISCV_PCREL_LO12_S:
    return R_RISCV_PC_INDIRECT;
  case R_RISCV_TLS_GD_HI20:
    return R_TLSGD_PC;
  case R_RISCV_TLS_GOT_HI20:
    return R_GOT_PC;
  case R_RISCV_TPREL_HI20:
  case R_RISCV_TPREL_LO12_I:
  case R_RISCV_TPREL_LO12_S:
    return R_TPREL;
  case R_RISCV_ALIGN:
    return R_RELAX_HINT;
  case R_RISCV_TPREL_ADD:
  case R_RISCV_RELAX:
    return config->relax ? R_RELAX_HINT : R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  const unsigned bits = config->wordsize * 8;

  switch (rel.type) {
  case R_RISCV_32:
    write32le(loc, val);
    return;
  case R_RISCV_64:
    write64le(loc, val);
    return;

  case R_RISCV_RVC_BRANCH: {
    checkInt(loc, val, 9, rel);
    checkAlignment(loc, val, 2, rel);
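    // CB-format conditional branch: scatter the offset as imm[8|4:3] into
    // bits 12:10 and imm[7:6|2:1|5] into bits 6:2.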
330 uint16_t insn = read16le(loc) & 0xE383;
331 uint16_t imm8 = extractBits(val, 8, 8) << 12;
332 uint16_t imm4_3 = extractBits(val, 4, 3) << 10;
333 uint16_t imm7_6 = extractBits(val, 7, 6) << 5;
334 uint16_t imm2_1 = extractBits(val, 2, 1) << 3;
335 uint16_t imm5 = extractBits(val, 5, 5) << 2;
336 insn |= imm8 | imm4_3 | imm7_6 | imm2_1 | imm5;
338 write16le(loc, insn);
339 return;
342 case R_RISCV_RVC_JUMP: {
343 checkInt(loc, val, 12, rel);
344 checkAlignment(loc, val, 2, rel);
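    // CJ-format jump: scatter the offset as imm[11|4|9:8|10|6|7|3:1|5] into
    // bits 12:2.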
345 uint16_t insn = read16le(loc) & 0xE003;
346 uint16_t imm11 = extractBits(val, 11, 11) << 12;
347 uint16_t imm4 = extractBits(val, 4, 4) << 11;
348 uint16_t imm9_8 = extractBits(val, 9, 8) << 9;
349 uint16_t imm10 = extractBits(val, 10, 10) << 8;
350 uint16_t imm6 = extractBits(val, 6, 6) << 7;
351 uint16_t imm7 = extractBits(val, 7, 7) << 6;
352 uint16_t imm3_1 = extractBits(val, 3, 1) << 3;
353 uint16_t imm5 = extractBits(val, 5, 5) << 2;
354 insn |= imm11 | imm4 | imm9_8 | imm10 | imm6 | imm7 | imm3_1 | imm5;
356 write16le(loc, insn);
357 return;
360 case R_RISCV_RVC_LUI: {
361 int64_t imm = SignExtend64(val + 0x800, bits) >> 12;
362 checkInt(loc, imm, 6, rel);
363 if (imm == 0) { // `c.lui rd, 0` is illegal, convert to `c.li rd, 0`
364 write16le(loc, (read16le(loc) & 0x0F83) | 0x4000);
365 } else {
366 uint16_t imm17 = extractBits(val + 0x800, 17, 17) << 12;
367 uint16_t imm16_12 = extractBits(val + 0x800, 16, 12) << 2;
368 write16le(loc, (read16le(loc) & 0xEF83) | imm17 | imm16_12);
370 return;
373 case R_RISCV_JAL: {
374 checkInt(loc, val, 21, rel);
375 checkAlignment(loc, val, 2, rel);
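    // J-type jal: the offset is encoded as imm[20|10:1|11|19:12] in bits
    // 31:12.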
377 uint32_t insn = read32le(loc) & 0xFFF;
378 uint32_t imm20 = extractBits(val, 20, 20) << 31;
379 uint32_t imm10_1 = extractBits(val, 10, 1) << 21;
380 uint32_t imm11 = extractBits(val, 11, 11) << 20;
381 uint32_t imm19_12 = extractBits(val, 19, 12) << 12;
382 insn |= imm20 | imm10_1 | imm11 | imm19_12;
384 write32le(loc, insn);
385 return;
388 case R_RISCV_BRANCH: {
389 checkInt(loc, val, 13, rel);
390 checkAlignment(loc, val, 2, rel);
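    // B-type branch: the offset is encoded as imm[12|10:5] in bits 31:25 and
    // imm[4:1|11] in bits 11:7.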
392 uint32_t insn = read32le(loc) & 0x1FFF07F;
393 uint32_t imm12 = extractBits(val, 12, 12) << 31;
394 uint32_t imm10_5 = extractBits(val, 10, 5) << 25;
395 uint32_t imm4_1 = extractBits(val, 4, 1) << 8;
396 uint32_t imm11 = extractBits(val, 11, 11) << 7;
397 insn |= imm12 | imm10_5 | imm4_1 | imm11;
399 write32le(loc, insn);
400 return;
403 // auipc + jalr pair
404 case R_RISCV_CALL:
405 case R_RISCV_CALL_PLT: {
406 int64_t hi = SignExtend64(val + 0x800, bits) >> 12;
407 checkInt(loc, hi, 20, rel);
408 if (isInt<20>(hi)) {
409 relocateNoSym(loc, R_RISCV_PCREL_HI20, val);
410 relocateNoSym(loc + 4, R_RISCV_PCREL_LO12_I, val);
412 return;
415 case R_RISCV_GOT_HI20:
416 case R_RISCV_PCREL_HI20:
417 case R_RISCV_TLS_GD_HI20:
418 case R_RISCV_TLS_GOT_HI20:
419 case R_RISCV_TPREL_HI20:
420 case R_RISCV_HI20: {
421 uint64_t hi = val + 0x800;
422 checkInt(loc, SignExtend64(hi, bits) >> 12, 20, rel);
423 write32le(loc, (read32le(loc) & 0xFFF) | (hi & 0xFFFFF000));
424 return;
427 case R_RISCV_PCREL_LO12_I:
428 case R_RISCV_TPREL_LO12_I:
429 case R_RISCV_LO12_I: {
430 uint64_t hi = (val + 0x800) >> 12;
431 uint64_t lo = val - (hi << 12);
432 write32le(loc, setLO12_I(read32le(loc), lo & 0xfff));
433 return;
436 case R_RISCV_PCREL_LO12_S:
437 case R_RISCV_TPREL_LO12_S:
438 case R_RISCV_LO12_S: {
439 uint64_t hi = (val + 0x800) >> 12;
440 uint64_t lo = val - (hi << 12);
441 write32le(loc, setLO12_S(read32le(loc), lo));
442 return;
445 case INTERNAL_R_RISCV_GPREL_I:
446 case INTERNAL_R_RISCV_GPREL_S: {
447 Defined *gp = ElfSym::riscvGlobalPointer;
448 int64_t displace = SignExtend64(val - gp->getVA(), bits);
449 checkInt(loc, displace, 12, rel);
450 uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15);
451 if (rel.type == INTERNAL_R_RISCV_GPREL_I)
452 insn = setLO12_I(insn, displace);
453 else
454 insn = setLO12_S(insn, displace);
455 write32le(loc, insn);
456 return;
459 case R_RISCV_ADD8:
460 *loc += val;
461 return;
462 case R_RISCV_ADD16:
463 write16le(loc, read16le(loc) + val);
464 return;
465 case R_RISCV_ADD32:
466 write32le(loc, read32le(loc) + val);
467 return;
468 case R_RISCV_ADD64:
469 write64le(loc, read64le(loc) + val);
470 return;
471 case R_RISCV_SUB6:
472 *loc = (*loc & 0xc0) | (((*loc & 0x3f) - val) & 0x3f);
473 return;
474 case R_RISCV_SUB8:
475 *loc -= val;
476 return;
477 case R_RISCV_SUB16:
478 write16le(loc, read16le(loc) - val);
479 return;
480 case R_RISCV_SUB32:
481 write32le(loc, read32le(loc) - val);
482 return;
483 case R_RISCV_SUB64:
484 write64le(loc, read64le(loc) - val);
485 return;
486 case R_RISCV_SET6:
487 *loc = (*loc & 0xc0) | (val & 0x3f);
488 return;
489 case R_RISCV_SET8:
490 *loc = val;
491 return;
492 case R_RISCV_SET16:
493 write16le(loc, val);
494 return;
495 case R_RISCV_SET32:
496 case R_RISCV_32_PCREL:
497 case R_RISCV_PLT32:
498 write32le(loc, val);
499 return;
501 case R_RISCV_TLS_DTPREL32:
502 write32le(loc, val - dtpOffset);
503 break;
504 case R_RISCV_TLS_DTPREL64:
505 write64le(loc, val - dtpOffset);
506 break;
508 case R_RISCV_RELAX:
509 return; // Ignored (for now)
511 default:
512 llvm_unreachable("unknown relocation");
namespace {
struct SymbolAnchor {
  uint64_t offset;
  Defined *d;
  bool end; // true for the anchor of st_value+st_size
};
} // namespace

struct elf::RISCVRelaxAux {
  // This records symbol start and end offsets which will be adjusted according
  // to the nearest relocDeltas element.
  SmallVector<SymbolAnchor, 0> anchors;
  // For relocations[i], the actual offset is r_offset - (i ? relocDeltas[i-1] :
  // 0).
  std::unique_ptr<uint32_t[]> relocDeltas;
  // For relocations[i], the actual type is relocTypes[i].
  std::unique_ptr<RelType[]> relocTypes;
  SmallVector<uint32_t, 0> writes;
};

static void initSymbolAnchors() {
  SmallVector<InputSection *, 0> storage;
  for (OutputSection *osec : outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      sec->relaxAux = make<RISCVRelaxAux>();
      if (sec->relocs().size()) {
        sec->relaxAux->relocDeltas =
            std::make_unique<uint32_t[]>(sec->relocs().size());
        sec->relaxAux->relocTypes =
            std::make_unique<RelType[]>(sec->relocs().size());
      }
    }
  }
  // Store anchors (st_value and st_value+st_size) for symbols relative to text
  // sections.
  //
  // For a defined symbol foo, we may have `d->file != file` with --wrap=foo.
  // We should process foo, as the defining object file's symbol table may not
  // contain foo after redirectSymbols changed the foo entry to __wrap_foo. To
  // avoid adding a Defined that is undefined in one object file, use
  // `!d->scriptDefined` to exclude symbols that are definitely not wrapped.
  //
  // `relaxAux->anchors` may contain duplicate symbols, but that is fine.
  for (InputFile *file : ctx.objectFiles)
    for (Symbol *sym : file->getSymbols()) {
      auto *d = dyn_cast<Defined>(sym);
      if (!d || (d->file != file && !d->scriptDefined))
        continue;
      if (auto *sec = dyn_cast_or_null<InputSection>(d->section))
        if (sec->flags & SHF_EXECINSTR && sec->relaxAux) {
          // If sec is discarded, relaxAux will be nullptr.
          sec->relaxAux->anchors.push_back({d->value, d, false});
          sec->relaxAux->anchors.push_back({d->value + d->size, d, true});
        }
    }
  // Sort anchors by offset so that we can find the closest relocation
  // efficiently. For a zero size symbol, ensure that its start anchor precedes
  // its end anchor. For two symbols with anchors at the same offset, their
  // order does not matter.
  for (OutputSection *osec : outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      llvm::sort(sec->relaxAux->anchors, [](auto &a, auto &b) {
        return std::make_pair(a.offset, a.end) <
               std::make_pair(b.offset, b.end);
      });
    }
  }
}

// Relax R_RISCV_CALL/R_RISCV_CALL_PLT auipc+jalr to c.j, c.jal, or jal.
static void relaxCall(const InputSection &sec, size_t i, uint64_t loc,
                      Relocation &r, uint32_t &remove) {
  const bool rvc = config->eflags & EF_RISCV_RVC;
  const Symbol &sym = *r.sym;
  const uint64_t insnPair = read64le(sec.content().data() + r.offset);
  const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
  const uint64_t dest =
      (r.expr == R_PLT_PC ? sym.getPltVA() : sym.getVA()) + r.addend;
  const int64_t displace = dest - loc;

  if (rvc && isInt<12>(displace) && rd == 0) {
    sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
    sec.relaxAux->writes.push_back(0xa001); // c.j
    remove = 6;
  } else if (rvc && isInt<12>(displace) && rd == X_RA &&
             !config->is64) { // RV32C only
    sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
    sec.relaxAux->writes.push_back(0x2001); // c.jal
    remove = 6;
  } else if (isInt<21>(displace)) {
    sec.relaxAux->relocTypes[i] = R_RISCV_JAL;
    sec.relaxAux->writes.push_back(0x6f | rd << 7); // jal
    remove = 4;
  }
}

// Relax local-exec TLS when hi20 is zero.
static void relaxTlsLe(const InputSection &sec, size_t i, uint64_t loc,
                       Relocation &r, uint32_t &remove) {
  uint64_t val = r.sym->getVA(r.addend);
  if (hi20(val) != 0)
    return;
  uint32_t insn = read32le(sec.content().data() + r.offset);
  switch (r.type) {
  case R_RISCV_TPREL_HI20:
  case R_RISCV_TPREL_ADD:
    // Remove lui rd, %tprel_hi(x) and add rd, rd, tp, %tprel_add(x).
    sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
    remove = 4;
    break;
  case R_RISCV_TPREL_LO12_I:
    // addi rd, rd, %tprel_lo(x) => addi rd, tp, st_value(x)
    sec.relaxAux->relocTypes[i] = R_RISCV_32;
    insn = (insn & ~(31 << 15)) | (X_TP << 15);
    sec.relaxAux->writes.push_back(setLO12_I(insn, val));
    break;
  case R_RISCV_TPREL_LO12_S:
    // sw rs, %tprel_lo(x)(rd) => sw rs, st_value(x)(rd)
    sec.relaxAux->relocTypes[i] = R_RISCV_32;
    insn = (insn & ~(31 << 15)) | (X_TP << 15);
    sec.relaxAux->writes.push_back(setLO12_S(insn, val));
    break;
  }
}

static void relaxHi20Lo12(const InputSection &sec, size_t i, uint64_t loc,
                          Relocation &r, uint32_t &remove) {
  const Defined *gp = ElfSym::riscvGlobalPointer;
  if (!gp)
    return;

  if (!isInt<12>(r.sym->getVA(r.addend) - gp->getVA()))
    return;

  switch (r.type) {
  case R_RISCV_HI20:
    // Remove lui rd, %hi20(x).
    sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
    remove = 4;
    break;
  case R_RISCV_LO12_I:
    sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_I;
    break;
  case R_RISCV_LO12_S:
    sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_S;
    break;
  }
}

static bool relax(InputSection &sec) {
  const uint64_t secAddr = sec.getVA();
  auto &aux = *sec.relaxAux;
  bool changed = false;
  ArrayRef<SymbolAnchor> sa = ArrayRef(aux.anchors);
  uint64_t delta = 0;

  std::fill_n(aux.relocTypes.get(), sec.relocs().size(), R_RISCV_NONE);
  aux.writes.clear();
  for (auto [i, r] : llvm::enumerate(sec.relocs())) {
    const uint64_t loc = secAddr + r.offset - delta;
    uint32_t &cur = aux.relocDeltas[i], remove = 0;
    switch (r.type) {
    case R_RISCV_ALIGN: {
      const uint64_t nextLoc = loc + r.addend;
      const uint64_t align = PowerOf2Ceil(r.addend + 2);
      // All bytes beyond the alignment boundary should be removed.
      remove = nextLoc - ((loc + align - 1) & -align);
      assert(static_cast<int32_t>(remove) >= 0 &&
             "R_RISCV_ALIGN needs expanding the content");
      break;
    }
    case R_RISCV_CALL:
    case R_RISCV_CALL_PLT:
      if (i + 1 != sec.relocs().size() &&
          sec.relocs()[i + 1].type == R_RISCV_RELAX)
        relaxCall(sec, i, loc, r, remove);
      break;
    case R_RISCV_TPREL_HI20:
    case R_RISCV_TPREL_ADD:
    case R_RISCV_TPREL_LO12_I:
    case R_RISCV_TPREL_LO12_S:
      if (i + 1 != sec.relocs().size() &&
          sec.relocs()[i + 1].type == R_RISCV_RELAX)
        relaxTlsLe(sec, i, loc, r, remove);
      break;
    case R_RISCV_HI20:
    case R_RISCV_LO12_I:
    case R_RISCV_LO12_S:
      if (i + 1 != sec.relocs().size() &&
          sec.relocs()[i + 1].type == R_RISCV_RELAX)
        relaxHi20Lo12(sec, i, loc, r, remove);
      break;
    }

    // For all anchors whose offsets are <= r.offset, they are preceded by
    // the previous relocation whose `relocDeltas` value equals `delta`.
    // Decrease their st_value and update their st_size.
    for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) {
      if (sa[0].end)
        sa[0].d->size = sa[0].offset - delta - sa[0].d->value;
      else
        sa[0].d->value = sa[0].offset - delta;
    }
    delta += remove;
    if (delta != cur) {
      cur = delta;
      changed = true;
    }
  }

  for (const SymbolAnchor &a : sa) {
    if (a.end)
      a.d->size = a.offset - delta - a.d->value;
    else
      a.d->value = a.offset - delta;
  }
  // Inform assignAddresses that the size has changed.
  if (!isUInt<32>(delta))
    fatal("section size decrease is too large: " + Twine(delta));
  sec.bytesDropped = delta;
  return changed;
}

// When relaxing just R_RISCV_ALIGN, relocDeltas is usually changed only once in
// the absence of a linker script. For call and load/store R_RISCV_RELAX, code
// shrinkage may reduce displacement and make more relocations eligible for
// relaxation. Code shrinkage may increase displacement to a call/load/store
// target at a higher fixed address, invalidating an earlier relaxation. Any
// change in section sizes can have cascading effect and require another
// relaxation pass.
bool RISCV::relaxOnce(int pass) const {
  llvm::TimeTraceScope timeScope("RISC-V relaxOnce");
  if (config->relocatable)
    return false;

  if (pass == 0)
    initSymbolAnchors();

  SmallVector<InputSection *, 0> storage;
  bool changed = false;
  for (OutputSection *osec : outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage))
      changed |= relax(*sec);
  }
  return changed;
}

void elf::riscvFinalizeRelax(int passes) {
  llvm::TimeTraceScope timeScope("Finalize RISC-V relaxation");
  log("relaxation passes: " + Twine(passes));
  SmallVector<InputSection *, 0> storage;
  for (OutputSection *osec : outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      RISCVRelaxAux &aux = *sec->relaxAux;
      if (!aux.relocDeltas)
        continue;

      MutableArrayRef<Relocation> rels = sec->relocs();
      ArrayRef<uint8_t> old = sec->content();
      size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
      size_t writesIdx = 0;
      uint8_t *p = context().bAlloc.Allocate<uint8_t>(newSize);
      uint64_t offset = 0;
      int64_t delta = 0;
      sec->content_ = p;
      sec->size = newSize;
      sec->bytesDropped = 0;

      // Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
      // instructions for relaxed relocations.
      for (size_t i = 0, e = rels.size(); i != e; ++i) {
        uint32_t remove = aux.relocDeltas[i] - delta;
        delta = aux.relocDeltas[i];
        if (remove == 0 && aux.relocTypes[i] == R_RISCV_NONE)
          continue;

        // Copy from last location to the current relocated location.
        const Relocation &r = rels[i];
        uint64_t size = r.offset - offset;
        memcpy(p, old.data() + offset, size);
        p += size;

        // For R_RISCV_ALIGN, we will place `offset` in a location (among NOPs)
        // to satisfy the alignment requirement. If both `remove` and r.addend
        // are multiples of 4, it is as if we have skipped some NOPs. Otherwise
        // we are in the middle of a 4-byte NOP, and we need to rewrite the NOP
        // sequence.
        int64_t skip = 0;
        if (r.type == R_RISCV_ALIGN) {
          if (remove % 4 || r.addend % 4) {
            skip = r.addend - remove;
            int64_t j = 0;
            for (; j + 4 <= skip; j += 4)
              write32le(p + j, 0x00000013); // nop
            if (j != skip) {
              assert(j + 2 == skip);
              write16le(p + j, 0x0001); // c.nop
            }
          }
        } else if (RelType newType = aux.relocTypes[i]) {
          switch (newType) {
          case INTERNAL_R_RISCV_GPREL_I:
          case INTERNAL_R_RISCV_GPREL_S:
            break;
          case R_RISCV_RELAX:
            // Used by relaxTlsLe to indicate the relocation is ignored.
            break;
          case R_RISCV_RVC_JUMP:
            skip = 2;
            write16le(p, aux.writes[writesIdx++]);
            break;
          case R_RISCV_JAL:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            break;
          case R_RISCV_32:
            // Used by relaxTlsLe to write a uint32_t then suppress the handling
            // in relocateAlloc.
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            aux.relocTypes[i] = R_RISCV_NONE;
            break;
          default:
            llvm_unreachable("unsupported type");
          }
        }

        p += skip;
        offset = r.offset + skip + remove;
      }
      memcpy(p, old.data() + offset, old.size() - offset);

      // Subtract the previous relocDeltas value from the relocation offset.
      // For a pair of R_RISCV_CALL/R_RISCV_RELAX with the same offset, decrease
      // their r_offset by the same delta.
      delta = 0;
      for (size_t i = 0, e = rels.size(); i != e;) {
        uint64_t cur = rels[i].offset;
        do {
          rels[i].offset -= delta;
          if (aux.relocTypes[i] != R_RISCV_NONE)
            rels[i].type = aux.relocTypes[i];
        } while (++i != e && rels[i].offset == cur);
        delta = aux.relocDeltas[i - 1];
      }
    }
  }
}

namespace {
// Representation of the merged .riscv.attributes input sections. The psABI
// specifies merge policy for attributes. E.g. if we link an object without an
// extension with an object with the extension, the output Tag_RISCV_arch shall
// contain the extension. Some tools like objdump parse .riscv.attributes and
// disable some instructions if the first Tag_RISCV_arch does not contain an
// extension.
class RISCVAttributesSection final : public SyntheticSection {
public:
  RISCVAttributesSection()
      : SyntheticSection(0, SHT_RISCV_ATTRIBUTES, 1, ".riscv.attributes") {}

  size_t getSize() const override { return size; }
  void writeTo(uint8_t *buf) override;

  static constexpr StringRef vendor = "riscv";
  DenseMap<unsigned, unsigned> intAttr;
  DenseMap<unsigned, StringRef> strAttr;
  size_t size = 0;
};
} // namespace

static void mergeArch(RISCVISAInfo::OrderedExtensionMap &mergedExts,
                      unsigned &mergedXlen, const InputSectionBase *sec,
                      StringRef s) {
  auto maybeInfo = RISCVISAInfo::parseNormalizedArchString(s);
  if (!maybeInfo) {
    errorOrWarn(toString(sec) + ": " + s + ": " +
                llvm::toString(maybeInfo.takeError()));
    return;
  }

  // Merge extensions.
  RISCVISAInfo &info = **maybeInfo;
  if (mergedExts.empty()) {
    mergedExts = info.getExtensions();
    mergedXlen = info.getXLen();
  } else {
    for (const auto &ext : info.getExtensions()) {
      if (auto it = mergedExts.find(ext.first); it != mergedExts.end()) {
        if (std::tie(it->second.MajorVersion, it->second.MinorVersion) >=
            std::tie(ext.second.MajorVersion, ext.second.MinorVersion))
          continue;
      }
      mergedExts[ext.first] = ext.second;
    }
  }
}

static RISCVAttributesSection *
mergeAttributesSection(const SmallVector<InputSectionBase *, 0> &sections) {
  RISCVISAInfo::OrderedExtensionMap exts;
  const InputSectionBase *firstStackAlign = nullptr;
  unsigned firstStackAlignValue = 0, xlen = 0;
  bool hasArch = false;

  in.riscvAttributes = std::make_unique<RISCVAttributesSection>();
  auto &merged = static_cast<RISCVAttributesSection &>(*in.riscvAttributes);

  // Collect all tag values from the attributes sections.
  const auto &attributesTags = RISCVAttrs::getRISCVAttributeTags();
  for (const InputSectionBase *sec : sections) {
    RISCVAttributeParser parser;
    if (Error e = parser.parse(sec->content(), llvm::endianness::little))
      warn(toString(sec) + ": " + llvm::toString(std::move(e)));
    for (const auto &tag : attributesTags) {
      switch (RISCVAttrs::AttrType(tag.attr)) {
        // Integer attributes.
      case RISCVAttrs::STACK_ALIGN:
        if (auto i = parser.getAttributeValue(tag.attr)) {
          auto r = merged.intAttr.try_emplace(tag.attr, *i);
          if (r.second) {
            firstStackAlign = sec;
            firstStackAlignValue = *i;
          } else if (r.first->second != *i) {
            errorOrWarn(toString(sec) + " has stack_align=" + Twine(*i) +
                        " but " + toString(firstStackAlign) +
                        " has stack_align=" + Twine(firstStackAlignValue));
          }
        }
        continue;
      case RISCVAttrs::UNALIGNED_ACCESS:
        if (auto i = parser.getAttributeValue(tag.attr))
          merged.intAttr[tag.attr] |= *i;
        continue;

        // String attributes.
      case RISCVAttrs::ARCH:
        if (auto s = parser.getAttributeString(tag.attr)) {
          hasArch = true;
          mergeArch(exts, xlen, sec, *s);
        }
        continue;

        // Attributes which use the default handling.
      case RISCVAttrs::PRIV_SPEC:
      case RISCVAttrs::PRIV_SPEC_MINOR:
      case RISCVAttrs::PRIV_SPEC_REVISION:
        break;
      }

      // Fallback for deprecated priv_spec* and other unknown attributes: retain
      // the attribute if all input sections agree on the value. GNU ld uses 0
      // and empty strings as default values which are not dumped to the output.
      // TODO Adjust after resolution to
      // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/issues/352
      if (tag.attr % 2 == 0) {
        if (auto i = parser.getAttributeValue(tag.attr)) {
          auto r = merged.intAttr.try_emplace(tag.attr, *i);
          if (!r.second && r.first->second != *i)
            r.first->second = 0;
        }
      } else if (auto s = parser.getAttributeString(tag.attr)) {
        auto r = merged.strAttr.try_emplace(tag.attr, *s);
        if (!r.second && r.first->second != *s)
          r.first->second = {};
      }
    }
  }

  if (hasArch) {
    if (auto result = RISCVISAInfo::postProcessAndChecking(
            std::make_unique<RISCVISAInfo>(xlen, exts))) {
      merged.strAttr.try_emplace(RISCVAttrs::ARCH,
                                 saver().save((*result)->toString()));
    } else {
      errorOrWarn(llvm::toString(result.takeError()));
    }
  }

  // The total size of headers: format-version [ <section-length> "vendor-name"
  // [ <file-tag> <size>.
  size_t size = 5 + merged.vendor.size() + 1 + 5;
  for (auto &attr : merged.intAttr)
    if (attr.second != 0)
      size += getULEB128Size(attr.first) + getULEB128Size(attr.second);
  for (auto &attr : merged.strAttr)
    if (!attr.second.empty())
      size += getULEB128Size(attr.first) + attr.second.size() + 1;
  merged.size = size;
  return &merged;
}

void RISCVAttributesSection::writeTo(uint8_t *buf) {
  const size_t size = getSize();
  uint8_t *const end = buf + size;
  *buf = ELFAttrs::Format_Version;
  write32(buf + 1, size - 1);
  buf += 5;

  memcpy(buf, vendor.data(), vendor.size());
  buf += vendor.size() + 1;

  *buf = ELFAttrs::File;
  write32(buf + 1, end - buf);
  buf += 5;

  for (auto &attr : intAttr) {
    if (attr.second == 0)
      continue;
    buf += encodeULEB128(attr.first, buf);
    buf += encodeULEB128(attr.second, buf);
  }
  for (auto &attr : strAttr) {
    if (attr.second.empty())
      continue;
    buf += encodeULEB128(attr.first, buf);
    memcpy(buf, attr.second.data(), attr.second.size());
    buf += attr.second.size() + 1;
  }
}

void elf::mergeRISCVAttributesSections() {
  // Find the first input SHT_RISCV_ATTRIBUTES; return if not found.
  size_t place =
      llvm::find_if(ctx.inputSections,
                    [](auto *s) { return s->type == SHT_RISCV_ATTRIBUTES; }) -
      ctx.inputSections.begin();
  if (place == ctx.inputSections.size())
    return;

  // Extract all SHT_RISCV_ATTRIBUTES sections into `sections`.
  SmallVector<InputSectionBase *, 0> sections;
  llvm::erase_if(ctx.inputSections, [&](InputSectionBase *s) {
    if (s->type != SHT_RISCV_ATTRIBUTES)
      return false;
    sections.push_back(s);
    return true;
  });

  // Add the merged section.
  ctx.inputSections.insert(ctx.inputSections.begin() + place,
                           mergeAttributesSection(sections));
}

TargetInfo *elf::getRISCVTargetInfo() {
  static RISCV target;
  return &target;
}