[ARM] Reduce loop unroll when low overhead branching is available (#120065)
[llvm-project.git] / lld / ELF / Arch / RISCV.cpp
blob36ae31be6ed2a29ceb2da7cd86dc274398bffdc9
1 //===- RISCV.cpp ----------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "InputFiles.h"
10 #include "OutputSections.h"
11 #include "Symbols.h"
12 #include "SyntheticSections.h"
13 #include "Target.h"
14 #include "llvm/Support/ELFAttributes.h"
15 #include "llvm/Support/LEB128.h"
16 #include "llvm/Support/RISCVAttributeParser.h"
17 #include "llvm/Support/RISCVAttributes.h"
18 #include "llvm/Support/TimeProfiler.h"
19 #include "llvm/TargetParser/RISCVISAInfo.h"
21 using namespace llvm;
22 using namespace llvm::object;
23 using namespace llvm::support::endian;
24 using namespace llvm::ELF;
25 using namespace lld;
26 using namespace lld::elf;
namespace {

// RISC-V implementation of the lld TargetInfo hooks: ELF header flag
// merging, dynamic relocation selection, PLT/GOT synthesis, relocation
// application, and the linker-relaxation passes (relaxOnce/finalizeRelax).
class RISCV final : public TargetInfo {
public:
  RISCV(Ctx &);
  uint32_t calcEFlags() const override;
  int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
  void writeGotHeader(uint8_t *buf) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  RelType getDynRel(RelType type) const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override;
  bool relaxOnce(int pass) const override;
  void finalizeRelax(int passes) const override;
};

} // end anonymous namespace
// These are internal relocation numbers for GP relaxation. They aren't part
// of the psABI spec. Values 256/257 are chosen above the valid psABI range
// so they can never collide with a real relocation type.
#define INTERNAL_R_RISCV_GPREL_I 256
#define INTERNAL_R_RISCV_GPREL_S 257

// DTPREL values are biased by 0x800 in the RISC-V TLS ABI; subtracted when
// writing R_RISCV_TLS_DTPREL{32,64}.
const uint64_t dtpOffset = 0x800;
namespace {
// Base opcode/funct encodings for the RV32I/RV64I instructions synthesized by
// the PLT writer and the relaxation rewriters. Values already include the
// funct3 bits where applicable (e.g. LD/LW/SRLI/SUB).
enum Op {
  ADDI = 0x13,
  AUIPC = 0x17,
  JALR = 0x67,
  LD = 0x3003,
  LUI = 0x37,
  LW = 0x2003,
  SRLI = 0x5013,
  SUB = 0x40000033,
};

// Register numbers used in synthesized code (psABI calling-convention names).
enum Reg {
  X_RA = 1,
  X_GP = 3,
  X_TP = 4,
  X_T0 = 5,
  X_T1 = 6,
  X_T2 = 7,
  X_A0 = 10,
  X_T3 = 28,
};
} // namespace
// High 20 bits for a %hi/%pcrel_hi pair. The +0x800 rounds so that the
// sign-extended low 12 bits added back by the paired %lo reconstruct `val`.
static uint32_t hi20(uint32_t val) {
  const uint32_t rounded = val + 0x800;
  return rounded >> 12;
}
// Low 12 bits of `val`, i.e. the %lo/%pcrel_lo immediate (unsigned form).
static uint32_t lo12(uint32_t val) { return val % 4096; }
// Assemble an I-type instruction: imm[11:0] | rs1 | funct3(op) | rd | opcode.
static uint32_t itype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t imm) {
  uint32_t insn = op;
  insn |= rd << 7;
  insn |= rs1 << 15;
  insn |= imm << 20;
  return insn;
}
// Assemble an R-type instruction: funct7/rs2 | rs1 | funct3(op) | rd | opcode.
static uint32_t rtype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t rs2) {
  uint32_t insn = op;
  insn |= rd << 7;
  insn |= rs1 << 15;
  insn |= rs2 << 20;
  return insn;
}
// Assemble a U-type instruction (LUI/AUIPC): imm[31:12] | rd | opcode.
static uint32_t utype(uint32_t op, uint32_t rd, uint32_t imm) {
  uint32_t insn = op;
  insn |= rd << 7;
  insn |= imm << 12;
  return insn;
}
// Extract bits v[begin:end], where range is inclusive, and begin must be < 63.
static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
  const uint64_t mask = (1ULL << (begin + 1)) - 1;
  return (v & mask) >> end;
}
// Replace the imm[11:0] field (bits 31:20) of an I-type instruction.
static uint32_t setLO12_I(uint32_t insn, uint32_t imm) {
  const uint32_t withoutImm = insn & 0xfffff;
  return withoutImm | (imm << 20);
}
105 static uint32_t setLO12_S(uint32_t insn, uint32_t imm) {
106 return (insn & 0x1fff07f) | (extractBits(imm, 11, 5) << 25) |
107 (extractBits(imm, 4, 0) << 7);
// Configure the generic TargetInfo knobs with RISC-V dynamic relocation types
// and PLT/GOT geometry. Several relocation types differ between rv32 and rv64.
RISCV::RISCV(Ctx &ctx) : TargetInfo(ctx) {
  copyRel = R_RISCV_COPY;
  pltRel = R_RISCV_JUMP_SLOT;
  relativeRel = R_RISCV_RELATIVE;
  iRelativeRel = R_RISCV_IRELATIVE;
  if (ctx.arg.is64) {
    symbolicRel = R_RISCV_64;
    tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64;
    tlsOffsetRel = R_RISCV_TLS_DTPREL64;
    tlsGotRel = R_RISCV_TLS_TPREL64;
  } else {
    symbolicRel = R_RISCV_32;
    tlsModuleIndexRel = R_RISCV_TLS_DTPMOD32;
    tlsOffsetRel = R_RISCV_TLS_DTPREL32;
    tlsGotRel = R_RISCV_TLS_TPREL32;
  }
  gotRel = symbolicRel;
  tlsDescRel = R_RISCV_TLSDESC;

  // .got[0] = _DYNAMIC
  gotHeaderEntriesNum = 1;

  // .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
  gotPltHeaderEntriesNum = 2;

  // Sizes in bytes; the header is 8 instructions, each entry 4.
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
}
// Read e_flags from an input object's ELF header, dispatching on the
// configured word size (inputs were already validated as little-endian
// ELF32/ELF64 by this point, so the casts are safe).
static uint32_t getEFlags(Ctx &ctx, InputFile *f) {
  if (ctx.arg.is64)
    return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader().e_flags;
  return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader().e_flags;
}
// Merge e_flags across all input objects: RVC is OR-ed in, while the
// floating-point ABI and RVE must agree across all inputs (diagnosed
// against the first object file otherwise).
uint32_t RISCV::calcEFlags() const {
  // If there are only binary input files (from -b binary), use a
  // value of 0 for the ELF header flags.
  if (ctx.objectFiles.empty())
    return 0;

  uint32_t target = getEFlags(ctx, ctx.objectFiles.front());
  for (InputFile *f : ctx.objectFiles) {
    uint32_t eflags = getEFlags(ctx, f);
    // Any input using compressed instructions makes the output RVC.
    if (eflags & EF_RISCV_RVC)
      target |= EF_RISCV_RVC;

    if ((eflags & EF_RISCV_FLOAT_ABI) != (target & EF_RISCV_FLOAT_ABI))
      Err(ctx) << f
               << ": cannot link object files with different "
                  "floating-point ABI from "
               << ctx.objectFiles[0];

    if ((eflags & EF_RISCV_RVE) != (target & EF_RISCV_RVE))
      Err(ctx) << f << ": cannot link object files with different EF_RISCV_RVE";
  }

  return target;
}
// Read the addend stored in the relocated location itself, used with
// --apply-dynamic-relocs / Android packed relocations. Width depends on the
// relocation type (and word size for RELATIVE/IRELATIVE/TLSDESC).
int64_t RISCV::getImplicitAddend(const uint8_t *buf, RelType type) const {
  switch (type) {
  default:
    InternalErr(ctx, buf) << "cannot read addend for relocation " << type;
    return 0;
  case R_RISCV_32:
  case R_RISCV_TLS_DTPMOD32:
  case R_RISCV_TLS_DTPREL32:
  case R_RISCV_TLS_TPREL32:
    return SignExtend64<32>(read32le(buf));
  case R_RISCV_64:
  case R_RISCV_TLS_DTPMOD64:
  case R_RISCV_TLS_DTPREL64:
  case R_RISCV_TLS_TPREL64:
    return read64le(buf);
  case R_RISCV_RELATIVE:
  case R_RISCV_IRELATIVE:
    return ctx.arg.is64 ? read64le(buf) : read32le(buf);
  case R_RISCV_NONE:
  case R_RISCV_JUMP_SLOT:
    // These relocations are defined as not having an implicit addend.
    return 0;
  case R_RISCV_TLSDESC:
    // The addend is stored in the second word of the two-word TLSDESC entry.
    return ctx.arg.is64 ? read64le(buf + 8) : read32le(buf + 4);
  }
}
// .got[0] holds the address of _DYNAMIC, written at word size.
void RISCV::writeGotHeader(uint8_t *buf) const {
  if (ctx.arg.is64)
    write64le(buf, ctx.mainPart->dynamic->getVA());
  else
    write32le(buf, ctx.mainPart->dynamic->getVA());
}
// Initial .got.plt entries point at the PLT header so the first call goes
// through the lazy resolver.
void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  if (ctx.arg.is64)
    write64le(buf, ctx.in.plt->getVA());
  else
    write32le(buf, ctx.in.plt->getVA());
}
// IPLT GOT entries get the ifunc resolver's address, but only when we are
// writing addends into section contents (REL-style / --apply-dynamic-relocs).
void RISCV::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
  if (ctx.arg.writeAddends) {
    if (ctx.arg.is64)
      write64le(buf, s.getVA(ctx));
    else
      write32le(buf, s.getVA(ctx));
  }
}
// Emit the 8-instruction lazy-binding stub that tail-calls
// _dl_runtime_resolve with the PLT slot index and link_map as arguments.
void RISCV::writePltHeader(uint8_t *buf) const {
  // 1: auipc t2, %pcrel_hi(.got.plt)
  //    sub t1, t1, t3
  //    l[wd] t3, %pcrel_lo(1b)(t2); t3 = _dl_runtime_resolve
  //    addi t1, t1, -pltHeaderSize-12; t1 = &.plt[i] - &.plt[0]
  //    addi t0, t2, %pcrel_lo(1b)
  //    srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0]
  //    l[wd] t0, Wordsize(t0); t0 = link_map
  //    jr t3
  uint32_t offset = ctx.in.gotPlt->getVA() - ctx.in.plt->getVA();
  uint32_t load = ctx.arg.is64 ? LD : LW;
  write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
  write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
  write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
  write32le(buf + 12, itype(ADDI, X_T1, X_T1, -ctx.target->pltHeaderSize - 12));
  write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
  write32le(buf + 20, itype(SRLI, X_T1, X_T1, ctx.arg.is64 ? 1 : 2));
  write32le(buf + 24, itype(load, X_T0, X_T0, ctx.arg.wordsize));
  write32le(buf + 28, itype(JALR, 0, X_T3, 0));
}
// Emit one 16-byte PLT entry: load the target address from its .got.plt slot
// and jump; t1 receives the return address for the lazy-resolution path.
void RISCV::writePlt(uint8_t *buf, const Symbol &sym,
                     uint64_t pltEntryAddr) const {
  // 1: auipc t3, %pcrel_hi(f@.got.plt)
  //    l[wd] t3, %pcrel_lo(1b)(t3)
  //    jalr t1, t3
  //    nop
  uint32_t offset = sym.getGotPltVA(ctx) - pltEntryAddr;
  write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
  write32le(buf + 4, itype(ctx.arg.is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
  write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
  write32le(buf + 12, itype(ADDI, 0, 0, 0)); // nop
}
// Only the word-sized symbolic relocation (R_RISCV_32/64) may be emitted as
// a dynamic relocation; everything else maps to R_RISCV_NONE.
RelType RISCV::getDynRel(RelType type) const {
  return type == ctx.target->symbolicRel ? type
                                         : static_cast<RelType>(R_RISCV_NONE);
}
// Classify each relocation type into the generic RelExpr category that
// drives scanRelocations (GOT/PLT creation, TLS optimization, relaxation).
RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
                          const uint8_t *loc) const {
  switch (type) {
  case R_RISCV_NONE:
    return R_NONE;
  case R_RISCV_32:
  case R_RISCV_64:
  case R_RISCV_HI20:
  case R_RISCV_LO12_I:
  case R_RISCV_LO12_S:
    return R_ABS;
  // Paired add/sub/set relocations computing label differences.
  case R_RISCV_ADD8:
  case R_RISCV_ADD16:
  case R_RISCV_ADD32:
  case R_RISCV_ADD64:
  case R_RISCV_SET6:
  case R_RISCV_SET8:
  case R_RISCV_SET16:
  case R_RISCV_SET32:
  case R_RISCV_SUB6:
  case R_RISCV_SUB8:
  case R_RISCV_SUB16:
  case R_RISCV_SUB32:
  case R_RISCV_SUB64:
    return RE_RISCV_ADD;
  case R_RISCV_JAL:
  case R_RISCV_BRANCH:
  case R_RISCV_PCREL_HI20:
  case R_RISCV_RVC_BRANCH:
  case R_RISCV_RVC_JUMP:
  case R_RISCV_32_PCREL:
    return R_PC;
  case R_RISCV_CALL:
  case R_RISCV_CALL_PLT:
  case R_RISCV_PLT32:
    return R_PLT_PC;
  case R_RISCV_GOT_HI20:
  case R_RISCV_GOT32_PCREL:
    return R_GOT_PC;
  // %pcrel_lo refers back to a preceding %pcrel_hi at the label address.
  case R_RISCV_PCREL_LO12_I:
  case R_RISCV_PCREL_LO12_S:
    return RE_RISCV_PC_INDIRECT;
  case R_RISCV_TLSDESC_HI20:
  case R_RISCV_TLSDESC_LOAD_LO12:
  case R_RISCV_TLSDESC_ADD_LO12:
    return R_TLSDESC_PC;
  case R_RISCV_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_RISCV_TLS_GD_HI20:
    return R_TLSGD_PC;
  case R_RISCV_TLS_GOT_HI20:
    return R_GOT_PC;
  case R_RISCV_TPREL_HI20:
  case R_RISCV_TPREL_LO12_I:
  case R_RISCV_TPREL_LO12_S:
    return R_TPREL;
  case R_RISCV_ALIGN:
    return R_RELAX_HINT;
  // TPREL_ADD and RELAX are pure hints; ignored entirely without --relax.
  case R_RISCV_TPREL_ADD:
  case R_RISCV_RELAX:
    return ctx.arg.relax ? R_RELAX_HINT : R_NONE;
  case R_RISCV_SET_ULEB128:
  case R_RISCV_SUB_ULEB128:
    return RE_RISCV_LEB128;
  default:
    Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v
             << ") against symbol " << &s;
    return R_NONE;
  }
}
// Patch one location with the computed value `val`, encoding it per the
// relocation type's instruction/data format (with range and alignment
// diagnostics where the encoding can overflow).
void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  const unsigned bits = ctx.arg.wordsize * 8;

  switch (rel.type) {
  case R_RISCV_32:
    write32le(loc, val);
    return;
  case R_RISCV_64:
    write64le(loc, val);
    return;

  case R_RISCV_RVC_BRANCH: {
    // CB-format: scrambled 9-bit signed, 2-byte-aligned offset.
    checkInt(ctx, loc, val, 9, rel);
    checkAlignment(ctx, loc, val, 2, rel);
    uint16_t insn = read16le(loc) & 0xE383;
    uint16_t imm8 = extractBits(val, 8, 8) << 12;
    uint16_t imm4_3 = extractBits(val, 4, 3) << 10;
    uint16_t imm7_6 = extractBits(val, 7, 6) << 5;
    uint16_t imm2_1 = extractBits(val, 2, 1) << 3;
    uint16_t imm5 = extractBits(val, 5, 5) << 2;
    insn |= imm8 | imm4_3 | imm7_6 | imm2_1 | imm5;

    write16le(loc, insn);
    return;
  }

  case R_RISCV_RVC_JUMP: {
    // CJ-format: scrambled 12-bit signed, 2-byte-aligned offset.
    checkInt(ctx, loc, val, 12, rel);
    checkAlignment(ctx, loc, val, 2, rel);
    uint16_t insn = read16le(loc) & 0xE003;
    uint16_t imm11 = extractBits(val, 11, 11) << 12;
    uint16_t imm4 = extractBits(val, 4, 4) << 11;
    uint16_t imm9_8 = extractBits(val, 9, 8) << 9;
    uint16_t imm10 = extractBits(val, 10, 10) << 8;
    uint16_t imm6 = extractBits(val, 6, 6) << 7;
    uint16_t imm7 = extractBits(val, 7, 7) << 6;
    uint16_t imm3_1 = extractBits(val, 3, 1) << 3;
    uint16_t imm5 = extractBits(val, 5, 5) << 2;
    insn |= imm11 | imm4 | imm9_8 | imm10 | imm6 | imm7 | imm3_1 | imm5;

    write16le(loc, insn);
    return;
  }

  case R_RISCV_JAL: {
    // J-format: 21-bit signed, 2-byte-aligned offset.
    checkInt(ctx, loc, val, 21, rel);
    checkAlignment(ctx, loc, val, 2, rel);

    uint32_t insn = read32le(loc) & 0xFFF;
    uint32_t imm20 = extractBits(val, 20, 20) << 31;
    uint32_t imm10_1 = extractBits(val, 10, 1) << 21;
    uint32_t imm11 = extractBits(val, 11, 11) << 20;
    uint32_t imm19_12 = extractBits(val, 19, 12) << 12;
    insn |= imm20 | imm10_1 | imm11 | imm19_12;

    write32le(loc, insn);
    return;
  }

  case R_RISCV_BRANCH: {
    // B-format: 13-bit signed, 2-byte-aligned offset.
    checkInt(ctx, loc, val, 13, rel);
    checkAlignment(ctx, loc, val, 2, rel);

    uint32_t insn = read32le(loc) & 0x1FFF07F;
    uint32_t imm12 = extractBits(val, 12, 12) << 31;
    uint32_t imm10_5 = extractBits(val, 10, 5) << 25;
    uint32_t imm4_1 = extractBits(val, 4, 1) << 8;
    uint32_t imm11 = extractBits(val, 11, 11) << 7;
    insn |= imm12 | imm10_5 | imm4_1 | imm11;

    write32le(loc, insn);
    return;
  }

  // auipc + jalr pair
  case R_RISCV_CALL:
  case R_RISCV_CALL_PLT: {
    int64_t hi = SignExtend64(val + 0x800, bits) >> 12;
    checkInt(ctx, loc, hi, 20, rel);
    // Only patch if in range; checkInt already diagnosed overflow above.
    if (isInt<20>(hi)) {
      relocateNoSym(loc, R_RISCV_PCREL_HI20, val);
      relocateNoSym(loc + 4, R_RISCV_PCREL_LO12_I, val);
    }
    return;
  }

  case R_RISCV_GOT_HI20:
  case R_RISCV_PCREL_HI20:
  case R_RISCV_TLSDESC_HI20:
  case R_RISCV_TLS_GD_HI20:
  case R_RISCV_TLS_GOT_HI20:
  case R_RISCV_TPREL_HI20:
  case R_RISCV_HI20: {
    // U-type upper immediate, rounded for the paired sign-extended %lo.
    uint64_t hi = val + 0x800;
    checkInt(ctx, loc, SignExtend64(hi, bits) >> 12, 20, rel);
    write32le(loc, (read32le(loc) & 0xFFF) | (hi & 0xFFFFF000));
    return;
  }

  case R_RISCV_PCREL_LO12_I:
  case R_RISCV_TLSDESC_LOAD_LO12:
  case R_RISCV_TLSDESC_ADD_LO12:
  case R_RISCV_TPREL_LO12_I:
  case R_RISCV_LO12_I: {
    // lo = val - hi<<12 compensates for the %hi rounding above.
    uint64_t hi = (val + 0x800) >> 12;
    uint64_t lo = val - (hi << 12);
    write32le(loc, setLO12_I(read32le(loc), lo & 0xfff));
    return;
  }

  case R_RISCV_PCREL_LO12_S:
  case R_RISCV_TPREL_LO12_S:
  case R_RISCV_LO12_S: {
    uint64_t hi = (val + 0x800) >> 12;
    uint64_t lo = val - (hi << 12);
    write32le(loc, setLO12_S(read32le(loc), lo));
    return;
  }

  case INTERNAL_R_RISCV_GPREL_I:
  case INTERNAL_R_RISCV_GPREL_S: {
    // GP relaxation: rewrite rs1 to gp and splice in the gp-relative offset.
    Defined *gp = ctx.sym.riscvGlobalPointer;
    int64_t displace = SignExtend64(val - gp->getVA(ctx), bits);
    checkInt(ctx, loc, displace, 12, rel);
    uint32_t insn = (read32le(loc) & ~(31 << 15)) | (X_GP << 15);
    if (rel.type == INTERNAL_R_RISCV_GPREL_I)
      insn = setLO12_I(insn, displace);
    else
      insn = setLO12_S(insn, displace);
    write32le(loc, insn);
    return;
  }

  case R_RISCV_ADD8:
    *loc += val;
    return;
  case R_RISCV_ADD16:
    write16le(loc, read16le(loc) + val);
    return;
  case R_RISCV_ADD32:
    write32le(loc, read32le(loc) + val);
    return;
  case R_RISCV_ADD64:
    write64le(loc, read64le(loc) + val);
    return;
  case R_RISCV_SUB6:
    // Only the low 6 bits participate; the top 2 bits are preserved.
    *loc = (*loc & 0xc0) | (((*loc & 0x3f) - val) & 0x3f);
    return;
  case R_RISCV_SUB8:
    *loc -= val;
    return;
  case R_RISCV_SUB16:
    write16le(loc, read16le(loc) - val);
    return;
  case R_RISCV_SUB32:
    write32le(loc, read32le(loc) - val);
    return;
  case R_RISCV_SUB64:
    write64le(loc, read64le(loc) - val);
    return;
  case R_RISCV_SET6:
    *loc = (*loc & 0xc0) | (val & 0x3f);
    return;
  case R_RISCV_SET8:
    *loc = val;
    return;
  case R_RISCV_SET16:
    write16le(loc, val);
    return;
  case R_RISCV_SET32:
  case R_RISCV_32_PCREL:
  case R_RISCV_PLT32:
  case R_RISCV_GOT32_PCREL:
    checkInt(ctx, loc, val, 32, rel);
    write32le(loc, val);
    return;

  case R_RISCV_TLS_DTPREL32:
    write32le(loc, val - dtpOffset);
    break;
  case R_RISCV_TLS_DTPREL64:
    write64le(loc, val - dtpOffset);
    break;

  case R_RISCV_RELAX:
    return;
  case R_RISCV_TLSDESC:
    // The addend is stored in the second word.
    if (ctx.arg.is64)
      write64le(loc + 8, val);
    else
      write32le(loc + 4, val);
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}
529 static bool relaxable(ArrayRef<Relocation> relocs, size_t i) {
530 return i + 1 != relocs.size() && relocs[i + 1].type == R_RISCV_RELAX;
// Rewrite a TLSDESC code sequence to initial-exec: the four instructions
// become nop/nop/auipc/l[wd], loading the TP offset from the GOT. `val` is
// the PC-relative distance to the IE GOT entry.
static void tlsdescToIe(Ctx &ctx, uint8_t *loc, const Relocation &rel,
                        uint64_t val) {
  switch (rel.type) {
  case R_RISCV_TLSDESC_HI20:
  case R_RISCV_TLSDESC_LOAD_LO12:
    write32le(loc, 0x00000013); // nop
    break;
  case R_RISCV_TLSDESC_ADD_LO12:
    write32le(loc, utype(AUIPC, X_A0, hi20(val))); // auipc a0,<hi20>
    break;
  case R_RISCV_TLSDESC_CALL:
    if (ctx.arg.is64)
      write32le(loc, itype(LD, X_A0, X_A0, lo12(val))); // ld a0,<lo12>(a0)
    else
      write32le(loc, itype(LW, X_A0, X_A0, lo12(val))); // lw a0,<lo12>(a0)
    break;
  default:
    llvm_unreachable("unsupported relocation for TLSDESC to IE");
  }
}
// Rewrite a TLSDESC code sequence to local-exec: materialize the TP offset
// `val` directly into a0 (lui+addi, or a single addi when it fits in 12
// bits, padding the rest with nops).
static void tlsdescToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {
  switch (rel.type) {
  case R_RISCV_TLSDESC_HI20:
  case R_RISCV_TLSDESC_LOAD_LO12:
    write32le(loc, 0x00000013); // nop
    return;
  case R_RISCV_TLSDESC_ADD_LO12:
    if (isInt<12>(val))
      write32le(loc, 0x00000013); // nop
    else
      write32le(loc, utype(LUI, X_A0, hi20(val))); // lui a0,<hi20>
    return;
  case R_RISCV_TLSDESC_CALL:
    if (isInt<12>(val))
      write32le(loc, itype(ADDI, X_A0, 0, val)); // addi a0,zero,<lo12>
    else
      write32le(loc, itype(ADDI, X_A0, X_A0, lo12(val))); // addi a0,a0,<lo12>
    return;
  default:
    llvm_unreachable("unsupported relocation for TLSDESC to LE");
  }
}
577 void RISCV::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {
578 uint64_t secAddr = sec.getOutputSection()->addr;
579 if (auto *s = dyn_cast<InputSection>(&sec))
580 secAddr += s->outSecOff;
581 else if (auto *ehIn = dyn_cast<EhInputSection>(&sec))
582 secAddr += ehIn->getParent()->outSecOff;
583 uint64_t tlsdescVal = 0;
584 bool tlsdescRelax = false, isToLe = false;
585 const ArrayRef<Relocation> relocs = sec.relocs();
586 for (size_t i = 0, size = relocs.size(); i != size; ++i) {
587 const Relocation &rel = relocs[i];
588 uint8_t *loc = buf + rel.offset;
589 uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset);
591 switch (rel.expr) {
592 case R_RELAX_HINT:
593 continue;
594 case R_TLSDESC_PC:
595 // For R_RISCV_TLSDESC_HI20, store &got(sym)-PC to be used by the
596 // following two instructions L[DW] and ADDI.
597 if (rel.type == R_RISCV_TLSDESC_HI20)
598 tlsdescVal = val;
599 else
600 val = tlsdescVal;
601 break;
602 case R_RELAX_TLS_GD_TO_IE:
603 // Only R_RISCV_TLSDESC_HI20 reaches here. tlsdescVal will be finalized
604 // after we see R_RISCV_TLSDESC_ADD_LO12 in the R_RELAX_TLS_GD_TO_LE case.
605 // The net effect is that tlsdescVal will be smaller than `val` to take
606 // into account of NOP instructions (in the absence of R_RISCV_RELAX)
607 // before AUIPC.
608 tlsdescVal = val + rel.offset;
609 isToLe = false;
610 tlsdescRelax = relaxable(relocs, i);
611 if (!tlsdescRelax)
612 tlsdescToIe(ctx, loc, rel, val);
613 continue;
614 case R_RELAX_TLS_GD_TO_LE:
615 // See the comment in handleTlsRelocation. For TLSDESC=>IE,
616 // R_RISCV_TLSDESC_{LOAD_LO12,ADD_LO12,CALL} also reach here. If isToLe is
617 // false, this is actually TLSDESC=>IE optimization.
618 if (rel.type == R_RISCV_TLSDESC_HI20) {
619 tlsdescVal = val;
620 isToLe = true;
621 tlsdescRelax = relaxable(relocs, i);
622 } else {
623 if (!isToLe && rel.type == R_RISCV_TLSDESC_ADD_LO12)
624 tlsdescVal -= rel.offset;
625 val = tlsdescVal;
627 // When NOP conversion is eligible and relaxation applies, don't write a
628 // NOP in case an unrelated instruction follows the current instruction.
629 if (tlsdescRelax &&
630 (rel.type == R_RISCV_TLSDESC_HI20 ||
631 rel.type == R_RISCV_TLSDESC_LOAD_LO12 ||
632 (rel.type == R_RISCV_TLSDESC_ADD_LO12 && isToLe && !hi20(val))))
633 continue;
634 if (isToLe)
635 tlsdescToLe(loc, rel, val);
636 else
637 tlsdescToIe(ctx, loc, rel, val);
638 continue;
639 case RE_RISCV_LEB128:
640 if (i + 1 < size) {
641 const Relocation &rel1 = relocs[i + 1];
642 if (rel.type == R_RISCV_SET_ULEB128 &&
643 rel1.type == R_RISCV_SUB_ULEB128 && rel.offset == rel1.offset) {
644 auto val = rel.sym->getVA(ctx, rel.addend) -
645 rel1.sym->getVA(ctx, rel1.addend);
646 if (overwriteULEB128(loc, val) >= 0x80)
647 Err(ctx) << sec.getLocation(rel.offset) << ": ULEB128 value " << val
648 << " exceeds available space; references '" << rel.sym
649 << "'";
650 ++i;
651 continue;
654 Err(ctx) << sec.getLocation(rel.offset)
655 << ": R_RISCV_SET_ULEB128 not paired with R_RISCV_SUB_SET128";
656 return;
657 default:
658 break;
660 relocate(loc, rel, val);
// Allocate per-section relaxation bookkeeping (RelaxAux) and record sorted
// symbol anchors (st_value and st_value+st_size) so relax() can keep symbol
// values/sizes consistent as bytes are deleted.
void elf::initSymbolAnchors(Ctx &ctx) {
  SmallVector<InputSection *, 0> storage;
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      sec->relaxAux = make<RelaxAux>();
      if (sec->relocs().size()) {
        sec->relaxAux->relocDeltas =
            std::make_unique<uint32_t[]>(sec->relocs().size());
        sec->relaxAux->relocTypes =
            std::make_unique<RelType[]>(sec->relocs().size());
      }
    }
  }
  // Store anchors (st_value and st_value+st_size) for symbols relative to text
  // sections.
  //
  // For a defined symbol foo, we may have `d->file != file` with --wrap=foo.
  // We should process foo, as the defining object file's symbol table may not
  // contain foo after redirectSymbols changed the foo entry to __wrap_foo. To
  // avoid adding a Defined that is undefined in one object file, use
  // `!d->scriptDefined` to exclude symbols that are definitely not wrapped.
  //
  // `relaxAux->anchors` may contain duplicate symbols, but that is fine.
  for (InputFile *file : ctx.objectFiles)
    for (Symbol *sym : file->getSymbols()) {
      auto *d = dyn_cast<Defined>(sym);
      if (!d || (d->file != file && !d->scriptDefined))
        continue;
      if (auto *sec = dyn_cast_or_null<InputSection>(d->section))
        if (sec->flags & SHF_EXECINSTR && sec->relaxAux) {
          // If sec is discarded, relaxAux will be nullptr.
          sec->relaxAux->anchors.push_back({d->value, d, false});
          sec->relaxAux->anchors.push_back({d->value + d->size, d, true});
        }
    }
  // Sort anchors by offset so that we can find the closest relocation
  // efficiently. For a zero size symbol, ensure that its start anchor precedes
  // its end anchor. For two symbols with anchors at the same offset, their
  // order does not matter.
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      llvm::sort(sec->relaxAux->anchors, [](auto &a, auto &b) {
        return std::make_pair(a.offset, a.end) <
               std::make_pair(b.offset, b.end);
      });
    }
  }
}
// Relax R_RISCV_CALL/R_RISCV_CALL_PLT auipc+jalr to c.j, c.jal, or jal.
// On success, records the replacement instruction in relaxAux and sets
// `remove` to the number of bytes deleted (6 for a 2-byte compressed jump,
// 4 for a 4-byte jal).
static void relaxCall(Ctx &ctx, const InputSection &sec, size_t i, uint64_t loc,
                      Relocation &r, uint32_t &remove) {
  const bool rvc = getEFlags(ctx, sec.file) & EF_RISCV_RVC;
  const Symbol &sym = *r.sym;
  const uint64_t insnPair = read64le(sec.content().data() + r.offset);
  // rd lives in bits 11:7 of the second (jalr) instruction of the pair.
  const uint32_t rd = extractBits(insnPair, 32 + 11, 32 + 7);
  const uint64_t dest =
      (r.expr == R_PLT_PC ? sym.getPltVA(ctx) : sym.getVA(ctx)) + r.addend;
  const int64_t displace = dest - loc;

  if (rvc && isInt<12>(displace) && rd == 0) {
    sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
    sec.relaxAux->writes.push_back(0xa001); // c.j
    remove = 6;
  } else if (rvc && isInt<12>(displace) && rd == X_RA &&
             !ctx.arg.is64) { // RV32C only
    sec.relaxAux->relocTypes[i] = R_RISCV_RVC_JUMP;
    sec.relaxAux->writes.push_back(0x2001); // c.jal
    remove = 6;
  } else if (isInt<21>(displace)) {
    sec.relaxAux->relocTypes[i] = R_RISCV_JAL;
    sec.relaxAux->writes.push_back(0x6f | rd << 7); // jal
    remove = 4;
  }
}
// Relax local-exec TLS when hi20 is zero: drop the lui/add instructions and
// rewrite the lo12 instruction to address off tp directly.
static void relaxTlsLe(Ctx &ctx, const InputSection &sec, size_t i,
                       uint64_t loc, Relocation &r, uint32_t &remove) {
  uint64_t val = r.sym->getVA(ctx, r.addend);
  if (hi20(val) != 0)
    return;
  uint32_t insn = read32le(sec.content().data() + r.offset);
  switch (r.type) {
  case R_RISCV_TPREL_HI20:
  case R_RISCV_TPREL_ADD:
    // Remove lui rd, %tprel_hi(x) and add rd, rd, tp, %tprel_add(x).
    sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
    remove = 4;
    break;
  case R_RISCV_TPREL_LO12_I:
    // addi rd, rd, %tprel_lo(x) => addi rd, tp, st_value(x)
    sec.relaxAux->relocTypes[i] = R_RISCV_32;
    insn = (insn & ~(31 << 15)) | (X_TP << 15);
    sec.relaxAux->writes.push_back(setLO12_I(insn, val));
    break;
  case R_RISCV_TPREL_LO12_S:
    // sw rs, %tprel_lo(x)(rd) => sw rs, st_value(x)(rd)
    sec.relaxAux->relocTypes[i] = R_RISCV_32;
    insn = (insn & ~(31 << 15)) | (X_TP << 15);
    sec.relaxAux->writes.push_back(setLO12_S(insn, val));
    break;
  }
}
// GP relaxation: when the target is within +-2KiB of the global pointer,
// drop the lui and rewrite the lo12 instruction to address off gp (marked
// with the internal GPREL relocation numbers for relocate()).
static void relaxHi20Lo12(Ctx &ctx, const InputSection &sec, size_t i,
                          uint64_t loc, Relocation &r, uint32_t &remove) {
  const Defined *gp = ctx.sym.riscvGlobalPointer;
  if (!gp)
    return;

  if (!isInt<12>(r.sym->getVA(ctx, r.addend) - gp->getVA(ctx)))
    return;

  switch (r.type) {
  case R_RISCV_HI20:
    // Remove lui rd, %hi20(x).
    sec.relaxAux->relocTypes[i] = R_RISCV_RELAX;
    remove = 4;
    break;
  case R_RISCV_LO12_I:
    sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_I;
    break;
  case R_RISCV_LO12_S:
    sec.relaxAux->relocTypes[i] = INTERNAL_R_RISCV_GPREL_S;
    break;
  }
}
// One relaxation pass over a section: compute per-relocation byte deletions
// (relocDeltas), update symbol anchors accordingly, and report whether any
// delta changed since the previous pass (so the driver iterates to a fixed
// point).
static bool relax(Ctx &ctx, InputSection &sec) {
  const uint64_t secAddr = sec.getVA();
  const MutableArrayRef<Relocation> relocs = sec.relocs();
  auto &aux = *sec.relaxAux;
  bool changed = false;
  ArrayRef<SymbolAnchor> sa = ArrayRef(aux.anchors);
  uint64_t delta = 0;
  bool tlsdescRelax = false, toLeShortForm = false;

  std::fill_n(aux.relocTypes.get(), relocs.size(), R_RISCV_NONE);
  aux.writes.clear();
  for (auto [i, r] : llvm::enumerate(relocs)) {
    // `loc` is the post-relaxation address of this relocation.
    const uint64_t loc = secAddr + r.offset - delta;
    uint32_t &cur = aux.relocDeltas[i], remove = 0;
    switch (r.type) {
    case R_RISCV_ALIGN: {
      const uint64_t nextLoc = loc + r.addend;
      const uint64_t align = PowerOf2Ceil(r.addend + 2);
      // All bytes beyond the alignment boundary should be removed.
      remove = nextLoc - ((loc + align - 1) & -align);
      // If we can't satisfy this alignment, we've found a bad input.
      if (LLVM_UNLIKELY(static_cast<int32_t>(remove) < 0)) {
        Err(ctx) << getErrorLoc(ctx, (const uint8_t *)loc)
                 << "insufficient padding bytes for " << r.type << ": "
                 << r.addend
                 << " bytes available "
                    "for requested alignment of "
                 << align << " bytes";
        remove = 0;
      }
      break;
    }
    case R_RISCV_CALL:
    case R_RISCV_CALL_PLT:
      if (relaxable(relocs, i))
        relaxCall(ctx, sec, i, loc, r, remove);
      break;
    case R_RISCV_TPREL_HI20:
    case R_RISCV_TPREL_ADD:
    case R_RISCV_TPREL_LO12_I:
    case R_RISCV_TPREL_LO12_S:
      if (relaxable(relocs, i))
        relaxTlsLe(ctx, sec, i, loc, r, remove);
      break;
    case R_RISCV_HI20:
    case R_RISCV_LO12_I:
    case R_RISCV_LO12_S:
      if (relaxable(relocs, i))
        relaxHi20Lo12(ctx, sec, i, loc, r, remove);
      break;
    case R_RISCV_TLSDESC_HI20:
      // For TLSDESC=>LE, we can use the short form if hi20 is zero.
      tlsdescRelax = relaxable(relocs, i);
      toLeShortForm = tlsdescRelax && r.expr == R_RELAX_TLS_GD_TO_LE &&
                      !hi20(r.sym->getVA(ctx, r.addend));
      [[fallthrough]];
    case R_RISCV_TLSDESC_LOAD_LO12:
      // For TLSDESC=>LE/IE, AUIPC and L[DW] are removed if relaxable.
      if (tlsdescRelax && r.expr != R_TLSDESC_PC)
        remove = 4;
      break;
    case R_RISCV_TLSDESC_ADD_LO12:
      if (toLeShortForm)
        remove = 4;
      break;
    }

    // For all anchors whose offsets are <= r.offset, they are preceded by
    // the previous relocation whose `relocDeltas` value equals `delta`.
    // Decrease their st_value and update their st_size.
    for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) {
      if (sa[0].end)
        sa[0].d->size = sa[0].offset - delta - sa[0].d->value;
      else
        sa[0].d->value = sa[0].offset - delta;
    }
    delta += remove;
    if (delta != cur) {
      cur = delta;
      changed = true;
    }
  }

  // Anchors past the last relocation shift by the final accumulated delta.
  for (const SymbolAnchor &a : sa) {
    if (a.end)
      a.d->size = a.offset - delta - a.d->value;
    else
      a.d->value = a.offset - delta;
  }
  // Inform assignAddresses that the size has changed.
  if (!isUInt<32>(delta))
    Fatal(ctx) << "section size decrease is too large: " << delta;
  sec.bytesDropped = delta;
  return changed;
}
// When relaxing just R_RISCV_ALIGN, relocDeltas is usually changed only once in
// the absence of a linker script. For call and load/store R_RISCV_RELAX, code
// shrinkage may reduce displacement and make more relocations eligible for
// relaxation. Code shrinkage may increase displacement to a call/load/store
// target at a higher fixed address, invalidating an earlier relaxation. Any
// change in section sizes can have cascading effect and require another
// relaxation pass.
bool RISCV::relaxOnce(int pass) const {
  llvm::TimeTraceScope timeScope("RISC-V relaxOnce");
  if (ctx.arg.relocatable)
    return false;

  if (pass == 0)
    initSymbolAnchors(ctx);

  SmallVector<InputSection *, 0> storage;
  bool changed = false;
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage))
      changed |= relax(ctx, *sec);
  }
  return changed;
}
// Commit the relaxation decisions: rebuild each section's contents with the
// deleted bytes removed and the rewritten instructions spliced in, then
// shift relocation offsets by the accumulated deltas.
void RISCV::finalizeRelax(int passes) const {
  llvm::TimeTraceScope timeScope("Finalize RISC-V relaxation");
  Log(ctx) << "relaxation passes: " << passes;
  SmallVector<InputSection *, 0> storage;
  for (OutputSection *osec : ctx.outputSections) {
    if (!(osec->flags & SHF_EXECINSTR))
      continue;
    for (InputSection *sec : getInputSections(*osec, storage)) {
      RelaxAux &aux = *sec->relaxAux;
      if (!aux.relocDeltas)
        continue;

      MutableArrayRef<Relocation> rels = sec->relocs();
      ArrayRef<uint8_t> old = sec->content();
      size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
      size_t writesIdx = 0;
      uint8_t *p = ctx.bAlloc.Allocate<uint8_t>(newSize);
      uint64_t offset = 0;
      int64_t delta = 0;
      sec->content_ = p;
      sec->size = newSize;
      sec->bytesDropped = 0;

      // Update section content: remove NOPs for R_RISCV_ALIGN and rewrite
      // instructions for relaxed relocations.
      for (size_t i = 0, e = rels.size(); i != e; ++i) {
        uint32_t remove = aux.relocDeltas[i] - delta;
        delta = aux.relocDeltas[i];
        if (remove == 0 && aux.relocTypes[i] == R_RISCV_NONE)
          continue;

        // Copy from last location to the current relocated location.
        const Relocation &r = rels[i];
        uint64_t size = r.offset - offset;
        memcpy(p, old.data() + offset, size);
        p += size;

        // For R_RISCV_ALIGN, we will place `offset` in a location (among NOPs)
        // to satisfy the alignment requirement. If both `remove` and r.addend
        // are multiples of 4, it is as if we have skipped some NOPs. Otherwise
        // we are in the middle of a 4-byte NOP, and we need to rewrite the NOP
        // sequence.
        int64_t skip = 0;
        if (r.type == R_RISCV_ALIGN) {
          if (remove % 4 || r.addend % 4) {
            skip = r.addend - remove;
            int64_t j = 0;
            for (; j + 4 <= skip; j += 4)
              write32le(p + j, 0x00000013); // nop
            if (j != skip) {
              assert(j + 2 == skip);
              write16le(p + j, 0x0001); // c.nop
            }
          }
        } else if (RelType newType = aux.relocTypes[i]) {
          switch (newType) {
          case INTERNAL_R_RISCV_GPREL_I:
          case INTERNAL_R_RISCV_GPREL_S:
            break;
          case R_RISCV_RELAX:
            // Used by relaxTlsLe to indicate the relocation is ignored.
            break;
          case R_RISCV_RVC_JUMP:
            skip = 2;
            write16le(p, aux.writes[writesIdx++]);
            break;
          case R_RISCV_JAL:
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            break;
          case R_RISCV_32:
            // Used by relaxTlsLe to write a uint32_t then suppress the handling
            // in relocateAlloc.
            skip = 4;
            write32le(p, aux.writes[writesIdx++]);
            aux.relocTypes[i] = R_RISCV_NONE;
            break;
          default:
            llvm_unreachable("unsupported type");
          }
        }
        p += skip;
        offset = r.offset + skip + remove;
      }
      memcpy(p, old.data() + offset, old.size() - offset);

      // Subtract the previous relocDeltas value from the relocation offset.
      // For a pair of R_RISCV_CALL/R_RISCV_RELAX with the same offset, decrease
      // their r_offset by the same delta.
      delta = 0;
      for (size_t i = 0, e = rels.size(); i != e;) {
        uint64_t cur = rels[i].offset;
        do {
          rels[i].offset -= delta;
          if (aux.relocTypes[i] != R_RISCV_NONE)
            rels[i].type = aux.relocTypes[i];
        } while (++i != e && rels[i].offset == cur);
        delta = aux.relocDeltas[i - 1];
      }
    }
  }
}
namespace {
// Representation of the merged .riscv.attributes input sections. The psABI
// specifies merge policy for attributes. E.g. if we link an object without an
// extension with an object with the extension, the output Tag_RISCV_arch shall
// contain the extension. Some tools like objdump parse .riscv.attributes and
// disable some instructions if the first Tag_RISCV_arch does not contain an
// extension.
class RISCVAttributesSection final : public SyntheticSection {
public:
  RISCVAttributesSection(Ctx &ctx)
      : SyntheticSection(ctx, ".riscv.attributes", SHT_RISCV_ATTRIBUTES, 0, 1) {
  }

  // `size` is computed once by mergeAttributesSection and cached here.
  size_t getSize() const override { return size; }
  void writeTo(uint8_t *buf) override;

  // Vendor subsection name mandated by the RISC-V psABI.
  static constexpr StringRef vendor = "riscv";
  // Merged integer-valued attributes, keyed by tag number.
  DenseMap<unsigned, unsigned> intAttr;
  // Merged string-valued attributes (e.g. Tag_RISCV_arch), keyed by tag.
  DenseMap<unsigned, StringRef> strAttr;
  size_t size = 0;
};
} // namespace
1046 static void mergeArch(Ctx &ctx, RISCVISAUtils::OrderedExtensionMap &mergedExts,
1047 unsigned &mergedXlen, const InputSectionBase *sec,
1048 StringRef s) {
1049 auto maybeInfo = RISCVISAInfo::parseNormalizedArchString(s);
1050 if (!maybeInfo) {
1051 Err(ctx) << sec << ": " << s << ": " << maybeInfo.takeError();
1052 return;
1055 // Merge extensions.
1056 RISCVISAInfo &info = **maybeInfo;
1057 if (mergedExts.empty()) {
1058 mergedExts = info.getExtensions();
1059 mergedXlen = info.getXLen();
1060 } else {
1061 for (const auto &ext : info.getExtensions()) {
1062 auto p = mergedExts.insert(ext);
1063 if (!p.second) {
1064 if (std::tie(p.first->second.Major, p.first->second.Minor) <
1065 std::tie(ext.second.Major, ext.second.Minor))
1066 p.first->second = ext.second;
// Merge a new object's Tag_RISCV_atomic_abi value (`newTag`) into the running
// merged value stored at `it` (the entry in the merged intAttr map, currently
// holding `oldTag`). The compatibility lattice: A6S is subsumed by both A6C
// and A7 (merging picks the stronger of the two); A6C and A7 are mutually
// incompatible (hard error); UNKNOWN is compatible with everything.
// `oldSection`/`newSection` are only used for diagnostics.
static void mergeAtomic(Ctx &ctx, DenseMap<unsigned, unsigned>::iterator it,
                        const InputSectionBase *oldSection,
                        const InputSectionBase *newSection,
                        RISCVAttrs::RISCVAtomicAbiTag oldTag,
                        RISCVAttrs::RISCVAtomicAbiTag newTag) {
  using RISCVAttrs::RISCVAtomicAbiTag;
  // Same tags stay the same, and UNKNOWN is compatible with anything
  if (oldTag == newTag || newTag == RISCVAtomicAbiTag::UNKNOWN)
    return;

  // Incompatible-combination diagnostic (A6C vs A7).
  auto reportAbiError = [&]() {
    Err(ctx) << "atomic abi mismatch for " << oldSection->name << "\n>>> "
             << oldSection << ": atomic_abi=" << static_cast<unsigned>(oldTag)
             << "\n>>> " << newSection
             << ": atomic_abi=" << static_cast<unsigned>(newTag);
  };

  // Diagnostic for a tag value outside the known enumerators. The switch
  // returns for every known tag, so only out-of-range values fall through to
  // the error. No `default:` so -Wswitch flags any newly added enumerator.
  auto reportUnknownAbiError = [&](const InputSectionBase *section,
                                   RISCVAtomicAbiTag tag) {
    switch (tag) {
    case RISCVAtomicAbiTag::UNKNOWN:
    case RISCVAtomicAbiTag::A6C:
    case RISCVAtomicAbiTag::A6S:
    case RISCVAtomicAbiTag::A7:
      return;
    }
    Err(ctx) << "unknown atomic abi for " << section->name << "\n>>> "
             << section << ": atomic_abi=" << static_cast<unsigned>(tag);
  };

  switch (oldTag) {
  case RISCVAtomicAbiTag::UNKNOWN:
    // UNKNOWN adopts whatever the new object declares.
    it->getSecond() = static_cast<unsigned>(newTag);
    return;
  case RISCVAtomicAbiTag::A6C:
    switch (newTag) {
    case RISCVAtomicAbiTag::A6S:
      // A6S is compatible with A6C; keep the stronger A6C.
      it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A6C);
      return;
    case RISCVAtomicAbiTag::A7:
      reportAbiError();
      return;
    case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
    case RISCVAttrs::RISCVAtomicAbiTag::A6C:
      return;
    }
    break;

  case RISCVAtomicAbiTag::A6S:
    switch (newTag) {
    case RISCVAtomicAbiTag::A6C:
      // Upgrade A6S to the stronger A6C.
      it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A6C);
      return;
    case RISCVAtomicAbiTag::A7:
      // Upgrade A6S to the stronger A7.
      it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A7);
      return;
    case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
    case RISCVAttrs::RISCVAtomicAbiTag::A6S:
      return;
    }
    break;

  case RISCVAtomicAbiTag::A7:
    switch (newTag) {
    case RISCVAtomicAbiTag::A6S:
      // A6S is compatible with A7; keep the stronger A7.
      it->getSecond() = static_cast<unsigned>(RISCVAtomicAbiTag::A7);
      return;
    case RISCVAtomicAbiTag::A6C:
      reportAbiError();
      return;
    case RISCVAttrs::RISCVAtomicAbiTag::UNKNOWN:
    case RISCVAttrs::RISCVAtomicAbiTag::A7:
      return;
    }
    break;
  }

  // If we get here, then we have an invalid tag, so report it.
  // Putting these checks at the end allows us to only do these checks when we
  // need to, since this is expected to be a rare occurrence.
  reportUnknownAbiError(oldSection, oldTag);
  reportUnknownAbiError(newSection, newTag);
}
// Merge all input .riscv.attributes sections (`sections`) into one synthetic
// RISCVAttributesSection (owned by ctx.in.riscvAttributes), applying the
// psABI merge policy per tag, then compute the output section's size.
// Returns a pointer to the merged section, to be re-inserted by the caller.
static RISCVAttributesSection *
mergeAttributesSection(Ctx &ctx,
                       const SmallVector<InputSectionBase *, 0> &sections) {
  using RISCVAttrs::RISCVAtomicAbiTag;
  RISCVISAUtils::OrderedExtensionMap exts;
  // Track which section first defined stack_align / atomic_abi, for
  // mismatch diagnostics.
  const InputSectionBase *firstStackAlign = nullptr;
  const InputSectionBase *firstAtomicAbi = nullptr;
  unsigned firstStackAlignValue = 0, xlen = 0;
  bool hasArch = false;

  ctx.in.riscvAttributes = std::make_unique<RISCVAttributesSection>(ctx);
  auto &merged = static_cast<RISCVAttributesSection &>(*ctx.in.riscvAttributes);

  // Collect all tags values from attributes section.
  const auto &attributesTags = RISCVAttrs::getRISCVAttributeTags();
  for (const InputSectionBase *sec : sections) {
    RISCVAttributeParser parser;
    // A parse failure is only a warning: unparsed tags are simply skipped.
    if (Error e = parser.parse(sec->content(), llvm::endianness::little))
      Warn(ctx) << sec << ": " << std::move(e);
    for (const auto &tag : attributesTags) {
      switch (RISCVAttrs::AttrType(tag.attr)) {
      // Integer attributes.
      case RISCVAttrs::STACK_ALIGN:
        // stack_align must agree across all inputs; mismatches are errors.
        if (auto i = parser.getAttributeValue(tag.attr)) {
          auto r = merged.intAttr.try_emplace(tag.attr, *i);
          if (r.second) {
            firstStackAlign = sec;
            firstStackAlignValue = *i;
          } else if (r.first->second != *i) {
            Err(ctx) << sec << " has stack_align=" << *i << " but "
                     << firstStackAlign
                     << " has stack_align=" << firstStackAlignValue;
          }
        }
        continue;
      case RISCVAttrs::UNALIGNED_ACCESS:
        // Any input allowing unaligned access makes the output allow it.
        if (auto i = parser.getAttributeValue(tag.attr))
          merged.intAttr[tag.attr] |= *i;
        continue;

      // String attributes.
      case RISCVAttrs::ARCH:
        // Arch strings are merged as a union of ISA extensions; the final
        // string is materialized after the loop.
        if (auto s = parser.getAttributeString(tag.attr)) {
          hasArch = true;
          mergeArch(ctx, exts, xlen, sec, *s);
        }
        continue;

      // Attributes which use the default handling.
      case RISCVAttrs::PRIV_SPEC:
      case RISCVAttrs::PRIV_SPEC_MINOR:
      case RISCVAttrs::PRIV_SPEC_REVISION:
        break;

      case RISCVAttrs::AttrType::ATOMIC_ABI:
        if (auto i = parser.getAttributeValue(tag.attr)) {
          auto r = merged.intAttr.try_emplace(tag.attr, *i);
          if (r.second)
            firstAtomicAbi = sec;
          else
            mergeAtomic(ctx, r.first, firstAtomicAbi, sec,
                        static_cast<RISCVAtomicAbiTag>(r.first->getSecond()),
                        static_cast<RISCVAtomicAbiTag>(*i));
        }
        continue;
      }

      // Fallback for deprecated priv_spec* and other unknown attributes: retain
      // the attribute if all input sections agree on the value. GNU ld uses 0
      // and empty strings as default values which are not dumped to the output.
      // TODO Adjust after resolution to
      // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/issues/352
      // Even tag numbers are integer-valued, odd ones string-valued.
      if (tag.attr % 2 == 0) {
        if (auto i = parser.getAttributeValue(tag.attr)) {
          auto r = merged.intAttr.try_emplace(tag.attr, *i);
          if (!r.second && r.first->second != *i)
            r.first->second = 0;
        }
      } else if (auto s = parser.getAttributeString(tag.attr)) {
        auto r = merged.strAttr.try_emplace(tag.attr, *s);
        if (!r.second && r.first->second != *s)
          r.first->second = {};
      }
    }
  }

  // Materialize the merged arch string from the unioned extension map.
  if (hasArch && xlen != 0) {
    if (auto result = RISCVISAInfo::createFromExtMap(xlen, exts)) {
      merged.strAttr.try_emplace(RISCVAttrs::ARCH,
                                 ctx.saver.save((*result)->toString()));
    } else {
      Err(ctx) << result.takeError();
    }
  }

  // The total size of headers: format-version byte + 4-byte section length,
  // NUL-terminated "vendor-name", then file-tag byte + 4-byte subsection size.
  size_t size = 5 + merged.vendor.size() + 1 + 5;
  // Attributes with default values (0 / empty) are omitted from the output.
  for (auto &attr : merged.intAttr)
    if (attr.second != 0)
      size += getULEB128Size(attr.first) + getULEB128Size(attr.second);
  for (auto &attr : merged.strAttr)
    if (!attr.second.empty())
      size += getULEB128Size(attr.first) + attr.second.size() + 1;
  merged.size = size;
  return &merged;
}
1263 void RISCVAttributesSection::writeTo(uint8_t *buf) {
1264 const size_t size = getSize();
1265 uint8_t *const end = buf + size;
1266 *buf = ELFAttrs::Format_Version;
1267 write32(ctx, buf + 1, size - 1);
1268 buf += 5;
1270 memcpy(buf, vendor.data(), vendor.size());
1271 buf += vendor.size() + 1;
1273 *buf = ELFAttrs::File;
1274 write32(ctx, buf + 1, end - buf);
1275 buf += 5;
1277 for (auto &attr : intAttr) {
1278 if (attr.second == 0)
1279 continue;
1280 buf += encodeULEB128(attr.first, buf);
1281 buf += encodeULEB128(attr.second, buf);
1283 for (auto &attr : strAttr) {
1284 if (attr.second.empty())
1285 continue;
1286 buf += encodeULEB128(attr.first, buf);
1287 memcpy(buf, attr.second.data(), attr.second.size());
1288 buf += attr.second.size() + 1;
1292 void elf::mergeRISCVAttributesSections(Ctx &ctx) {
1293 // Find the first input SHT_RISCV_ATTRIBUTES; return if not found.
1294 size_t place =
1295 llvm::find_if(ctx.inputSections,
1296 [](auto *s) { return s->type == SHT_RISCV_ATTRIBUTES; }) -
1297 ctx.inputSections.begin();
1298 if (place == ctx.inputSections.size())
1299 return;
1301 // Extract all SHT_RISCV_ATTRIBUTES sections into `sections`.
1302 SmallVector<InputSectionBase *, 0> sections;
1303 llvm::erase_if(ctx.inputSections, [&](InputSectionBase *s) {
1304 if (s->type != SHT_RISCV_ATTRIBUTES)
1305 return false;
1306 sections.push_back(s);
1307 return true;
1310 // Add the merged section.
1311 ctx.inputSections.insert(ctx.inputSections.begin() + place,
1312 mergeAttributesSection(ctx, sections));
// Instantiate the RISC-V TargetInfo and install it on the link context.
void elf::setRISCVTargetInfo(Ctx &ctx) { ctx.target.reset(new RISCV(ctx)); }