// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2017 Zihao Yu
 */

#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/moduleloader.h>
#include <linux/sizes.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/sections.h>

/* Tracks which hash buckets currently hold accumulated relocations. */
struct used_bucket {
        struct list_head head;
        struct hlist_head *bucket;
};

/* Per-location head: all accumulated relocations that target one address. */
struct relocation_head {
        struct hlist_node node;
        struct list_head *rel_entry;
        void *location;
};
/* One pending ADD/SUB/SET/ULEB128 relocation against a location. */
struct relocation_entry {
        struct list_head head;
        Elf_Addr value;
        unsigned int type;
};

struct relocation_handlers {
        int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
        int (*accumulate_handler)(struct module *me, void *location,
                                  long buffer);
};
/*
 * The auipc+jalr instruction pair can reach any PC-relative offset
 * in the range [-2^31 - 2^11, 2^31 - 2^11).
 */
static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
{
        return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
}
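
/*
 * Why the bound is asymmetric: auipc supplies a sign-extended 20-bit
 * immediate shifted left by 12, giving a high part in [-2^31, 2^31 - 2^12],
 * and jalr adds a sign-extended 12-bit immediate in [-2^11, 2^11 - 1].
 * Their sum covers [-2^31 - 2^11, 2^31 - 2^12 + 2^11 - 1], which is exactly
 * the half-open interval [-2^31 - 2^11, 2^31 - 2^11) checked above.
 */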
static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{
        __le16 *parcel = location;
        u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;

        insn &= keep;
        insn |= set;

        parcel[0] = cpu_to_le16(insn);
        parcel[1] = cpu_to_le16(insn >> 16);
        return 0;
}
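
/*
 * Instructions are patched as two little-endian 16-bit parcels rather than
 * one 32-bit word: with the C extension in use a 32-bit instruction may be
 * only 2-byte aligned, so halfword accesses avoid misaligned stores and keep
 * the encoding byte order correct regardless of the CPU's native endianness.
 */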
static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{
        __le16 *parcel = location;
        u16 insn = le16_to_cpu(*parcel);

        insn &= keep;
        insn |= set;

        *parcel = cpu_to_le16(insn);
        return 0;
}
static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
{
        if (v != (u32)v) {
                pr_err("%s: value %016llx out of range for 32-bit field\n",
                       me->name, (long long)v);
                return -EINVAL;
        }
        *(u32 *)location = v;
        return 0;
}
static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
{
        *(u64 *)location = v;
        return 0;
}
static int apply_r_riscv_branch_rela(struct module *me, void *location,
                                     Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
        u32 imm4_1 = (offset & 0x1e) << (11 - 4);

        return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}
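
/*
 * The shifts above scatter the branch offset into the B-type immediate
 * layout: offset bit 12 lands in instruction bit 31, bits 10:5 in 30:25,
 * bits 4:1 in 11:8 and bit 11 in bit 7.  The keep mask 0x1fff07f preserves
 * rs1, rs2, funct3 and the opcode while clearing every immediate bit.
 */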
static int apply_r_riscv_jal_rela(struct module *me, void *location,
                                  Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
        u32 imm10_1 = (offset & 0x7fe) << (30 - 10);

        return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}
static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
                                         Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
        u16 imm4_3 = (offset & 0x18) << (12 - 5);
        u16 imm2_1 = (offset & 0x6) << (12 - 10);

        return riscv_insn_rvc_rmw(location, 0xe383,
                                  imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}
static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
                                       Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
        u16 imm7 = (offset & 0x80) >> (7 - 6);
        u16 imm6 = (offset & 0x40) << (12 - 11);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
        u16 imm4 = (offset & 0x10) << (12 - 5);
        u16 imm3_1 = (offset & 0xe) << (12 - 10);

        return riscv_insn_rvc_rmw(location, 0xe003,
                                  imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
                                         Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;

        if (!riscv_insn_valid_32bit_offset(offset)) {
                pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                       me->name, (long long)v, location);
                return -EINVAL;
        }

        return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
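
/*
 * Worked example of the +0x800 rounding: for offset 0x12345fff the high part
 * becomes (0x12345fff + 0x800) & 0xfffff000 = 0x12346000 and the matching
 * low part is 0x12345fff - 0x12346000 = -1, which fits the signed 12-bit
 * immediate that the paired LO12 relocation later writes.
 */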
static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
                                           Elf_Addr v)
{
        /*
         * v is the lo12 value to fill. It is calculated before calling this
         * handler.
         */
        return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
}
static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
                                           Elf_Addr v)
{
        /*
         * v is the lo12 value to fill. It is calculated before calling this
         * handler.
         */
        u32 imm11_5 = (v & 0xfe0) << (31 - 11);
        u32 imm4_0 = (v & 0x1f) << (11 - 4);

        return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
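
/*
 * LO12_S targets an S-type instruction (a store), whose immediate is split:
 * bits 11:5 of the value go to instruction bits 31:25 and bits 4:0 to bits
 * 11:7, hence the two shifts above instead of the single shift used for the
 * I-type LO12_I case.
 */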
static int apply_r_riscv_hi20_rela(struct module *me, void *location,
                                   Elf_Addr v)
{
        if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                       me->name, (long long)v, location);
                return -EINVAL;
        }

        return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
}
static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
                                     Elf_Addr v)
{
        /* Skip the medlow check: the paired HI20 relocation has already done it */
        s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
        s32 lo12 = ((s32)v - hi20);

        return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
}
static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
                                     Elf_Addr v)
{
        /* Skip the medlow check: the paired HI20 relocation has already done it */
        s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
        s32 lo12 = ((s32)v - hi20);
        u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
        u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);

        return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
                                       Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;

        /* Always emit the GOT entry */
        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
                offset = (void *)module_emit_got_entry(me, v) - location;
        } else {
                pr_err("%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
                       me->name, (long long)v, location);
                return -EINVAL;
        }

        return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
                                       Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u32 hi20, lo12;

        if (!riscv_insn_valid_32bit_offset(offset)) {
                /* Only emit the PLT entry if the offset is outside the 32-bit range */
                if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
                        offset = (void *)module_emit_plt_entry(me, v) - location;
                } else {
                        pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                               me->name, (long long)v, location);
                        return -EINVAL;
                }
        }

        hi20 = (offset + 0x800) & 0xfffff000;
        lo12 = (offset - hi20) & 0xfff;
        riscv_insn_rmw(location, 0xfff, hi20);
        return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
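
/*
 * A CALL/CALL_PLT site is an auipc+jalr pair: the first riscv_insn_rmw()
 * patches the U-type auipc at location with the rounded high 20 bits and the
 * second patches the I-type jalr immediately after it, at location + 4, with
 * the remaining low 12 bits.
 */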
static int apply_r_riscv_call_rela(struct module *me, void *location,
                                   Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;
        u32 hi20, lo12;

        if (!riscv_insn_valid_32bit_offset(offset)) {
                pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                       me->name, (long long)v, location);
                return -EINVAL;
        }

        hi20 = (offset + 0x800) & 0xfffff000;
        lo12 = (offset - hi20) & 0xfff;
        riscv_insn_rmw(location, 0xfff, hi20);
        return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
static int apply_r_riscv_relax_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        return 0;
}

static int apply_r_riscv_align_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        pr_err("%s: unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
               me->name, location);
        return -EINVAL;
}
static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
{
        *(u8 *)location += (u8)v;
        return 0;
}

static int apply_r_riscv_add16_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u16 *)location += (u16)v;
        return 0;
}

static int apply_r_riscv_add32_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u32 *)location += (u32)v;
        return 0;
}

static int apply_r_riscv_add64_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u64 *)location += (u64)v;
        return 0;
}

static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
{
        *(u8 *)location -= (u8)v;
        return 0;
}

static int apply_r_riscv_sub16_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u16 *)location -= (u16)v;
        return 0;
}

static int apply_r_riscv_sub32_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u32 *)location -= (u32)v;
        return 0;
}

static int apply_r_riscv_sub64_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u64 *)location -= (u64)v;
        return 0;
}
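
/*
 * The ADD, SUB and SET relocation families patch data rather than
 * instructions; they are typically emitted for label-difference values such
 * as those found in debug information and other metadata sections.  Because
 * several of them can target the same address, they are not applied here
 * directly but accumulated and resolved together (see
 * process_accumulated_relocations() below).
 */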
static int dynamic_linking_not_supported(struct module *me, void *location,
                                         Elf_Addr v)
{
        pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
               me->name, location);
        return -EINVAL;
}

static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
{
        pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
               me->name, location);
        return -EINVAL;
}
static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
{
        u8 *byte = location;
        u8 value = v;

        *byte = (*byte - (value & 0x3f)) & 0x3f;
        return 0;
}

static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
{
        u8 *byte = location;
        u8 value = v;

        *byte = (*byte & 0xc0) | (value & 0x3f);
        return 0;
}

static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
{
        *(u8 *)location = (u8)v;
        return 0;
}
static int apply_r_riscv_set16_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u16 *)location = (u16)v;
        return 0;
}

static int apply_r_riscv_set32_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        *(u32 *)location = (u32)v;
        return 0;
}

static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
                                       Elf_Addr v)
{
        *(u32 *)location = v - (uintptr_t)location;
        return 0;
}
static int apply_r_riscv_plt32_rela(struct module *me, void *location,
                                    Elf_Addr v)
{
        ptrdiff_t offset = (void *)v - location;

        if (!riscv_insn_valid_32bit_offset(offset)) {
                /* Only emit the PLT entry if the offset is outside the 32-bit range */
                if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
                        offset = (void *)module_emit_plt_entry(me, v) - location;
                } else {
                        pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
                               me->name, (long long)v, location);
                        return -EINVAL;
                }
        }

        *(u32 *)location = (u32)offset;
        return 0;
}
static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
{
        *(long *)location = v;
        return 0;
}

static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
{
        *(long *)location -= v;
        return 0;
}
static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
{
        u8 *byte = location;
        u8 value = buffer;

        if (buffer > 0x3f) {
                pr_err("%s: value %ld out of range for 6-bit relocation.\n",
                       me->name, buffer);
                return -EINVAL;
        }

        *byte = (*byte & 0xc0) | (value & 0x3f);
        return 0;
}
static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
{
        if (buffer > U8_MAX) {
                pr_err("%s: value %ld out of range for 8-bit relocation.\n",
                       me->name, buffer);
                return -EINVAL;
        }
        *(u8 *)location = (u8)buffer;
        return 0;
}

static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
{
        if (buffer > U16_MAX) {
                pr_err("%s: value %ld out of range for 16-bit relocation.\n",
                       me->name, buffer);
                return -EINVAL;
        }
        *(u16 *)location = (u16)buffer;
        return 0;
}

static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
{
        if (buffer > U32_MAX) {
                pr_err("%s: value %ld out of range for 32-bit relocation.\n",
                       me->name, buffer);
                return -EINVAL;
        }
        *(u32 *)location = (u32)buffer;
        return 0;
}

static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
{
        *(u64 *)location = (u64)buffer;
        return 0;
}
static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
{
        /*
         * ULEB128 is a variable length encoding. Encode the buffer into
         * the ULEB128 data format.
         */
        u8 *p = location;

        while (buffer != 0) {
                u8 value = buffer & 0x7f;

                buffer >>= 7;
                value |= (!!buffer) << 7;

                *p++ = value;
        }
        return 0;
}
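
/*
 * Example: the value 624485 (0x98765) encodes to the bytes e5 8e 26 --
 * each output byte carries 7 payload bits, least-significant group first,
 * with bit 7 set on every byte except the last.
 */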
/*
 * Relocations defined in the riscv-elf-psabi-doc.
 * This handles static linking only.
 */
static const struct relocation_handlers reloc_handlers[] = {
        [R_RISCV_32]            = { .reloc_handler = apply_r_riscv_32_rela },
        [R_RISCV_64]            = { .reloc_handler = apply_r_riscv_64_rela },
        [R_RISCV_RELATIVE]      = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_COPY]          = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_JUMP_SLOT]     = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_DTPMOD32]  = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_DTPMOD64]  = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_DTPREL32]  = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_DTPREL64]  = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_TPREL32]   = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_TLS_TPREL64]   = { .reloc_handler = dynamic_linking_not_supported },
        /* 12-15 undefined */
        [R_RISCV_BRANCH]        = { .reloc_handler = apply_r_riscv_branch_rela },
        [R_RISCV_JAL]           = { .reloc_handler = apply_r_riscv_jal_rela },
        [R_RISCV_CALL]          = { .reloc_handler = apply_r_riscv_call_rela },
        [R_RISCV_CALL_PLT]      = { .reloc_handler = apply_r_riscv_call_plt_rela },
        [R_RISCV_GOT_HI20]      = { .reloc_handler = apply_r_riscv_got_hi20_rela },
        [R_RISCV_TLS_GOT_HI20]  = { .reloc_handler = tls_not_supported },
        [R_RISCV_TLS_GD_HI20]   = { .reloc_handler = tls_not_supported },
        [R_RISCV_PCREL_HI20]    = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
        [R_RISCV_PCREL_LO12_I]  = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
        [R_RISCV_PCREL_LO12_S]  = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
        [R_RISCV_HI20]          = { .reloc_handler = apply_r_riscv_hi20_rela },
        [R_RISCV_LO12_I]        = { .reloc_handler = apply_r_riscv_lo12_i_rela },
        [R_RISCV_LO12_S]        = { .reloc_handler = apply_r_riscv_lo12_s_rela },
        [R_RISCV_TPREL_HI20]    = { .reloc_handler = tls_not_supported },
        [R_RISCV_TPREL_LO12_I]  = { .reloc_handler = tls_not_supported },
        [R_RISCV_TPREL_LO12_S]  = { .reloc_handler = tls_not_supported },
        [R_RISCV_TPREL_ADD]     = { .reloc_handler = tls_not_supported },
        [R_RISCV_ADD8]          = { .reloc_handler = apply_r_riscv_add8_rela,
                                    .accumulate_handler = apply_8_bit_accumulation },
        [R_RISCV_ADD16]         = { .reloc_handler = apply_r_riscv_add16_rela,
                                    .accumulate_handler = apply_16_bit_accumulation },
        [R_RISCV_ADD32]         = { .reloc_handler = apply_r_riscv_add32_rela,
                                    .accumulate_handler = apply_32_bit_accumulation },
        [R_RISCV_ADD64]         = { .reloc_handler = apply_r_riscv_add64_rela,
                                    .accumulate_handler = apply_64_bit_accumulation },
        [R_RISCV_SUB8]          = { .reloc_handler = apply_r_riscv_sub8_rela,
                                    .accumulate_handler = apply_8_bit_accumulation },
        [R_RISCV_SUB16]         = { .reloc_handler = apply_r_riscv_sub16_rela,
                                    .accumulate_handler = apply_16_bit_accumulation },
        [R_RISCV_SUB32]         = { .reloc_handler = apply_r_riscv_sub32_rela,
                                    .accumulate_handler = apply_32_bit_accumulation },
        [R_RISCV_SUB64]         = { .reloc_handler = apply_r_riscv_sub64_rela,
                                    .accumulate_handler = apply_64_bit_accumulation },
        /* 41-42 reserved for future standard use */
        [R_RISCV_ALIGN]         = { .reloc_handler = apply_r_riscv_align_rela },
        [R_RISCV_RVC_BRANCH]    = { .reloc_handler = apply_r_riscv_rvc_branch_rela },
        [R_RISCV_RVC_JUMP]      = { .reloc_handler = apply_r_riscv_rvc_jump_rela },
        /* 46-50 reserved for future standard use */
        [R_RISCV_RELAX]         = { .reloc_handler = apply_r_riscv_relax_rela },
        [R_RISCV_SUB6]          = { .reloc_handler = apply_r_riscv_sub6_rela,
                                    .accumulate_handler = apply_6_bit_accumulation },
        [R_RISCV_SET6]          = { .reloc_handler = apply_r_riscv_set6_rela,
                                    .accumulate_handler = apply_6_bit_accumulation },
        [R_RISCV_SET8]          = { .reloc_handler = apply_r_riscv_set8_rela,
                                    .accumulate_handler = apply_8_bit_accumulation },
        [R_RISCV_SET16]         = { .reloc_handler = apply_r_riscv_set16_rela,
                                    .accumulate_handler = apply_16_bit_accumulation },
        [R_RISCV_SET32]         = { .reloc_handler = apply_r_riscv_set32_rela,
                                    .accumulate_handler = apply_32_bit_accumulation },
        [R_RISCV_32_PCREL]      = { .reloc_handler = apply_r_riscv_32_pcrel_rela },
        [R_RISCV_IRELATIVE]     = { .reloc_handler = dynamic_linking_not_supported },
        [R_RISCV_PLT32]         = { .reloc_handler = apply_r_riscv_plt32_rela },
        [R_RISCV_SET_ULEB128]   = { .reloc_handler = apply_r_riscv_set_uleb128,
                                    .accumulate_handler = apply_uleb128_accumulation },
        [R_RISCV_SUB_ULEB128]   = { .reloc_handler = apply_r_riscv_sub_uleb128,
                                    .accumulate_handler = apply_uleb128_accumulation },
        /* 62-191 reserved for future standard use */
        /* 192-255 nonstandard ABI extensions */
};
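
/*
 * Entries not listed above are zero-initialized by the designated
 * initializers, so their reloc_handler is NULL; apply_relocate_add() treats
 * such types as unknown relocations and rejects them.
 */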
static void
process_accumulated_relocations(struct module *me,
                                struct hlist_head **relocation_hashtable,
                                struct list_head *used_buckets_list)
{
        /*
         * Only ADD/SUB/SET/ULEB128 should end up here.
         *
         * Each bucket may have more than one relocation location. All
         * relocations for a location are stored in a list in a bucket.
         *
         * Relocations are applied to a temp variable before being stored to the
         * provided location to check for overflow. This also allows ULEB128 to
         * properly decide how many entries are needed before storing to
         * location. The final value is stored into location using the handler
         * for the last relocation to an address.
         *
         * Three layers of indexing:
         *      - Each of the buckets in use
         *      - Groups of relocations in each bucket by location address
         *      - Each relocation entry for a location address
         */
        struct used_bucket *bucket_iter;
        struct used_bucket *bucket_iter_tmp;
        struct relocation_head *rel_head_iter;
        struct hlist_node *rel_head_iter_tmp;
        struct relocation_entry *rel_entry_iter;
        struct relocation_entry *rel_entry_iter_tmp;
        int curr_type;
        void *location;
        long buffer;

        list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
                                 used_buckets_list, head) {
                hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
                                          bucket_iter->bucket, node) {
                        buffer = 0;
                        location = rel_head_iter->location;
                        list_for_each_entry_safe(rel_entry_iter,
                                                 rel_entry_iter_tmp,
                                                 rel_head_iter->rel_entry,
                                                 head) {
                                curr_type = rel_entry_iter->type;
                                reloc_handlers[curr_type].reloc_handler(
                                        me, &buffer, rel_entry_iter->value);
                                kfree(rel_entry_iter);
                        }
                        reloc_handlers[curr_type].accumulate_handler(
                                me, location, buffer);
                        kfree(rel_head_iter);
                }
                kfree(bucket_iter);
        }

        kfree(*relocation_hashtable);
}
static int add_relocation_to_accumulate(struct module *me, int type,
                                        void *location,
                                        unsigned int hashtable_bits, Elf_Addr v,
                                        struct hlist_head *relocation_hashtable,
                                        struct list_head *used_buckets_list)
{
        struct relocation_entry *entry;
        struct relocation_head *rel_head;
        struct hlist_head *current_head;
        struct used_bucket *bucket;
        unsigned long hash;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);

        if (!entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&entry->head);
        entry->type = type;
        entry->value = v;

        hash = hash_min((uintptr_t)location, hashtable_bits);

        current_head = &relocation_hashtable[hash];

        /*
         * Search for the relocation_head for the relocations that happen at the
         * provided location
         */
        bool found = false;
        struct relocation_head *rel_head_iter;

        hlist_for_each_entry(rel_head_iter, current_head, node) {
                if (rel_head_iter->location == location) {
                        found = true;
                        rel_head = rel_head_iter;
                        break;
                }
        }

        /*
         * If there have not yet been any relocations at the provided location,
         * create a relocation_head for that location and populate it with this
         * relocation_entry.
         */
        if (!found) {
                rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);

                if (!rel_head) {
                        kfree(entry);
                        return -ENOMEM;
                }

                rel_head->rel_entry =
                        kmalloc(sizeof(struct list_head), GFP_KERNEL);

                if (!rel_head->rel_entry) {
                        kfree(entry);
                        kfree(rel_head);
                        return -ENOMEM;
                }

                INIT_LIST_HEAD(rel_head->rel_entry);
                rel_head->location = location;
                INIT_HLIST_NODE(&rel_head->node);
                if (!current_head->first) {
                        bucket =
                                kmalloc(sizeof(struct used_bucket), GFP_KERNEL);

                        if (!bucket) {
                                kfree(entry);
                                kfree(rel_head->rel_entry);
                                kfree(rel_head);
                                return -ENOMEM;
                        }

                        INIT_LIST_HEAD(&bucket->head);
                        bucket->bucket = current_head;
                        list_add(&bucket->head, used_buckets_list);
                }
                hlist_add_head(&rel_head->node, current_head);
        }

        /* Queue the relocation on the discovered rel_head's list */
        list_add_tail(&entry->head, rel_head->rel_entry);

        return 0;
}
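
/*
 * Relocations are hashed by their target location, so any number of ADD,
 * SUB, SET or ULEB128 relocations aimed at the same address end up on one
 * relocation_head list and are later folded into a single write by
 * process_accumulated_relocations().
 */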
static unsigned int
initialize_relocation_hashtable(unsigned int num_relocations,
                                struct hlist_head **relocation_hashtable)
{
        /* Can safely assume that bits is not greater than BITS_PER_LONG */
        unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
        /*
         * When hashtable_size == 1, hashtable_bits == 0.
         * This is valid because the hashing algorithm returns 0 in this case.
         */
        unsigned int hashtable_bits = ilog2(hashtable_size);

        /*
         * Double the size of the hashtable if num_relocations * 1.25 is
         * greater than hashtable_size.
         */
        int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));

        hashtable_bits += should_double_size;
        hashtable_size <<= should_double_size;

        *relocation_hashtable = kmalloc_array(hashtable_size,
                                              sizeof(**relocation_hashtable),
                                              GFP_KERNEL);
        if (!*relocation_hashtable)
                return 0;

        __hash_init(*relocation_hashtable, hashtable_size);

        return hashtable_bits;
}
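
/*
 * Sizing example: 15 relocations round up to a 16-bucket table (4 bits),
 * but 15 + 15/4 = 18 exceeds 16, so the table is doubled to 32 buckets and
 * hash_min() is told to use 5 bits.
 */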
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                       unsigned int symindex, unsigned int relsec,
                       struct module *me)
{
        Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
        int (*handler)(struct module *me, void *location, Elf_Addr v);
        Elf_Sym *sym;
        void *location;
        unsigned int i, type;
        unsigned int j_idx = 0;
        Elf_Addr v;
        int res;
        unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
        struct hlist_head *relocation_hashtable;
        unsigned int hashtable_bits;
        LIST_HEAD(used_buckets_list);

        hashtable_bits = initialize_relocation_hashtable(num_relocations,
                                                         &relocation_hashtable);

        if (!relocation_hashtable)
                return -ENOMEM;

        pr_debug("Applying relocate section %u to %u\n", relsec,
                 sechdrs[relsec].sh_info);

        for (i = 0; i < num_relocations; i++) {
                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                           + rel[i].r_offset;
                /* This is the symbol it is referring to */
                sym = (Elf_Sym *)sechdrs[symindex].sh_addr
                      + ELF_RISCV_R_SYM(rel[i].r_info);
                if (IS_ERR_VALUE(sym->st_value)) {
                        /* Ignore unresolved weak symbols */
                        if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
                                continue;
                        pr_warn("%s: Unknown symbol %s\n",
                                me->name, strtab + sym->st_name);
                        return -ENOENT;
                }

                type = ELF_RISCV_R_TYPE(rel[i].r_info);

                if (type < ARRAY_SIZE(reloc_handlers))
                        handler = reloc_handlers[type].reloc_handler;
                else
                        handler = NULL;

                if (!handler) {
                        pr_err("%s: Unknown relocation type %u\n",
                               me->name, type);
                        return -EINVAL;
                }

                v = sym->st_value + rel[i].r_addend;
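
                /*
                 * Per the psABI, a PCREL_LO12 relocation's symbol points at
                 * the instruction carrying the matching PCREL_HI20/GOT_HI20
                 * relocation, not at the final target.  The loop below scans
                 * the relocation section for that HI20 entry so the real
                 * target, and from it the low 12 bits, can be recovered.
                 */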
833 if (type
== R_RISCV_PCREL_LO12_I
|| type
== R_RISCV_PCREL_LO12_S
) {
834 unsigned int j
= j_idx
;
838 unsigned long hi20_loc
=
839 sechdrs
[sechdrs
[relsec
].sh_info
].sh_addr
841 u32 hi20_type
= ELF_RISCV_R_TYPE(rel
[j
].r_info
);
843 /* Find the corresponding HI20 relocation entry */
844 if (hi20_loc
== sym
->st_value
845 && (hi20_type
== R_RISCV_PCREL_HI20
846 || hi20_type
== R_RISCV_GOT_HI20
)) {
849 (Elf_Sym
*)sechdrs
[symindex
].sh_addr
850 + ELF_RISCV_R_SYM(rel
[j
].r_info
);
851 unsigned long hi20_sym_val
=
856 size_t offset
= hi20_sym_val
- hi20_loc
;
857 if (IS_ENABLED(CONFIG_MODULE_SECTIONS
)
858 && hi20_type
== R_RISCV_GOT_HI20
) {
859 offset
= module_emit_got_entry(
861 offset
= offset
- hi20_loc
;
863 hi20
= (offset
+ 0x800) & 0xfffff000;
864 lo12
= offset
- hi20
;
872 if (j
> sechdrs
[relsec
].sh_size
/ sizeof(*rel
))
875 } while (j_idx
!= j
);
879 "%s: Can not find HI20 relocation information\n",
884 /* Record the previous j-loop end index */
888 if (reloc_handlers
[type
].accumulate_handler
)
889 res
= add_relocation_to_accumulate(me
, type
, location
,
891 relocation_hashtable
,
894 res
= handler(me
, location
, v
);
899 process_accumulated_relocations(me
, &relocation_hashtable
,
int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s;

        s = find_section(hdr, sechdrs, ".alternative");
        if (s)
                apply_module_alternatives((void *)s->sh_addr, s->sh_size);

        return 0;
}