// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "Modules: " fmt

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
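/*
 * Worked example for RELOC_OP_PAGE (illustrative addresses, not from the
 * source): with place = 0xffff800008001234 and val = 0xffff800008005678,
 * the result is 0xffff800008005000 - 0xffff800008001000 = 0x4000, a delta
 * of four 4KiB pages, which is exactly what an ADRP instruction encodes.
 */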
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
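	/*
	 * Illustrative consequence (example values, not from the source): a
	 * 32-bit place-relative result of 0x80000000 lies inside the psABI's
	 * unsigned range [-2^31, 2^32) but is rejected below, because under
	 * the signed interpretation it exceeds S32_MAX.
	 */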
	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
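/*
 * Illustrative example (values not from the source): resolving
 * R_AARCH64_MOVW_SABS_G0 for a value of -1 takes the negative branch above,
 * so the opcode becomes MOVN and the immediate is inverted to 0; executing
 * "movn x0, #0" then produces ~0, i.e. -1, as intended.
 */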
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
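/*
 * Note on the overflow test above: the arithmetic shift leaves 0 when the
 * discarded upper bits were all zero and -1 when they were all ones (a
 * correctly sign-extended value); (u64)(sval + 1) >= 2 is a branch-free way
 * of rejecting everything except those two encodings.
 */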
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place,
						  val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}
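/*
 * Design note: ADR and ADRP share an encoding that differs only in bit 31,
 * which is why clearing BIT(31) above is enough to turn a forbidden ADRP
 * into an ADR once the relocation has been recomputed as a plain
 * PC-relative offset.
 */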
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);
		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
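		/*
		 * Background note: the G0..G3 MOVW groups each relocate one
		 * 16-bit slice of a 64-bit value (lsb = 0, 16, 32 or 48), so
		 * a full-width constant is materialised as a MOVZ/MOVN plus
		 * up to three MOVKs, one instruction per group.
		 */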
		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
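		/*
		 * Background note: LDST immediates are scaled by the access
		 * size, so each doubling of the width drops one low bit from
		 * the relocation (lsb goes from 0 up to 4) and shrinks the
		 * field (len from 12 down to 8); lsb + len always covers the
		 * same low 12 bits of the address.
		 */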
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc,
							    &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
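		/*
		 * Design note: B and BL encode a 26-bit immediate in units
		 * of 4 bytes, giving a reach of +/-128MiB; only when that
		 * first attempt fails with -ERANGE does the loader route the
		 * branch through a freshly emitted PLT entry and retry.
		 */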
416 pr_err("module %s: unsupported RELA relocation: %llu\n",
417 me
->name
, ELF64_R_TYPE(rel
[i
].r_info
));
421 if (overflow_check
&& ovf
== -ERANGE
)
429 pr_err("module %s: overflow in relocation type %d val %Lx\n",
430 me
->name
, (int)ELF64_R_TYPE(rel
[i
].r_info
), val
);
static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}
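/*
 * Background note (summarising behaviour, not taken verbatim from the
 * source): a module's callsites may sit too far from the core kernel's
 * ftrace entry code for a direct branch, so each module carries its own
 * trampoline PLT in .text.ftrace_trampoline, initialised above and
 * recorded in mod->arch.ftrace_trampolines.
 */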
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;

	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s)
			__pi_scs_patch((void *)s->sh_addr, s->sh_size);
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}