/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_4G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
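
/*
 * Note: the SZ_4G fallback window above is only workable because
 * CONFIG_ARM64_MODULE_PLTS routes branches that cannot reach their targets
 * directly through PLT veneers (see the R_AARCH64_JUMP26/CALL26 handling in
 * apply_relocate_add() below), so modules placed outside the default
 * MODULES_VSIZE region still link correctly.
 */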
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
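
/*
 * Data relocation helper. A 16-bit or 32-bit field may legitimately hold
 * either a signed or an unsigned quantity, so the checks below only reject
 * values outside the union of both ranges ([S16_MIN, U16_MAX] and
 * [S32_MIN, U32_MAX] respectively); a 64-bit field can never overflow.
 */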
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
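
/*
 * MOVW immediate handling: MOVKZ-type relocations insert the extracted
 * 16-bit field as-is (MOVK/MOVZ), while MOVNZ-type relocations may also
 * rewrite the instruction between MOVZ and MOVN depending on the sign of
 * the relocated value.
 */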
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
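
/*
 * Worked example (illustrative): for a MOVNZ-type relocation with lsb = 0 and
 * sval = -0x1234, the field value becomes imm = ~(sval >> 0) & 0xffff = 0x1233
 * and the opcode is rewritten to MOVN, which the CPU decodes back to
 * NOT(0x1233) = -0x1234.
 */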
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
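
/*
 * Cortex-A53 erratum 843419 workaround: an ADRP in one of the last two
 * instruction slots of a 4 KiB page (offset 0xff8 or 0xffc) can produce an
 * incorrect result on affected cores. When the workaround is enabled, such
 * an ADRP is either rewritten as an ADR (if the target page is within
 * +/-1 MiB) or redirected through a veneer emitted by
 * module_emit_veneer_for_adrp().
 */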
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
{
	u32 insn;

	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
	    !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
	    ((u64)place & 0xfff) < 0xff8)
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;

		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
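			/*
			 * If the branch target is outside the +/-128 MiB range
			 * of a direct B/BL and module PLTs are configured,
			 * retry via a PLT veneer emitted for this call site.
			 */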
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}