/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;
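
	/*
	 * Try the dedicated module region first; it is sized MODULES_VSIZE
	 * and, per the comment below, its base may have been randomized at
	 * boot (unless KASAN is enabled).
	 */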
	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));
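
	/*
	 * kasan_module_alloc() sets up shadow memory for the new region;
	 * if that fails, the mapping cannot be used and must be freed.
	 */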
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
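
/*
 * Compute the value a relocation resolves to: absolute (S + A),
 * place-relative (S + A - P), or page-relative (Page(S + A) - Page(P)),
 * following the operations in the AArch64 ELF document.
 */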
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
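
/*
 * Write a 16-, 32- or 64-bit datum to the place being relocated,
 * reporting -ERANGE when a 16- or 32-bit value is out of range.
 */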
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
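
/*
 * MOVW relocations come in two flavours: MOVNZ relocations may rewrite
 * the instruction as MOVZ or MOVN depending on the sign of the value,
 * while MOVKZ relocations leave the opcode untouched.
 */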
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
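
/*
 * Patch the 16-bit immediate of a MOVZ/MOVN/MOVK instruction with bits
 * [lsb + 15:lsb] of the relocated value.
 */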
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
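
/*
 * Patch an arbitrary immediate field: insert bits [lsb + len - 1:lsb]
 * of the relocated value and check the remaining upper bits for
 * sign-extension overflow.
 */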
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
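
/*
 * Apply the RELA relocations in section relsec, resolving each entry
 * against the module's symbol table.
 */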
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;
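
		/*
		 * For the *_NC ("no check") relocations below, overflow
		 * checking is disabled and control deliberately falls
		 * through to the checked counterpart of each case.
		 */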
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
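		/*
		 * With the Cortex-A53 erratum 843419 workaround enabled,
		 * modules are expected to be built so that no vulnerable
		 * ADRP instructions are emitted (e.g. using the large code
		 * model); the cases below are then compiled out, and any
		 * stray ADR_PREL_PG_HI21 relocation hits the default case.
		 */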
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
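
			/*
			 * B and BL have a +/-128 MiB range; if the target
			 * is further away and module PLTs are available,
			 * route the branch through a PLT entry instead.
			 */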
			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
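		/*
		 * Patch in any alternative instruction sequences required
		 * by the CPUs that are actually present, now that the
		 * module's sections have been relocated.
		 */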
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
		}
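		/*
		 * Dynamic ftrace with module PLTs needs a trampoline that
		 * is within branching range of the module's code; record
		 * where its .text.ftrace_trampoline section was loaded.
		 */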
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}