/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

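/*
 * The MOVW relocation helpers below distinguish two pseudo immediate
 * types: AARCH64_INSN_IMM_MOVNZ marks signed MOVW relocations whose
 * instruction may be rewritten as MOVZ or MOVN depending on the sign of
 * the value, while AARCH64_INSN_IMM_MOVK simply aliases the plain
 * unsigned 16-bit immediate field.
 */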
#define	AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
#define	AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16

void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

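/*
 * Note: allocating from the MODULES_VADDR..MODULES_END window (rather
 * than the general vmalloc area) keeps module text within direct branch
 * range of the core kernel, so the CALL26/JUMP26 relocations handled
 * below can be resolved without veneers.
 */
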
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

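/*
 * Illustration (made-up addresses): for an ADRP at place P =
 * 0xffff000000081000 referring to val = 0xffff000000123456,
 * RELOC_OP_PAGE yields 0xffff000000123000 - 0xffff000000081000 =
 * 0xa2000, i.e. the page displacement that the ADRP immediate encodes.
 */
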
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	u64 imm_mask = GENMASK(len - 1, 0);	/* avoids the undefined 1 << 64 for len == 64 */
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		break;
	case 32:
		*(s32 *)place = sval;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the value is not representable in
	 * len bits (i.e. the bottom len bits are not sign-extended and
	 * the top bits are not all zero).
	 */
	if ((u64)(sval + 1) > 2)
		return -ERANGE;

	return 0;
}

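/*
 * Worked example for the check above (len == 16): val = 0x12345 leaves
 * upper bits 0x2 after the shift, so (u64)(0x2 + 1) > 2 and -ERANGE is
 * returned; val = 0xffff leaves 0x1 (valid as unsigned) and val = -1
 * leaves -1 (valid as signed), both of which are accepted.
 */
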
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_imm_type imm_type)
{
	bool is_signed = (imm_type == AARCH64_INSN_IMM_MOVNZ);
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm = sval & 0xffff;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
		imm_type = AARCH64_INSN_IMM_MOVK;
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field.
	 * The AARCH64_INSN_IMM_16 immediate type is unsigned, but note that
	 * AARCH64_INSN_IMM_MOVK aliases it, so test the original type
	 * (saved in is_signed) rather than imm_type, which was rewritten
	 * above for the MOVNZ case.
	 */
	if (is_signed) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}

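/*
 * Illustration: R_AARCH64_MOVW_SABS_G0 with a value of -2 becomes
 * MOVN #1 (since ~1 == -2). After shifting out the field, sval == -1,
 * which the signed check above accepts (sval + 1 == 0, within
 * limit == 1).
 */
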
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

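/*
 * Example: R_AARCH64_CALL26 passes lsb = 2 and len = 26, so the branch
 * displacement must be a multiple of 4 that sign-extends from bits
 * [27:2], i.e. lie within +/-128 MiB of the branch, or the check above
 * reports -ERANGE.
 */
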
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

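		/*
		 * ABS64/PREL64 disable the overflow check above because a
		 * 64-bit value can never overflow a 64-bit place; the 32-
		 * and 16-bit variants remain checked.
		 */
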
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

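		/*
		 * The G0-G3 groups above each materialise one 16-bit slice
		 * of the value (bits [15:0], [31:16], [47:32] and [63:48]
		 * respectively), which is how a full 64-bit address is
		 * built up from a MOVZ/MOVK (or MOVN/MOVK) sequence.
		 */
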
		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
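		/*
		 * With the Cortex-A53 843419 erratum workaround enabled,
		 * the ADRP-based page relocations are compiled out below,
		 * so modules that use them are rejected via the default
		 * case instead.
		 */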
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
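		/*
		 * The LDSTn cases pass lsb = log2(n / 8): the unsigned
		 * 12-bit offset of a load/store immediate is scaled by the
		 * access size, so the low lsb bits of the value are assumed
		 * to be zero (naturally aligned access) and only bits
		 * [11:lsb] are encoded.
		 */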
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

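/*
 * module_finalize below applies the kernel's runtime "alternatives"
 * patching to any .altinstructions section the module carries, so that
 * CPU feature and errata dependent code sequences are fixed up at load
 * time, just as they are for the core kernel image.
 */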
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}