arch/arm64/kernel/module.c

/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
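
/*
 * Module-local immediate types used by the MOVW relocation handling below:
 * AARCH64_INSN_IMM_MOVNZ is an out-of-band value marking relocations whose
 * instruction must be rewritten as either MOVZ or MOVN depending on the sign
 * of the relocated value, while AARCH64_INSN_IMM_MOVK just encodes the
 * 16-bit immediate without touching the opcode.
 */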
#define AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
#define AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16
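
/*
 * Allocate module memory from the dedicated module region
 * (MODULES_VADDR..MODULES_END).  If KASAN is enabled and the shadow for the
 * new mapping cannot be allocated, the mapping is released again and the
 * allocation fails.
 */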
void *module_alloc(unsigned long size)
{
	void *p;

	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
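
/*
 * Compute the value to be patched in: S + A for absolute relocations,
 * S + A - P for PC-relative ones and Page(S + A) - Page(P) for ADRP-style
 * page relocations (val already holds S + A, place is P).
 */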
static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
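
/*
 * Store a 16-, 32- or 64-bit data value at the relocation place and report
 * -ERANGE if the value is not representable in that many bits.
 */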
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	u64 imm_mask = (1 << len) - 1;
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		break;
	case 32:
		*(s32 *)place = sval;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the value is not representable in
	 * len bits (i.e the bottom len bits are not sign-extended and
	 * the top bits are not all zero).
	 */
	if ((u64)(sval + 1) > 2)
		return -ERANGE;

	return 0;
}
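
/*
 * Patch the 16-bit immediate of a MOVZ/MOVN/MOVK group instruction.  For the
 * AARCH64_INSN_IMM_MOVNZ pseudo type the opcode itself is rewritten to MOVZ
 * or MOVN depending on the sign of the relocated value before the immediate
 * is encoded.
 */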
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	sval >>= lsb;
	imm = sval & 0xffff;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if ((s64)imm >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
		imm_type = AARCH64_INSN_IMM_MOVK;
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field.
	 * The AARCH64_INSN_IMM_16 immediate type is unsigned.
	 */
	if (imm_type != AARCH64_INSN_IMM_16) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}
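
/*
 * Patch a bitfield immediate (ADR, load literal, conditional branch,
 * load/store offset, ...): bits [lsb, lsb + len) of the relocated value are
 * inserted into the instruction and the remaining upper bits are checked
 * for overflow.
 */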
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
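
/*
 * Apply the RELA relocations of section relsec against the module image.
 * Unsupported relocation types and overflows in checked relocations make the
 * module load fail with -ENOEXEC.
 */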
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_16);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVK);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
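		/*
		 * When the Cortex-A53 erratum 843419 workaround is enabled,
		 * the ADRP page relocations below are compiled out, so a
		 * module that uses them is rejected through the
		 * unsupported-relocation path in the default case.
		 */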
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
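
/*
 * Called once all relocations have been applied: scan the section headers
 * for .altinstructions and apply any alternative instruction patching the
 * module carries.
 */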
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
			return 0;
		}
	}

	return 0;
}