// SPDX-License-Identifier: GPL-2.0+
/*
 *  Kernel module help for s390.
 *
 *  Copyright IBM Corp. 2002, 2003
 *  Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	       Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on i386 version
 *    Copyright (C) 2001 Rusty Russell.
 */

#include <linux/module.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/facility.h>

#define DEBUGP(fmt, ...)

#define PLT_ENTRY_SIZE 20

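/*
 * Layout of the 20-byte PLT entries built in apply_rela() below:
 * five 32-bit words holding the basr/lg/branch instruction sequence
 * followed by the 64-bit target address.
 */
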
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;
	p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}
	return p;
}

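/*
 * Note: arch.syminfo is kept around for livepatch modules that are
 * already live, presumably so that relocations applied after loading
 * (late livepatch relocations) can still use the GOT/PLT bookkeeping.
 */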
void module_arch_freeing_init(struct module *mod)
{
	if (is_livepatch_module(mod) &&
	    mod->state == MODULE_STATE_LIVE)
		return;

	vfree(mod->arch.syminfo);
	mod->arch.syminfo = NULL;
}

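/*
 * First pass over one relocation: note whether the referenced symbol
 * needs a GOT and/or PLT slot and grow arch.got_size/arch.plt_size
 * accordingly.
 */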
static void check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM(rela->r_info);
	switch (ELF_R_TYPE(rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void *);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry.  */
	case R_390_JMP_SLOT:	/* Create PLT entry.  */
	case R_390_RELATIVE:	/* Adjust by program base.  */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}

/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 */
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(array_size(sizeof(struct mod_arch_syminfo),
					      me->arch.nsyms));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_layout.size = ALIGN(me->core_layout.size, 4);
	me->arch.got_offset = me->core_layout.size;
	me->core_layout.size += me->arch.got_size;
	me->arch.plt_offset = me->core_layout.size;
	if (me->arch.plt_size) {
		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
			me->arch.plt_size += PLT_ENTRY_SIZE;
		me->core_layout.size += me->arch.plt_size;
	}
	return 0;
}

static int apply_rela_bits(Elf_Addr loc, Elf_Addr val,
			   int sign, int bits, int shift,
			   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned long umax;
	long min, max;
	void *dest = (void *)loc;

	if (val & ((1UL << shift) - 1))
		return -ENOEXEC;
	if (sign) {
		val = (Elf_Addr)(((long) val) >> shift);
		min = -(1L << (bits - 1));
		max = (1L << (bits - 1)) - 1;
		if ((long) val < min || (long) val > max)
			return -ENOEXEC;
	} else {
		val >>= shift;
		umax = ((1UL << (bits - 1)) << 1) - 1;
		if ((unsigned long) val > umax)
			return -ENOEXEC;
	}

	if (bits == 8) {
		unsigned char tmp = val;
		write(dest, &tmp, 1);
	} else if (bits == 12) {
		unsigned short tmp = (val & 0xfff) |
			(*(unsigned short *) loc & 0xf000);
		write(dest, &tmp, 2);
	} else if (bits == 16) {
		unsigned short tmp = val;
		write(dest, &tmp, 2);
	} else if (bits == 20) {
		unsigned int tmp = (val & 0xfff) << 16 |
			(val & 0xff000) >> 4 | (*(unsigned int *) loc & 0xf00000ff);
		write(dest, &tmp, 4);
	} else if (bits == 32) {
		unsigned int tmp = val;
		write(dest, &tmp, 4);
	} else if (bits == 64) {
		unsigned long tmp = val;
		write(dest, &tmp, 8);
	}
	return 0;
}

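/*
 * Apply one RELA relocation; GOT and PLT entries for a symbol are
 * created lazily the first time a relocation refers to them.
 */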
static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
		      const char *strtab, struct module *me,
		      void *(*write)(void *dest, const void *src, size_t len))
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;
	int rc = -ENOEXEC;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to.  Note that all
	   undefined symbols have been resolved.  */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_NONE:	/* No relocation.  */
		rc = 0;
		break;
	case R_390_8:		/* Direct 8 bit.   */
	case R_390_12:		/* Direct 12 bit.  */
	case R_390_16:		/* Direct 16 bit.  */
	case R_390_20:		/* Direct 20 bit.  */
	case R_390_32:		/* Direct 32 bit.  */
	case R_390_64:		/* Direct 64 bit.  */
		val += rela->r_addend;
		if (r_type == R_390_8)
			rc = apply_rela_bits(loc, val, 0, 8, 0, write);
		else if (r_type == R_390_12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_PC16:	/* PC relative 16 bit.  */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1.  */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1.  */
	case R_390_PC32:	/* PC relative 32 bit.  */
	case R_390_PC64:	/* PC relative 64 bit.  */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			rc = apply_rela_bits(loc, val, 1, 16, 0, write);
		else if (r_type == R_390_PC16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PC32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PC32)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_PC64)
			rc = apply_rela_bits(loc, val, 1, 64, 0, write);
		break;
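	/*
	 * GOT relocations: on first use the symbol value is written into
	 * the module's GOT slot, then the relocation itself encodes the
	 * slot's offset (or, for the *ENT types, the PC-relative distance
	 * to the slot).
	 */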
	case R_390_GOT12:	/* 12 bit GOT offset.  */
	case R_390_GOT16:	/* 16 bit GOT offset.  */
	case R_390_GOT20:	/* 20 bit GOT offset.  */
	case R_390_GOT32:	/* 32 bit GOT offset.  */
	case R_390_GOT64:	/* 64 bit GOT offset.  */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot.  */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot.  */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot.  */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot.  */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot.  */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent = me->core_layout.base +
					   me->arch.got_offset +
					   info->got_offset;

			write(gotent, &val, sizeof(*gotent));
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			rc = apply_rela_bits(loc, val, 0, 12, 0, write);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			rc = apply_rela_bits(loc, val, 1, 20, 0, write);
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT) {
			val += (Elf_Addr) me->core_layout.base - loc;
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		}
		break;
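	/*
	 * PLT relocations: the first reference builds a PLT entry that
	 * loads the 64-bit target address stored behind the instructions
	 * and branches to it; with expolines enabled the branch goes
	 * through the shared __jump_r1 thunk placed at the end of the
	 * PLT area.
	 */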
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1.  */
	case R_390_PLT32:	/* 32 bit PC relative PLT address.  */
	case R_390_PLT64:	/* 64 bit PC relative PLT address.  */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_initialized == 0) {
			unsigned int insn[5];
			unsigned int *ip = me->core_layout.base +
					   me->arch.plt_offset +
					   info->plt_offset;

			insn[0] = 0x0d10e310;	/* basr 1,0  */
			insn[1] = 0x100a0004;	/* lg	1,10(1) */
			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
				unsigned int *ij;

				ij = me->core_layout.base +
					me->arch.plt_offset +
					me->arch.plt_size - PLT_ENTRY_SIZE;
				insn[2] = 0xa7f40000 +	/* j __jump_r1 */
					(unsigned int)(u16)
					(((unsigned long) ij - 8 -
					  (unsigned long) ip) / 2);
			} else {
				insn[2] = 0x07f10000;	/* br %r1 */
			}
			insn[3] = (unsigned int) (val >> 32);
			insn[4] = (unsigned int) val;

			write(ip, insn, sizeof(insn));
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32 ||
		    r_type == R_390_PLTOFF64)
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else {
			if (!((r_type == R_390_PLT16DBL &&
			       val - loc + 0xffffUL < 0x1ffffeUL) ||
			      (r_type == R_390_PLT32DBL &&
			       val - loc + 0xffffffffULL < 0x1fffffffeULL)))
				val = (Elf_Addr) me->core_layout.base +
					me->arch.plt_offset +
					info->plt_offset;
			val += rela->r_addend - loc;
		}
		if (r_type == R_390_PLT16DBL)
			rc = apply_rela_bits(loc, val, 1, 16, 1, write);
		else if (r_type == R_390_PLTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_PLT32DBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
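	/*
	 * GOTOFF/GOTPC relocations encode distances relative to the
	 * module's GOT base rather than to an individual GOT entry.
	 */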
	case R_390_GOTOFF16:	/* 16 bit offset to GOT.  */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT.  */
		val = val + rela->r_addend -
			((Elf_Addr) me->core_layout.base + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			rc = apply_rela_bits(loc, val, 0, 16, 0, write);
		else if (r_type == R_390_GOTOFF32)
			rc = apply_rela_bits(loc, val, 0, 32, 0, write);
		else if (r_type == R_390_GOTOFF64)
			rc = apply_rela_bits(loc, val, 0, 64, 0, write);
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			rc = apply_rela_bits(loc, val, 1, 32, 0, write);
		else if (r_type == R_390_GOTPCDBL)
			rc = apply_rela_bits(loc, val, 1, 32, 1, write);
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry.  */
	case R_390_JMP_SLOT:	/* Create PLT entry.  */
	case R_390_RELATIVE:	/* Adjust by program base.  */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	default:
		printk(KERN_ERR "module %s: unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	if (rc) {
		printk(KERN_ERR "module %s: relocation error for symbol %s "
		       "(r_type %i, value 0x%lx)\n",
		       me->name, strtab + symtab[r_sym].st_name,
		       r_type, (unsigned long) val);
		return rc;
	}
	return 0;
}

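/*
 * Walk one RELA section and apply each relocation with the given write
 * callback.
 */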
static int __apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
				unsigned int symindex, unsigned int relsec,
				struct module *me,
				void *(*write)(void *dest, const void *src, size_t len))
{
	Elf_Addr base;
	Elf_Sym *symtab;
	Elf_Rela *rela;
	unsigned long i, n;
	int rc;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
	rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
	n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);

	for (i = 0; i < n; i++, rela++) {
		rc = apply_rela(rela, base, symtab, strtab, me, write);
		if (rc)
			return rc;
	}
	return 0;
}

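/*
 * Relocations applied while the module is still being laid out can use
 * plain memcpy; once the module is formed (e.g. late livepatch
 * relocations) the text may already be write-protected, so
 * s390_kernel_write is used instead.
 */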
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
		       unsigned int symindex, unsigned int relsec,
		       struct module *me)
{
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early)
		write = s390_kernel_write;

	return __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				    write);
}

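/*
 * module_finalize: fill in the expoline jump thunk at the end of the
 * PLT area (when used), patch .altinstructions, hand the
 * .s390_indirect/.s390_return branch location sections to
 * nospec_revert, and apply jump label nops.
 */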
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	char *secstrings, *secname;
	void *aseg;

	if (IS_ENABLED(CONFIG_EXPOLINE) &&
	    !nospec_disable && me->arch.plt_size) {
		unsigned int *ij;

		ij = me->core_layout.base + me->arch.plt_offset +
			me->arch.plt_size - PLT_ENTRY_SIZE;
		if (test_facility(35)) {
			ij[0] = 0xc6000000;	/* exrl	%r0,.+10	*/
			ij[1] = 0x0005a7f4;	/* j	.		*/
			ij[2] = 0x000007f1;	/* br	%r1		*/
		} else {
			ij[0] = 0x44000000 | (unsigned int)
				offsetof(struct lowcore, br_r1_trampoline);
			ij[1] = 0xa7f40000;	/* j	.		*/
		}
	}

	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		aseg = (void *) s->sh_addr;
		secname = secstrings + s->sh_name;

		if (!strcmp(".altinstructions", secname))
			/* patch .altinstructions */
			apply_alternatives(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_indirect")))
			nospec_revert(aseg, aseg + s->sh_size);

		if (IS_ENABLED(CONFIG_EXPOLINE) &&
		    (str_has_prefix(secname, ".s390_return")))
			nospec_revert(aseg, aseg + s->sh_size);
	}

	jump_label_apply_nops(me);
	return 0;
}