// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/setup.h>
#include <asm/unwind.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif

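/*
 * With CONFIG_RANDOMIZE_BASE, module allocations start at MODULES_VADDR plus
 * a random, page-aligned offset of 1..1024 pages. The offset is computed on
 * the first allocation and then reused for the lifetime of the system.
 */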
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif

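/*
 * Back module allocations with vmalloc memory from the module mapping area,
 * [MODULES_VADDR + load offset, MODULES_END), and set up KASAN shadow for the
 * region before the module is populated.
 */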
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

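/*
 * 32-bit modules use REL relocations (no explicit addend): the addend is the
 * value already stored at the target location, so each fixup is applied by
 * adding to *location in place.
 */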
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
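/*
 * 64-bit modules use RELA relocations. The actual store goes through the
 * write() callback so the same loop serves both early relocation (plain
 * memcpy into a not-yet-live module) and later relocation of live text via
 * text_poke(). A target that already holds a nonzero value is rejected as
 * invalid.
 */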
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me,
		   void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}

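/*
 * For a module that is still being loaded (MODULE_STATE_UNFORMED) the
 * relocations can be written with a plain memcpy. Once the module is live,
 * its text must be patched through text_poke() under text_mutex, followed by
 * text_poke_sync().
 */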
int apply_relocate_add(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}

#endif

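/*
 * Post-relocation finalization: apply alternative instructions, register the
 * module's .smp_locks for SMP lock patching, apply paravirt patches, turn
 * jump label entries into NOPs and hand the ORC tables to the unwinder.
 */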
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}

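/* Undo the SMP lock-patching registration done in module_finalize(). */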
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}