// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel module help for x86.
 * Copyright (C) 2001 Rusty Russell.
 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/moduleloader.h>
10 #include <linux/elf.h>
11 #include <linux/vmalloc.h>
13 #include <linux/string.h>
14 #include <linux/kernel.h>
15 #include <linux/kasan.h>
16 #include <linux/bug.h>
18 #include <linux/gfp.h>
19 #include <linux/jump_label.h>
20 #include <linux/random.h>
22 #include <asm/text-patching.h>
24 #include <asm/pgtable.h>
25 #include <asm/setup.h>
26 #include <asm/unwind.h>
/*
 * DEBUGP(): module-loader debug output.
 *
 * The source shows two conflicting definitions; the guarding #if/#else was
 * lost in extraction, so redefinition would be an error.  Restore the usual
 * kernel pattern: the first branch actually prints, the second compiles to
 * nothing while the dead "if (0)" still type-checks the format string and
 * arguments at every call site.
 */
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
#ifdef CONFIG_RANDOMIZE_BASE
/* Page-granular random offset added to MODULES_VADDR when KASLR is on. */
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

/*
 * Return the randomized offset of the module mapping area.
 *
 * The offset is computed lazily on the first call and then cached, so
 * every module loaded during this boot shares the same base offset.
 * Returns 0 when KASLR is disabled on the command line.
 */
static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
/* No KASLR: modules are mapped starting exactly at MODULES_VADDR. */
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif
68 void *module_alloc(unsigned long size
)
72 if (PAGE_ALIGN(size
) > MODULES_LEN
)
75 p
= __vmalloc_node_range(size
, MODULE_ALIGN
,
76 MODULES_VADDR
+ get_module_load_offset(),
77 MODULES_END
, GFP_KERNEL
,
78 PAGE_KERNEL
, 0, NUMA_NO_NODE
,
79 __builtin_return_address(0));
80 if (p
&& (kasan_module_alloc(p
, size
) < 0)) {
89 int apply_relocate(Elf32_Shdr
*sechdrs
,
91 unsigned int symindex
,
96 Elf32_Rel
*rel
= (void *)sechdrs
[relsec
].sh_addr
;
100 DEBUGP("Applying relocate section %u to %u\n",
101 relsec
, sechdrs
[relsec
].sh_info
);
102 for (i
= 0; i
< sechdrs
[relsec
].sh_size
/ sizeof(*rel
); i
++) {
103 /* This is where to make the change */
104 location
= (void *)sechdrs
[sechdrs
[relsec
].sh_info
].sh_addr
106 /* This is the symbol it is referring to. Note that all
107 undefined symbols have been resolved. */
108 sym
= (Elf32_Sym
*)sechdrs
[symindex
].sh_addr
109 + ELF32_R_SYM(rel
[i
].r_info
);
111 switch (ELF32_R_TYPE(rel
[i
].r_info
)) {
113 /* We add the value into the location given */
114 *location
+= sym
->st_value
;
117 /* Add the value, subtract its position */
118 *location
+= sym
->st_value
- (uint32_t)location
;
121 pr_err("%s: Unknown relocation: %u\n",
122 me
->name
, ELF32_R_TYPE(rel
[i
].r_info
));
129 int apply_relocate_add(Elf64_Shdr
*sechdrs
,
131 unsigned int symindex
,
136 Elf64_Rela
*rel
= (void *)sechdrs
[relsec
].sh_addr
;
141 DEBUGP("Applying relocate section %u to %u\n",
142 relsec
, sechdrs
[relsec
].sh_info
);
143 for (i
= 0; i
< sechdrs
[relsec
].sh_size
/ sizeof(*rel
); i
++) {
144 /* This is where to make the change */
145 loc
= (void *)sechdrs
[sechdrs
[relsec
].sh_info
].sh_addr
148 /* This is the symbol it is referring to. Note that all
149 undefined symbols have been resolved. */
150 sym
= (Elf64_Sym
*)sechdrs
[symindex
].sh_addr
151 + ELF64_R_SYM(rel
[i
].r_info
);
153 DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
154 (int)ELF64_R_TYPE(rel
[i
].r_info
),
155 sym
->st_value
, rel
[i
].r_addend
, (u64
)loc
);
157 val
= sym
->st_value
+ rel
[i
].r_addend
;
159 switch (ELF64_R_TYPE(rel
[i
].r_info
)) {
163 if (*(u64
*)loc
!= 0)
164 goto invalid_relocation
;
168 if (*(u32
*)loc
!= 0)
169 goto invalid_relocation
;
171 if (val
!= *(u32
*)loc
)
175 if (*(s32
*)loc
!= 0)
176 goto invalid_relocation
;
178 if ((s64
)val
!= *(s32
*)loc
)
183 if (*(u32
*)loc
!= 0)
184 goto invalid_relocation
;
188 if ((s64
)val
!= *(s32
*)loc
)
193 if (*(u64
*)loc
!= 0)
194 goto invalid_relocation
;
199 pr_err("%s: Unknown rela relocation: %llu\n",
200 me
->name
, ELF64_R_TYPE(rel
[i
].r_info
));
207 pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
208 (int)ELF64_R_TYPE(rel
[i
].r_info
), loc
, val
);
212 pr_err("overflow in relocation type %d val %Lx\n",
213 (int)ELF64_R_TYPE(rel
[i
].r_info
), val
);
214 pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
220 int module_finalize(const Elf_Ehdr
*hdr
,
221 const Elf_Shdr
*sechdrs
,
224 const Elf_Shdr
*s
, *text
= NULL
, *alt
= NULL
, *locks
= NULL
,
225 *para
= NULL
, *orc
= NULL
, *orc_ip
= NULL
;
226 char *secstrings
= (void *)hdr
+ sechdrs
[hdr
->e_shstrndx
].sh_offset
;
228 for (s
= sechdrs
; s
< sechdrs
+ hdr
->e_shnum
; s
++) {
229 if (!strcmp(".text", secstrings
+ s
->sh_name
))
231 if (!strcmp(".altinstructions", secstrings
+ s
->sh_name
))
233 if (!strcmp(".smp_locks", secstrings
+ s
->sh_name
))
235 if (!strcmp(".parainstructions", secstrings
+ s
->sh_name
))
237 if (!strcmp(".orc_unwind", secstrings
+ s
->sh_name
))
239 if (!strcmp(".orc_unwind_ip", secstrings
+ s
->sh_name
))
244 /* patch .altinstructions */
245 void *aseg
= (void *)alt
->sh_addr
;
246 apply_alternatives(aseg
, aseg
+ alt
->sh_size
);
249 void *lseg
= (void *)locks
->sh_addr
;
250 void *tseg
= (void *)text
->sh_addr
;
251 alternatives_smp_module_add(me
, me
->name
,
252 lseg
, lseg
+ locks
->sh_size
,
253 tseg
, tseg
+ text
->sh_size
);
257 void *pseg
= (void *)para
->sh_addr
;
258 apply_paravirt(pseg
, pseg
+ para
->sh_size
);
261 /* make jump label nops */
262 jump_label_apply_nops(me
);
265 unwind_module_init(me
, (void *)orc_ip
->sh_addr
, orc_ip
->sh_size
,
266 (void *)orc
->sh_addr
, orc
->sh_size
);
/*
 * Arch-specific teardown when @mod is unloaded: deregister its SMP
 * lock sites added in module_finalize().
 */
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}