arch/x86/kernel/module.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Kernel module help for x86.
    Copyright (C) 2001 Rusty Russell.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/memory.h>

#include <asm/text-patching.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/unwind.h>
#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
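/*
 * Note that the disabled DEBUGP() variant above still feeds its
 * arguments to printk() inside "if (0)", so format strings keep being
 * type-checked by the compiler even when debugging is compiled out,
 * while the dead call itself is optimized away.
 */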
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;

/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);

static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif
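/*
 * Worked example: get_random_int() % 1024 + 1 is uniform over 1..1024,
 * so with the usual 4 KiB pages the module area base is shifted by
 * between 4 KiB and 4 MiB when KASLR is active. The offset is computed
 * once and then reused for every subsequent module load until reboot.
 */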
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
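/*
 * Rough usage sketch (illustrative only; the real caller is the
 * generic module loader in kernel/module.c, which is more involved):
 *
 *	void *base = module_alloc(size);
 *	if (!base)
 *		return -ENOMEM;
 *	memcpy(base, image, size);	// then apply the relocations below
 */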
#ifdef CONFIG_X86_32
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
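/*
 * These are REL entries, so the addend A is whatever is already stored
 * at the target location (hence the "+=" above): R_386_32 computes
 * S + A and R_386_PC32 computes S + A - P, with S the symbol value and
 * P the address of the patched location.
 */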
#else /*X86_64*/
static int __apply_relocate_add(Elf64_Shdr *sechdrs,
				const char *strtab,
				unsigned int symindex,
				unsigned int relsec,
				struct module *me,
				void *(*write)(void *dest, const void *src, size_t len))
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 8);
			break;
		case R_X86_64_32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			if (*(s32 *)loc != 0)
				goto invalid_relocation;
			write(loc, &val, 4);
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
		case R_X86_64_PLT32:
			if (*(u32 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 4);
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		case R_X86_64_PC64:
			if (*(u64 *)loc != 0)
				goto invalid_relocation;
			val -= (u64)loc;
			write(loc, &val, 8);
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

invalid_relocation:
	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
	return -ENOEXEC;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
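/*
 * RELA semantics of the cases above, per the x86-64 psABI (S = symbol
 * value, A = explicit addend, P = address of the patched location):
 *
 *	R_X86_64_64			S + A		(64-bit)
 *	R_X86_64_32 / _32S		S + A		(must fit when
 *						zero-/sign-extended)
 *	R_X86_64_PC32 / _PLT32		S + A - P	(32-bit)
 *	R_X86_64_PC64			S + A - P	(64-bit)
 *
 * The overflow checks catch objects not built with -mcmodel=kernel,
 * whose 32-bit fields cannot span the kernel's high address range.
 */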
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	int ret;
	bool early = me->state == MODULE_STATE_UNFORMED;
	void *(*write)(void *, const void *, size_t) = memcpy;

	if (!early) {
		write = text_poke;
		mutex_lock(&text_mutex);
	}

	ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
				   write);

	if (!early) {
		text_poke_sync();
		mutex_unlock(&text_mutex);
	}

	return ret;
}
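/*
 * While a module is still MODULE_STATE_UNFORMED nothing can execute
 * its text, so writing relocations with a plain memcpy() is safe.
 * Relocations applied later (e.g. by livepatch) may target code that
 * is already live, so those must go through text_poke() under
 * text_mutex, followed by text_poke_sync() so every CPU sees the new
 * instructions.
 */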
#endif
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL, *orc = NULL, *orc_ip = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
			orc = s;
		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
			orc_ip = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}

	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	if (orc && orc_ip)
		unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
				   (void *)orc->sh_addr, orc->sh_size);

	return 0;
}
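/*
 * Summary of the finalize steps above: apply_alternatives() patches
 * CPU-feature-dependent instruction sequences, alternatives_smp_module_add()
 * records the .smp_locks list so LOCK prefixes can be toggled for SMP,
 * apply_paravirt() rewrites paravirt call sites, jump labels are turned
 * into NOPs, and the ORC tables are registered with the unwinder.
 */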
void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}