arch/powerpc/kernel/module_64.c

/*  Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>

#include "setup.h"

/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR.  */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions) */
	unsigned char jump[28];
	unsigned char unused[4];
	/* Data for the above code */
	struct ppc64_opd_entry opd;
};

/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
   function which may be more than 24-bits away.  We could simply
   patch the new r2 value and function pointer into the stub, but it's
   significantly shorter to put these values at the end of the stub
   code, and patch the stub address (32-bits relative to the TOC ptr,
   r2) into the stub. */
static struct ppc64_stub_entry ppc64_stub =
{ .jump = {
	0x3d, 0x82, 0x00, 0x00,	/* addis   r12,r2, <high> */
	0x39, 0x8c, 0x00, 0x00,	/* addi    r12,r12, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8, 0x41, 0x00, 0x28,	/* std     r2,40(r1) */
	0xe9, 0x6c, 0x00, 0x20,	/* ld      r11,32(r12) */
	0xe8, 0x4c, 0x00, 0x28,	/* ld      r2,40(r12) */
	0x7d, 0x69, 0x03, 0xa6,	/* mtctr   r11 */
	0x4e, 0x80, 0x04, 0x20	/* bctr */
} };
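
/* How the stub works: create_stub() patches the <high>/<low> immediates
   (jump[2] and jump[6]) so that addis/addi leave r12 pointing at the stub
   entry itself.  The two ld instructions then pick up opd.funcaddr and
   opd.r2 from offsets 32 and 40 of the entry (28 bytes of code plus 4
   unused bytes put opd at offset 32), while the std stashes the caller's
   r2 at 40(r1) so restore_r2() can patch the instruction after the call
   site to reload it. */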

/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}
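
/* Note that count_relocs() only compares each reloc against the previous
   one it counted, so it relies on get_stubs_size() having sorted the
   section by (r_info, r_addend) first; on sorted input a single pass is
   enough to count the distinct (symbol, addend) pairs. */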

static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			DEBUGP("Found relocations in section %u\n", i);
			DEBUGP("Ptr: %p.  Number: %lu\n",
			       (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol
			 * and addend key.  This is a stable O(n*log n)
			 * algorithm, but it reduces count_relocs() to linear
			 * complexity O(n).
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}
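
/* With the layout above, each stub entry is 48 bytes (28 bytes of code,
   4 bytes of padding, 16 bytes of opd data), so the .stubs section ends up
   sized at 48 bytes per distinct REL24 target plus one terminator entry
   (and one more for the ftrace trampoline when it is configured). */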

static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.')
			memmove(vers->name, vers->name+1, strlen(vers->name));
}

/* Undefined symbols which refer to .funcname, hack to funcname */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.')
				memmove(name, name+1, strlen(name));
		}
	}
}

int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		printk("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means.  */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}
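
/* module_frob_arch_sections() runs before the module loader lays out and
   allocates the final sections, so bumping .stubs' sh_size here is what
   reserves room for the trampolines; the entries themselves are only
   filled in lazily by stub_for_addr() while relocations are applied. */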

/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value maximum span in an instruction which uses a signed
   offset) */
static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}
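
/* A D-form load/store or addi has a signed 16-bit displacement
   (-0x8000 .. 0x7fff), so biasing r2 by +0x8000 lets a single such
   instruction reach the whole first 64KB of the TOC rather than only
   the first 32KB. */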

/* Both low and high 16 bits are added as SIGNED additions, so if low
   16 bits has high bit set, high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
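
/* Worked example: for v = 0x00018000, PPC_LO(v) = 0x8000, which addi
   sign-extends to -0x8000.  PPC_HA(v) = PPC_HI(0x00020000) = 0x0002, so
   addis contributes 0x20000 and 0x20000 - 0x8000 = 0x18000 as required;
   the unadjusted PPC_HI(v) = 0x0001 would give 0x10000 - 0x8000 = 0x8000
   instead. */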

/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      struct ppc64_opd_entry *opd,
			      struct module *me)
{
	Elf64_Half *loc1, *loc2;
	long reladdr;

	*entry = ppc64_stub;

	loc1 = (Elf64_Half *)&entry->jump[2];
	loc2 = (Elf64_Half *)&entry->jump[6];

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		printk("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);

	*loc1 = PPC_HA(reladdr);
	*loc2 = PPC_LO(reladdr);
	entry->opd.funcaddr = opd->funcaddr;
	entry->opd.r2 = opd->r2;
	return 1;
}

/* Create stub to jump to function described in this OPD: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
				   unsigned long opdaddr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	struct ppc64_opd_entry *opd = (void *)opdaddr;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stubs[i].opd.funcaddr; i++) {
		BUG_ON(i >= num_stubs);

		if (stubs[i].opd.funcaddr == opd->funcaddr)
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], opd, me))
		return 0;

	return (unsigned long)&stubs[i];
}
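
/* The search above walks entries until it hits one with a zero funcaddr;
   get_stubs_size() reserved one extra entry precisely so that a 0-funcaddr
   terminator (and hence a free slot for a new stub) always exists. */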

/* We expect a noop next: if it is, replace it with instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (*instruction != PPC_INST_NOP) {
		printk("%s: Expect noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	*instruction = 0xe8410028;	/* ld r2,40(r1) */
	return 1;
}
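
/* This pairs with the "std r2,40(r1)" in the stub: the compiler leaves a
   nop in the slot after a call that may go through a trampoline, and we
   patch that slot to reload the caller's TOC pointer once the callee
   returns. */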

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16 relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;
			}

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				printk("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		default:
			printk("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}
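
	/* On the 64-bit ELFv1 ABI a C function pointer such as ftrace_caller
	   is the address of its function descriptor (OPD entry), so it can be
	   handed straight to stub_for_addr() to build the ftrace trampoline
	   below. */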
#ifdef CONFIG_DYNAMIC_FTRACE
	me->arch.toc = my_r2(sechdrs, me);
	me->arch.tramp = stub_for_addr(sechdrs,
				       (unsigned long)ftrace_caller,
				       me);
#endif

	return 0;
}