arch/powerpc/kernel/module_64.c (Linux 2.6.31.6)
/* Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>

#include "setup.h"
/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton?
   --RR.  */
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif
/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions) */
	unsigned char jump[28];
	unsigned char unused[4];
	/* Data for the above code */
	struct ppc64_opd_entry opd;
};
/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
   function which may be more than 24-bits away.  We could simply
   patch the new r2 value and function pointer into the stub, but it's
   significantly shorter to put these values at the end of the stub
   code, and patch the stub address (32-bits relative to the TOC ptr,
   r2) into the stub. */
static struct ppc64_stub_entry ppc64_stub =
{ .jump = {
	0x3d, 0x82, 0x00, 0x00,	/* addis   r12,r2, <high> */
	0x39, 0x8c, 0x00, 0x00,	/* addi    r12,r12, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8, 0x41, 0x00, 0x28,	/* std     r2,40(r1) */
	0xe9, 0x6c, 0x00, 0x20,	/* ld      r11,32(r12) */
	0xe8, 0x4c, 0x00, 0x28,	/* ld      r2,40(r12) */
	0x7d, 0x69, 0x03, 0xa6,	/* mtctr   r11 */
	0x4e, 0x80, 0x04, 0x20	/* bctr */
} };
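
/* Roughly, once create_stub() below has patched <high> and <low> with
 * PPC_HA()/PPC_LO() of the stub's offset from r2, the stub executes as:
 *
 *	r12 = r2 + (<high> << 16) + (sign-extended)<low>;  // = &stub
 *	*(u64 *)(r1 + 40) = r2;       // save caller's TOC on the stack
 *	r11 = *(u64 *)(r12 + 32);     // stub->opd.funcaddr
 *	r2  = *(u64 *)(r12 + 40);     // stub->opd.r2 (callee's TOC)
 *	branch to r11 via ctr;
 *
 * The 32/40 byte offsets land on the opd member because jump[28] plus
 * unused[4] pad the entry to 32 bytes before the descriptor. */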
/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}
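
/* Note that the count above is only exact when the table has been
 * sorted by (symbol, addend) so that duplicates sit next to each
 * other -- that is what the sort() call in get_stubs_size() arranges.
 * For example, a sorted run of REL24 entries with (sym, addend) pairs
 * (3,0) (3,0) (5,0) (5,8) counts as three distinct stubs. */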
static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}
static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			DEBUGP("Found relocations in section %u\n", i);
			DEBUGP("Ptr: %p.  Number: %lu\n",
			       (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol
			 * and addend key. This is a stable O(n*log n)
			 * complexity algorithm but it will reduce the
			 * complexity of count_relocs() to linear
			 * complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.')
			memmove(vers->name, vers->name+1, strlen(vers->name));
}
/* Undefined symbols which refer to .funcname, hack to funcname */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.')
				memmove(name, name+1, strlen(name));
		}
	}
}
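
/* For example, an undefined symbol whose string table entry reads
 * ".printk" is rewritten in place to "printk": the memmove() above
 * shifts the name left by one byte, terminating NUL included, so the
 * leading dot simply disappears from the symbol name. */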
int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;
		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		printk("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means.  */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}
int apply_relocate(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
	return -ENOEXEC;
}
/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value maximum span in an instruction which uses a signed
   offset) */
static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}
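
/* The 0x8000 bias means a signed 16-bit displacement from r2
 * (-0x8000 .. 0x7fff) can reach every byte of a 64k TOC:
 *
 *	toc_base + 0x8000 + (-0x8000) = toc_base            (first byte)
 *	toc_base + 0x8000 + 0x7fff    = toc_base + 0xffff   (last byte)
 */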
/* Both low and high 16 bits are added as SIGNED additions, so if low
   16 bits has high bit set, high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
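
/* Worked example of the HA ("high adjusted") trick: for v = 0x12348000,
 *	PPC_LO(v) = 0x8000  (sign-extends to -0x8000 when used in addi)
 *	PPC_HI(v) = 0x1234
 *	PPC_HA(v) = 0x1235
 * so (0x1235 << 16) + (-0x8000) = 0x12348000, whereas using PPC_HI for
 * the high half would come out 0x10000 short. */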
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      struct ppc64_opd_entry *opd,
			      struct module *me)
{
	Elf64_Half *loc1, *loc2;
	long reladdr;

	*entry = ppc64_stub;

	loc1 = (Elf64_Half *)&entry->jump[2];
	loc2 = (Elf64_Half *)&entry->jump[6];

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		printk("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);

	*loc1 = PPC_HA(reladdr);
	*loc2 = PPC_LO(reladdr);
	entry->opd.funcaddr = opd->funcaddr;
	entry->opd.r2 = opd->r2;
	return 1;
}
/* Create stub to jump to function described in this OPD: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
				   unsigned long opdaddr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	struct ppc64_opd_entry *opd = (void *)opdaddr;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stubs[i].opd.funcaddr; i++) {
		BUG_ON(i >= num_stubs);

		if (stubs[i].opd.funcaddr == opd->funcaddr)
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], opd, me))
		return 0;

	return (unsigned long)&stubs[i];
}
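
/* The scan above relies on the "0-funcaddr terminated" guarantee
 * mentioned in get_stubs_size(): the extra entry reserved there is
 * never written, so the loop is expected to stop at the first slot
 * whose opd.funcaddr is still zero, and the BUG_ON() only fires if
 * the stub estimate was too small. */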
/* We expect a noop next: if it is, replace it with instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (*instruction != PPC_INST_NOP) {
		printk("%s: Expect noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	*instruction = 0xe8410028;	/* ld r2,40(r1) */
	return 1;
}
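
/* Calls that may need a stub are followed by a nop slot (that is what
 * restore_r2() checks for).  After relocation the call site becomes
 *
 *	bl	<stub>		# stub did: std r2,40(r1)
 *	ld	r2,40(r1)	# 0xe8410028, written above
 *
 * so the caller's TOC pointer, saved on the stack by the stub, is
 * reloaded as soon as the callee returns. */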
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16 relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;
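
		/* The "value + 0x8000 > 0xffff" test above is the usual
		 * unsigned trick for a signed 16-bit range check: valid
		 * offsets are -0x8000..0x7fff, and adding 0x8000 maps
		 * exactly that interval onto 0..0xffff, so anything
		 * larger must be out of range.  The REL24 case below
		 * plays the same game with 0x2000000/0x3ffffff for its
		 * signed 26-bit branch displacement. */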
		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;
			}

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				printk("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		default:
			printk("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	me->arch.toc = my_r2(sechdrs, me);
	me->arch.tramp = stub_for_addr(sechdrs,
				       (unsigned long)ftrace_caller,
				       me);
#endif

	return 0;
}