/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */
/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
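/*
 * For illustration (editorial addition, not in the original): an ia64 ELF
 * reloc type number decomposes into a <value formula, target format> pair.
 * E.g., for R_IA64_GPREL22 (0x2a):
 *
 *	formula = (0x2a >> VALUE_SHIFT)  & VALUE_MASK  = 5  (RV_GPREL below)
 *	format  = (0x2a >> FORMAT_SHIFT) & FORMAT_MASK = 2  (RF_INSN22 below)
 *
 * i.e., compute @gprel(S + A) and patch it into an imm22 instruction field.
 */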
enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};
enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes.  */
};
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N
/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
		       mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
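/*
 * Added note (editorial, not in the original): the imm22 operand of an ALU
 * instruction is scattered across four non-contiguous instruction fields,
 * which is what the mask 0x01fffcfe000 and the four shifted slices above
 * encode:
 *
 *	val bits  0- 6  ->  insn bits 13-19  (imm7b)
 *	val bits  7-15  ->  insn bits 27-35  (imm9d)
 *	val bits 16-20  ->  insn bits 22-26  (imm5c)
 *	val bit  21     ->  insn bit  36     (s, the sign bit)
 */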
static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
#if USE_BRL

struct plt_entry {
	/* Two 16-byte instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}
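/*
 * Added note (editorial): plt_target() is the inverse of the patching done
 * in patch_plt() above -- it gathers the imm60 pieces (imm20b, imm39, i)
 * that apply_imm60() scattered into the brl bundle, reassembles the signed
 * bundle-count displacement, and converts it back into an absolute address
 * relative to &bundle[1].  get_plt() relies on this round-trip to recognize
 * reusable PLT entries.
 */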
#else /* !USE_BRL */

struct plt_entry {
	/* Three 16-byte instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
void
module_free (struct module *mod, void *module_region)
{
	if (mod && mod->arch.init_unw_table &&
	    module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}
/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}
/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22:
		      case R_IA64_LTOFF22X:
		      case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B:
		      case R_IA64_PLTOFF22:
		      case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB:
		      case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I:
		      case R_IA64_FPTR32LSB:
		      case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB:
		      case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
		else if (strcmp(".paravirt_bundles",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_bundles = s;
		else if (strcmp(".paravirt_insts",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_insts = s;
#endif

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
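/*
 * Added note (editorial): the four sections resized above are SHT_NOBITS,
 * so the generic module loader only reserves (zeroed) space for them in the
 * final module image instead of copying data from the object file;
 * get_ltoff(), get_plt() and get_fdesc() below then fill the entries lazily
 * while relocations are applied.
 */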
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}
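/*
 * For illustration (editorial addition): the offset returned by get_ltoff()
 * is consumed by the usual @ltoff code sequence, e.g.
 *
 *	addl  r2 = @ltoff(sym), gp	// gp + offset = address of GOT entry
 *	;;
 *	ld8   r2 = [r2]			// load sym's address from the GOT
 *
 * The addl immediate is 22 bits, which is why every GOT entry must lie
 * within MAX_LTOFF/2 of gp in either direction.
 */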
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core;
				 * if the branch won't reach, then allocate a plt for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;
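			/*
			 * Added note (editorial): the reach check above exists
			 * because a PCREL21B displacement is a signed 21-bit
			 * bundle count, i.e. a direct br.call can only reach
			 * +/- 2^20 bundles * 16 bytes = 16 MB from the branch;
			 * anything farther is routed through a PLT entry.
			 */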
		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;
		}

		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;
	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
				       "non-local symbol (%lx)\n", __func__,
				       reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;
		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;
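		      /*
		       * Added note (editorial): LTOFF22X and LDXMOV form the
		       * linker-relaxation pair for GOT indirection: if the
		       * target turns out to be gp-addressable, the @ltoff
		       * becomes a plain gp-relative offset and the companion
		       * ld8 that would have fetched the GOT entry is rewritten
		       * into a "mov" above; otherwise both fall back to a
		       * regular GOT load.
		       */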
		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;
	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If the target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;
	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections
			 * get allocated at the end of the module.
			 */
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}
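	/*
	 * Added illustration (editorial): with MAX_LTOFF = 4 MB, a 5 MB
	 * module gets gp = base + (5 MB - 2 MB) = base + 3 MB, so the
	 * gp-relative window [gp - 2 MB, gp + 2 MB) still covers the
	 * small-data/GOT sections laid out at the end of the module image.
	 * A module smaller than 4 MB simply puts gp in its middle, covering
	 * the whole image.
	 */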
	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections, but since the two are not contiguous, we need to split this table up so that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
	if (mod->arch.paravirt_bundles) {
		struct paravirt_patch_site_bundle *start =
			(struct paravirt_patch_site_bundle *)
			mod->arch.paravirt_bundles->sh_addr;
		struct paravirt_patch_site_bundle *end =
			(struct paravirt_patch_site_bundle *)
			(mod->arch.paravirt_bundles->sh_addr +
			 mod->arch.paravirt_bundles->sh_size);

		paravirt_patch_apply_bundle(start, end);
	}
	if (mod->arch.paravirt_insts) {
		struct paravirt_patch_site_inst *start =
			(struct paravirt_patch_site_inst *)
			mod->arch.paravirt_insts->sh_addr;
		struct paravirt_patch_site_inst *end =
			(struct paravirt_patch_site_inst *)
			(mod->arch.paravirt_insts->sh_addr +
			 mod->arch.paravirt_insts->sh_size);

		paravirt_patch_apply_inst(start, end);
	}
#endif
	return 0;
}
void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}