[glibc] sysdeps/sparc/sparc64/dl-machine.h
/* Machine-dependent ELF dynamic relocation inline functions.  Sparc64 version.
   Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc64"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>

#ifndef VALIDX
# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
		      + DT_EXTRANUM + DT_VALTAGIDX (tag))
#endif

#define ELF64_R_TYPE_ID(info)   ((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)
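
/* On SPARC64 the r_info value is further split: ELF64_R_TYPE_ID extracts
   the 8-bit relocation type proper, and ELF64_R_TYPE_DATA the bits above
   it.  The only user below is R_SPARC_OLO10, which keeps its secondary
   addend in those extra bits.  */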

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_SPARCV9;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG)	\
do {	Elf64_Addr tmp;		\
	__asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
	      "rd %%pc, %0\n\t" \
	      "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t" \
	      "add %0, %1, %0" \
	      : "=r" (PIC_REG), "=r" (tmp)); \
} while (0)
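
/* The %hi/%lo pair above is the usual SPARC PIC idiom: both halves of
   _GLOBAL_OFFSET_TABLE_ are resolved PC-relative, and the -4/+4
   adjustments make them relative to the address of the "rd %pc"
   instruction, so adding them to the value read from %pc should yield
   the run-time address of the GOT.  */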

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  register Elf64_Addr *elf_pic_register __asm__("%l7");

  LOAD_PIC_REG (elf_pic_register);

  return *elf_pic_register;
}

/* Return the run-time load address of the shared object.  */
static inline Elf64_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7");
  register Elf64_Addr *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
	 "call 1f\n\t"
	 " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
	 "call _DYNAMIC\n\t"
	 "call _GLOBAL_OFFSET_TABLE_\n"
	 "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
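  /* Combining the quantities above:
       got - *got          = l_addr + _GLOBAL_OFFSET_TABLE_ - _DYNAMIC
       (pc[2] - pc[3]) * 4 = _DYNAMIC - _GLOBAL_OFFSET_TABLE_ + 4
     so the return expression below reduces to l_addr.  */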
  return (Elf64_Addr) got - *got + (Elf32_Sword) ((pc[2] - pc[3]) * 4) - 4;
}

/* We have 4 cases to handle.  And we code different code sequences
   for each one.  I love V9 code models...  */
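/* In order of preference below: a single "b,a" when the target is within
   +/- 8MB of the PLT entry; sethi/jmpl when the target fits in the low
   32 bits of the address space; a mov/call/mov sequence when the target
   is within +/- 2GB of the entry; otherwise the full six-instruction
   64-bit constant load.  Entries in .PLT32768 and above are different:
   they are fixed up through their data slot rather than by patching
   instructions.  */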
static inline void __attribute__ ((always_inline))
sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc,
		   Elf64_Addr *reloc_addr, Elf64_Addr value,
		   Elf64_Addr high, int t)
{
  unsigned int *insns = (unsigned int *) reloc_addr;
  Elf64_Addr plt_vaddr = (Elf64_Addr) reloc_addr;
  Elf64_Sxword disp = value - plt_vaddr;

  /* Now move plt_vaddr up to the call instruction.  */
  plt_vaddr += ((t + 1) * 4);

  /* PLT entries .PLT32768 and above look always the same.  */
  if (__builtin_expect (high, 0) != 0)
    {
      *reloc_addr = value - map->l_addr;
    }
  /* Near destination.  */
  else if (disp >= -0x800000 && disp < 0x800000)
    {
      /* As this is just one instruction, it is thread safe and so
	 we can avoid the unnecessary sethi FOO, %g1.
	 b,a	target  */
      insns[0] = 0x30800000 | ((disp >> 2) & 0x3fffff);
      __asm __volatile ("flush %0" : : "r" (insns));
    }
  /* 32-bit Sparc style, the target is in the lower 32-bits of
     address space.  */
  else if (insns += t, (value >> 32) == 0)
    {
      /* sethi	%hi(target), %g1
	 jmpl	%g1 + %lo(target), %g0  */

      insns[1] = 0x81c06000 | (value & 0x3ff);
      __asm __volatile ("flush %0 + 4" : : "r" (insns));

      insns[0] = 0x03000000 | ((unsigned int)(value >> 10));
      __asm __volatile ("flush %0" : : "r" (insns));
    }
  /* We can also get somewhat simple sequences if the distance between
     the target and the PLT entry is within +/- 2GB.  */
  else if ((plt_vaddr > value
	    && ((plt_vaddr - value) >> 31) == 0)
	   || (value > plt_vaddr
	       && ((value - plt_vaddr) >> 31) == 0))
    {
      unsigned int displacement;

      if (plt_vaddr > value)
	displacement = (0 - (plt_vaddr - value));
      else
	displacement = value - plt_vaddr;

      /* mov	%o7, %g1
	 call	displacement
	  mov	%g1, %o7  */

      insns[2] = 0x9e100001;
      __asm __volatile ("flush %0 + 8" : : "r" (insns));

      insns[1] = 0x40000000 | (displacement >> 2);
      __asm __volatile ("flush %0 + 4" : : "r" (insns));

      insns[0] = 0x8210000f;
      __asm __volatile ("flush %0" : : "r" (insns));
    }
  /* Worst case, ho hum...  */
  else
    {
      unsigned int high32 = (value >> 32);
      unsigned int low32 = (unsigned int) value;

      /* ??? Some tricks can be stolen from the sparc64 egcs backend
	     constant formation code I wrote.  -DaveM  */

      if (__builtin_expect (high32 & 0x3ff, 0))
	{
	  /* sethi	%hh(value), %g1
	     sethi	%lm(value), %g5
	     or		%g1, %hm(value), %g1
	     or		%g5, %lo(value), %g5
	     sllx	%g1, 32, %g1
	     jmpl	%g1 + %g5, %g0
	      nop  */

	  insns[5] = 0x81c04005;
	  __asm __volatile ("flush %0 + 20" : : "r" (insns));

	  insns[4] = 0x83287020;
	  __asm __volatile ("flush %0 + 16" : : "r" (insns));

	  insns[3] = 0x8a116000 | (low32 & 0x3ff);
	  __asm __volatile ("flush %0 + 12" : : "r" (insns));

	  insns[2] = 0x82106000 | (high32 & 0x3ff);
	}
      else
	{
	  /* sethi	%hh(value), %g1
	     sethi	%lm(value), %g5
	     sllx	%g1, 32, %g1
	     or		%g5, %lo(value), %g5
	     jmpl	%g1 + %g5, %g0
	      nop  */

	  insns[4] = 0x81c04005;
	  __asm __volatile ("flush %0 + 16" : : "r" (insns));

	  insns[3] = 0x8a116000 | (low32 & 0x3ff);
	  __asm __volatile ("flush %0 + 12" : : "r" (insns));

	  insns[2] = 0x83287020;
	}

      __asm __volatile ("flush %0 + 8" : : "r" (insns));

      insns[1] = 0x0b000000 | (low32 >> 10);
      __asm __volatile ("flush %0 + 4" : : "r" (insns));

      insns[0] = 0x03000000 | (high32 >> 10);
      __asm __volatile ("flush %0" : : "r" (insns));
    }
}

static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const Elf64_Rela *reloc,
		       Elf64_Addr *reloc_addr, Elf64_Addr value)
{
  sparc64_fixup_plt (map, reloc, reloc_addr, value + reloc->r_addend,
		     reloc->r_addend, 1);
  return value;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  /* Don't add addend here, but in elf_machine_fixup_plt instead.
     value + reloc->r_addend is the value which should actually be
     stored into .plt data slot.  */
  return value;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_NOCOPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD)
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT						    \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64))  \
    * ELF_RTYPE_CLASS_PLT)						    \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#else
# define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT) * ELF_RTYPE_CLASS_PLT)	\
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_SPARC_JMP_SLOT

/* The SPARC never uses Elf64_Rel relocations.  */
#define ELF_MACHINE_NO_REL 1

/* The SPARC overlaps DT_RELA and DT_PLTREL.  */
#define ELF_MACHINE_PLTREL_OVERLAP 1

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      extern void _dl_runtime_resolve_0 (void);
      extern void _dl_runtime_resolve_1 (void);
      extern void _dl_runtime_profile_0 (void);
      extern void _dl_runtime_profile_1 (void);
      Elf64_Addr res0_addr, res1_addr;
      unsigned int *plt = (void *) D_PTR (l, l_info[DT_PLTGOT]);

      if (__builtin_expect (profile, 0))
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_profile_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_profile_1;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    GL(dl_profile_map) = l;
	}
      else
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1;
	}

      /* PLT0 looks like:

	 sethi	%uhi(_dl_runtime_{resolve,profile}_0), %g4
	 sethi	%hi(_dl_runtime_{resolve,profile}_0), %g5
	 or	%g4, %ulo(_dl_runtime_{resolve,profile}_0), %g4
	 or	%g5, %lo(_dl_runtime_{resolve,profile}_0), %g5
	 sllx	%g4, 32, %g4
	 add	%g4, %g5, %g5
	 jmpl	%g5, %g4
	  nop
       */

      plt[0] = 0x09000000 | (res0_addr >> (64 - 22));
      plt[1] = 0x0b000000 | ((res0_addr >> 10) & 0x003fffff);
      plt[2] = 0x88112000 | ((res0_addr >> 32) & 0x3ff);
      plt[3] = 0x8a116000 | (res0_addr & 0x3ff);
      plt[4] = 0x89293020;
      plt[5] = 0x8a010005;
      plt[6] = 0x89c14000;
      plt[7] = 0x01000000;

      /* PLT1 looks like:

	 sethi	%uhi(_dl_runtime_{resolve,profile}_1), %g4
	 sethi	%hi(_dl_runtime_{resolve,profile}_1), %g5
	 or	%g4, %ulo(_dl_runtime_{resolve,profile}_1), %g4
	 or	%g5, %lo(_dl_runtime_{resolve,profile}_1), %g5
	 sllx	%g4, 32, %g4
	 add	%g4, %g5, %g5
	 jmpl	%g5, %g4
	  nop
       */

      plt[8] = 0x09000000 | (res1_addr >> (64 - 22));
      plt[9] = 0x0b000000 | ((res1_addr >> 10) & 0x003fffff);
      plt[10] = 0x88112000 | ((res1_addr >> 32) & 0x3ff);
      plt[11] = 0x8a116000 | (res1_addr & 0x3ff);
      plt[12] = 0x89293020;
      plt[13] = 0x8a010005;
      plt[14] = 0x89c14000;
      plt[15] = 0x01000000;

      /* Now put the magic cookie at the beginning of .PLT2
	 Entry .PLT3 is unused by this implementation.  */
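      /* Each PLT entry is eight 4-byte instructions, so .PLT0 occupies
	 plt[0..7], .PLT1 occupies plt[8..15], and &plt[16] is the start
	 of .PLT2, which holds the struct link_map pointer for this
	 object.  */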
      *((struct link_map **)(&plt[16])) = l;

      if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
	  || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
	{
	  /* Need to reinitialize .plt to undo prelinking.  */
	  Elf64_Rela *rela = (Elf64_Rela *) D_PTR (l, l_info[DT_JMPREL]);
	  Elf64_Rela *relaend
	    = (Elf64_Rela *) ((char *) rela
			      + l->l_info[DT_PLTRELSZ]->d_un.d_val);

	  /* prelink must ensure there are no R_SPARC_NONE relocs left
	     in .rela.plt.  */
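	  /* Each iteration below rewrites one PLT entry back to its
	     unresolved state.  A non-zero addend marks the far entries
	     (.PLT32768 and above), whose data slot is recomputed;
	     ordinary entries are restored to the standard unresolved
	     form "sethi <offset of entry from .PLT0>, %g1" followed by
	     an annulled branch back to .PLT1.  */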
	  while (rela < relaend)
	    {
	      if (__builtin_expect (rela->r_addend, 0) != 0)
		{
		  Elf64_Addr slot = ((rela->r_offset + 0x400
				      - (Elf64_Addr) plt)
				     / 0x1400) * 0x1400
				    + (Elf64_Addr) plt - 0x400;
		  /* ldx [%o7 + X], %g1  */
		  unsigned int first_ldx = *(unsigned int *)(slot + 12);
		  Elf64_Addr ptr = slot + (first_ldx & 0xfff) + 4;

		  *(Elf64_Addr *) rela->r_offset
		    = (Elf64_Addr) plt
		      - (slot + ((rela->r_offset - ptr) / 8) * 24 + 4);
		  ++rela;
		  continue;
		}

	      *(unsigned int *) rela->r_offset
		= 0x03000000 | (rela->r_offset - (Elf64_Addr) plt);
	      *(unsigned int *) (rela->r_offset + 4)
		= 0x30680000 | ((((Elf64_Addr) plt + 32
				  - rela->r_offset - 4) >> 2) & 0x7ffff);
	      __asm __volatile ("flush %0" : : "r" (rela->r_offset));
	      __asm __volatile ("flush %0+4" : : "r" (rela->r_offset));
	      ++rela;
	    }
	}
    }

  return lazy;
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

/* Undo the sub %sp, 6*8, %sp; add %sp, STACK_BIAS + 22*8, %o0 below
   to get at the value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 8 - STACK_BIAS))
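
/* I.e. the cookie handed to _dl_start is %sp + STACK_BIAS + 22*8 taken
   after %sp had already been lowered by 6*8 in _start, so stepping back
   (22 - 6)*8 bytes and removing the stack bias recovers the original
   stack end.  */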

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define __S1(x) #x
#define __S(x) __S1(x)

#define RTLD_START __asm__ ( "\n" \
"	.text\n" \
"	.global	_start\n" \
"	.type	_start, @function\n" \
"	.align	32\n" \
"_start:\n" \
"   /* Make room for functions to drop their arguments on the stack.  */\n" \
"	sub	%sp, 6*8, %sp\n" \
"   /* Pass pointer to argument block to _dl_start.  */\n" \
"	call	_dl_start\n" \
"	 add	%sp," __S(STACK_BIAS) "+22*8,%o0\n" \
"	/* FALLTHRU */\n" \
"	.size	_start, .-_start\n" \
"\n" \
"	.global	_dl_start_user\n" \
"	.type	_dl_start_user, @function\n" \
"_dl_start_user:\n" \
"   /* Load the GOT register.  */\n" \
"1:	call	11f\n" \
"	 sethi	%hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"11:	or	%l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"	sethi	%hi(_dl_skip_args), %g5\n" \
"	add	%l7, %o7, %l7\n" \
"	or	%g5, %lo(_dl_skip_args), %g5\n" \
"   /* Save the user entry point address in %l0.  */\n" \
"	mov	%o0, %l0\n" \
"   /* See if we were run as a command with the executable file name as an\n" \
"      extra leading argument.  If so, we must shift things around since we\n" \
"      must keep the stack doubleword aligned.  */\n" \
"	ldx	[%l7 + %g5], %i0\n" \
"	ld	[%i0], %i0\n" \
"	brz,pt	%i0, 2f\n" \
"	 ldx	[%sp + " __S(STACK_BIAS) " + 22*8], %i5\n" \
"   /* Find out how far to shift.  */\n" \
"	sethi	%hi(_dl_argv), %l4\n" \
"	sub	%i5, %i0, %i5\n" \
"	or	%l4, %lo(_dl_argv), %l4\n" \
"	sllx	%i0, 3, %l6\n" \
"	ldx	[%l7 + %l4], %l4\n" \
"	stx	%i5, [%sp + " __S(STACK_BIAS) " + 22*8]\n" \
"	add	%sp, " __S(STACK_BIAS) " + 23*8, %i1\n" \
"	add	%i1, %l6, %i2\n" \
"	ldx	[%l4], %l5\n" \
"   /* Copy down argv.  */\n" \
444 "12: ldx [%i2], %i3\n" \
445 " add %i2, 8, %i2\n" \
446 " stx %i3, [%i1]\n" \
447 " brnz,pt %i3, 12b\n" \
448 " add %i1, 8, %i1\n" \
449 " sub %l5, %l6, %l5\n" \
450 " /* Copy down envp. */\n" \
451 "13: ldx [%i2], %i3\n" \
452 " add %i2, 8, %i2\n" \
453 " stx %i3, [%i1]\n" \
454 " brnz,pt %i3, 13b\n" \
455 " add %i1, 8, %i1\n" \
456 " /* Copy down auxiliary table. */\n" \
457 "14: ldx [%i2], %i3\n" \
458 " ldx [%i2 + 8], %i4\n" \
459 " add %i2, 16, %i2\n" \
460 " stx %i3, [%i1]\n" \
461 " stx %i4, [%i1 + 8]\n" \
462 " brnz,pt %i3, 14b\n" \
463 " add %i1, 16, %i1\n" \
464 " stx %l5, [%l4]\n" \
465 " /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp. */\n" \
466 "2: sethi %hi(_rtld_local), %o0\n" \
467 " add %sp, " __S(STACK_BIAS) " + 23*8, %o2\n" \
468 " orcc %o0, %lo(_rtld_local), %o0\n" \
469 " sllx %i5, 3, %o3\n" \
470 " ldx [%l7 + %o0], %o0\n" \
471 " add %o3, 8, %o3\n" \
472 " mov %i5, %o1\n" \
473 " add %o2, %o3, %o3\n" \
474 " call _dl_init_internal\n" \
475 " ldx [%o0], %o0\n" \
476 " /* Pass our finalizer function to the user in %g1. */\n" \
477 " sethi %hi(_dl_fini), %g1\n" \
478 " or %g1, %lo(_dl_fini), %g1\n" \
479 " ldx [%l7 + %g1], %g1\n" \
480 " /* Jump to the user's entry point and deallocate the extra stack we got. */\n" \
481 " jmp %l0\n" \
482 " add %sp, 6*8, %sp\n" \
483 " .size _dl_start_user, . - _dl_start_user\n" \
484 " .previous\n");

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER	sparc64_gnu_pltenter
#define ARCH_LA_PLTEXIT		sparc64_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

auto inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
		  const Elf64_Sym *sym, const struct r_found_version *version,
		  void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
  const Elf64_Sym *const refsym = sym;
#endif
  Elf64_Addr value;
  const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info);
#if !defined RESOLVE_CONFLICT_FIND_MAP
  struct link_map *sym_map = NULL;
#endif

#if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
  /* This is defined in rtld.c, but nowhere in the static libc.a; make the
     reference weak so static programs can still link.  This declaration
     cannot be done when compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP)
     because rtld.c contains the common defn for _dl_rtld_map, which is
     incompatible with a weak decl in the same file.  */
  weak_extern (_dl_rtld_map);
#endif

  if (__builtin_expect (r_type == R_SPARC_NONE, 0))
    return;

#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
  if (__builtin_expect (r_type == R_SPARC_RELATIVE, 0))
    {
# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC
      if (map != &_dl_rtld_map) /* Already done in rtld itself.  */
# endif
	*reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

#ifndef RESOLVE_CONFLICT_FIND_MAP
  if (__builtin_expect (ELF64_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (&sym, version, r_type);
      value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value;
    }
#else
  value = 0;
#endif

  value += reloc->r_addend;	/* Assume copy relocs have zero addend.  */

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_COPY:
      if (sym == NULL)
	/* This can happen in trace mode if an object could not be
	   found.  */
	break;
      if (sym->st_size > refsym->st_size
	  || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	{
	  const char *strtab;

	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	  _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
			    rtld_progname ?: "<program name unknown>",
			    strtab + refsym->st_name);
	}
      memcpy (reloc_addr_arg, (void *) value,
	      MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_64:
    case R_SPARC_GLOB_DAT:
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      /* R_SPARC_JMP_SLOT conflicts against .plt[32768+]
	 relocs should be turned into R_SPARC_64 relocs
	 in .gnu.conflict section.
	 r_addend non-zero does not mean it is a .plt[32768+]
	 reloc, instead it is the actual address of the function
	 to call.  */
      sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0);
#else
      sparc64_fixup_plt (map, reloc, reloc_addr, value, reloc->r_addend, 0);
#endif
      break;
#if defined USE_TLS && (!defined RTLD_BOOTSTRAP || USE___THREAD) \
    && !defined RESOLVE_CONFLICT_FIND_MAP
    case R_SPARC_TLS_DTPMOD64:
      /* Get the information from the link map returned by the
	 resolv function.  */
      if (sym_map != NULL)
	*reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF64:
      /* During relocation all TLS symbols are defined and used.
	 Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF64:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of object the symbol is contained in.
	 It is a negative value which will be added to the
	 thread pointer.  */
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  *reloc_addr = sym->st_value - sym_map->l_tls_offset
			+ reloc->r_addend;
	}
      break;
# ifndef RTLD_BOOTSTRAP
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  value = sym->st_value - sym_map->l_tls_offset
		  + reloc->r_addend;
	  if (r_type == R_SPARC_TLS_LE_HIX22)
	    *reloc_addr = (*reloc_addr & 0xffc00000)
			  | (((~value) >> 10) & 0x3fffff);
	  else
	    *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
			  | 0x1c00;
	}
      break;
# endif
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_32:
      *(unsigned int *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_WDISP30:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xc0000000) |
	 (((value - (Elf64_Addr) reloc_addr) >> 2) & 0x3fffffff));
      break;
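
      /* The code-model relocations below patch the immediate field of a
	 single instruction in place, preserving the remaining opcode
	 bits: e.g. R_SPARC_HI22 fills a 22-bit sethi-style immediate
	 with bits 31..10 of the value, and R_SPARC_LO10 stores the low
	 10 bits of the value in the low bits of the instruction.  */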

      /* MEDLOW code model relocs */
    case R_SPARC_LO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 (value & 0x3ff));
      break;
    case R_SPARC_HI22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 10) & 0x3fffff));
      break;
    case R_SPARC_OLO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x1fff) |
	 (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff));
      break;

      /* MEDMID code model relocs */
    case R_SPARC_H44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 22) & 0x3fffff));
      break;
    case R_SPARC_M44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 ((value >> 12) & 0x3ff));
      break;
    case R_SPARC_L44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0xfff) |
	 (value & 0xfff));
      break;

      /* MEDANY code model relocs */
    case R_SPARC_HH22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 (value >> 42));
      break;
    case R_SPARC_HM10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff) |
	 ((value >> 32) & 0x3ff));
      break;
    case R_SPARC_LM22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000) |
	 ((value >> 10) & 0x003fffff));
      break;
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
    case R_SPARC_UA64:
      if (! ((long) reloc_addr_arg & 3))
	{
	  /* Common in .eh_frame  */
	  ((unsigned int *) reloc_addr_arg) [0] = value >> 32;
	  ((unsigned int *) reloc_addr_arg) [1] = value;
	  break;
	}
      ((unsigned char *) reloc_addr_arg) [0] = value >> 56;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 48;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 40;
      ((unsigned char *) reloc_addr_arg) [3] = value >> 32;
      ((unsigned char *) reloc_addr_arg) [4] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [5] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [6] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [7] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

auto inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

auto inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
		      Elf64_Addr l_addr, const Elf64_Rela *reloc)
{
  switch (ELF64_R_TYPE (reloc->r_info))
    {
    case R_SPARC_NONE:
      break;
    case R_SPARC_JMP_SLOT:
      break;
    default:
      _dl_reloc_bad_type (map, ELFW(R_TYPE) (reloc->r_info), 1);
      break;
    }
}

#endif	/* RESOLVE_MAP */