4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1988 AT&T
26 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
31 * SPARC machine dependent and ELF file class dependent functions.
32 * Contains routines for performing function binding and symbol relocations.
37 #include <sys/elf_SPARC.h>
48 #include "_inline_gen.h"
49 #include "_inline_reloc.h"
/*
 * Assembly-language support routines: flush a range of the instruction
 * cache, and write a full (jmpl-sequence) PLT entry.
 */
extern void	iflush_range(caddr_t, size_t);
extern void	plt_full_range(uintptr_t, uintptr_t);
56 elf_mach_flags_check(Rej_desc
*rej
, Ehdr
*ehdr
)
59 * Check machine type and flags.
61 if (ehdr
->e_machine
!= EM_SPARC
) {
62 if (ehdr
->e_machine
!= EM_SPARC32PLUS
) {
63 rej
->rej_type
= SGS_REJ_MACH
;
64 rej
->rej_info
= (uint_t
)ehdr
->e_machine
;
67 if ((ehdr
->e_flags
& EF_SPARC_32PLUS
) == 0) {
68 rej
->rej_type
= SGS_REJ_MISFLAG
;
69 rej
->rej_info
= (uint_t
)ehdr
->e_flags
;
72 if ((ehdr
->e_flags
& ~at_flags
) & EF_SPARC_32PLUS_MASK
) {
73 rej
->rej_type
= SGS_REJ_BADFLAG
;
74 rej
->rej_info
= (uint_t
)ehdr
->e_flags
;
77 } else if ((ehdr
->e_flags
& ~EF_SPARCV9_MM
) != 0) {
78 rej
->rej_type
= SGS_REJ_BADFLAG
;
79 rej
->rej_info
= (uint_t
)ehdr
->e_flags
;
86 ldso_plt_init(Rt_map
*lmp
)
89 * There is no need to analyze ld.so because we don't map in any of
90 * its dependencies. However we may map these dependencies in later
91 * (as if ld.so had dlopened them), so initialize the plt and the
92 * permission information.
95 elf_plt_init((PLTGOT(lmp
)), (caddr_t
)lmp
);
99 * elf_plt_write() will test to see how far away our destination
100 * address lies. If it is close enough that a branch can
101 * be used instead of a jmpl - we will fill the plt in with
102 * single branch. The branches are much quicker then
103 * a jmpl instruction - see bug#4356879 for further
106 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
107 * librtld/dldump update PLT's who's physical
108 * address is not the same as the 'virtual' runtime
113 elf_plt_write(uintptr_t addr
, uintptr_t vaddr
, void *rptr
, uintptr_t symval
,
116 Rela
*rel
= (Rela
*)rptr
;
117 uintptr_t vpltaddr
, pltaddr
;
120 pltaddr
= addr
+ rel
->r_offset
;
121 vpltaddr
= vaddr
+ rel
->r_offset
;
122 disp
= symval
- vpltaddr
- 4;
125 * Test if the destination address is close enough to use
126 * a ba,a... instruction to reach it.
128 if (S_INRANGE(disp
, 23) && !(rtld_flags
& RT_FL_NOBAPLT
)) {
129 uint_t
*pltent
, bainstr
;
132 pltent
= (uint_t
*)pltaddr
;
137 * ba,a,pt %icc, <dest>
139 * is the most efficient of the PLT's. If we
140 * are within +-20 bits *and* running on a
141 * v8plus architecture - use that branch.
143 if ((at_flags
& EF_SPARC_32PLUS
) &&
144 S_INRANGE(disp
, 20)) {
145 bainstr
= M_BA_A_PT
; /* ba,a,pt %icc,<dest> */
146 bainstr
|= (S_MASK(19) & (disp
>> 2));
148 DBG_CALL(pltcnt21d
++);
151 * Otherwise - we fall back to the good old
155 * Which still beats a jmpl instruction.
157 bainstr
= M_BA_A
; /* ba,a <dest> */
158 bainstr
|= (S_MASK(22) & (disp
>> 2));
160 DBG_CALL(pltcnt24d
++);
163 pltent
[2] = M_NOP
; /* nop instr */
166 iflush_range((char *)(&pltent
[1]), 4);
167 pltent
[0] = M_NOP
; /* nop instr */
168 iflush_range((char *)(&pltent
[0]), 4);
173 * The PLT destination is not in reach of
174 * a branch instruction - so we fall back
175 * to a 'jmpl' sequence.
177 plt_full_range(pltaddr
, symval
);
178 DBG_CALL(pltcntfull
++);
183 * Local storage space created on the stack created for this glue
184 * code includes space for:
185 * 0x4 pointer to dyn_data
186 * 0x4 size prev stack frame
188 static const uchar_t dyn_plt_template
[] = {
189 /* 0x00 */ 0x80, 0x90, 0x00, 0x1e, /* tst %fp */
190 /* 0x04 */ 0x02, 0x80, 0x00, 0x04, /* be 0x14 */
191 /* 0x08 */ 0x82, 0x27, 0x80, 0x0e, /* sub %sp, %fp, %g1 */
192 /* 0x0c */ 0x10, 0x80, 0x00, 0x03, /* ba 0x20 */
193 /* 0x10 */ 0x01, 0x00, 0x00, 0x00, /* nop */
194 /* 0x14 */ 0x82, 0x10, 0x20, 0x60, /* mov 0x60, %g1 */
195 /* 0x18 */ 0x9d, 0xe3, 0xbf, 0x98, /* save %sp, -0x68, %sp */
196 /* 0x1c */ 0xc2, 0x27, 0xbf, 0xf8, /* st %g1, [%fp + -0x8] */
197 /* 0x20 */ 0x03, 0x00, 0x00, 0x00, /* sethi %hi(val), %g1 */
198 /* 0x24 */ 0x82, 0x10, 0x60, 0x00, /* or %g1, %lo(val), %g1 */
199 /* 0x28 */ 0x40, 0x00, 0x00, 0x00, /* call <rel_addr> */
200 /* 0x2c */ 0xc2, 0x27, 0xbf, 0xfc /* st %g1, [%fp + -0x4] */
203 int dyn_plt_ent_size
= sizeof (dyn_plt_template
) +
204 sizeof (uintptr_t) + /* reflmp */
205 sizeof (uintptr_t) + /* deflmp */
206 sizeof (ulong_t
) + /* symndx */
207 sizeof (ulong_t
) + /* sb_flags */
208 sizeof (Sym
); /* symdef */
211 * the dynamic plt entry is:
220 * mov SA(MINFRAME), %g1 ! if %fp is null this is the
221 * ! 'minimum stack'. %fp is null
222 * ! on the initial stack frame
224 * save %sp, -(SA(MINFRAME) + 2 * CLONGSIZE), %sp
225 * st %g1, [%fp + -0x8] ! store prev_stack size in [%fp - 8]
226 * sethi %hi(dyn_data), %g1
227 * or %g1, %lo(dyn_data), %g1
229 * st %g1, [%fp + -0x4] ! store dyn_data ptr in [%fp - 4]
238 elf_plt_trace_write(caddr_t addr
, Rela
*rptr
, Rt_map
*rlmp
, Rt_map
*dlmp
,
239 Sym
*sym
, ulong_t symndx
, ulong_t pltndx
, caddr_t to
, ulong_t sb_flags
,
242 extern ulong_t
elf_plt_trace();
247 * If both pltenter & pltexit have been disabled there
248 * there is no reason to even create the glue code.
250 if ((sb_flags
& (LA_SYMB_NOPLTENTER
| LA_SYMB_NOPLTEXIT
)) ==
251 (LA_SYMB_NOPLTENTER
| LA_SYMB_NOPLTEXIT
)) {
252 (void) elf_plt_write((uintptr_t)addr
, (uintptr_t)addr
,
253 rptr
, (uintptr_t)to
, pltndx
);
258 * We only need to add the glue code if there is an auditing
259 * library that is interested in this binding.
261 dyn_plt
= (uchar_t
*)((uintptr_t)AUDINFO(rlmp
)->ai_dynplts
+
262 (pltndx
* dyn_plt_ent_size
));
265 * Have we initialized this dynamic plt entry yet? If we haven't do it
266 * now. Otherwise this function has been called before, but from a
267 * different plt (ie. from another shared object). In that case
268 * we just set the plt to point to the new dyn_plt.
273 Lm_list
*lml
= LIST(rlmp
);
275 (void) memcpy((void *)dyn_plt
, dyn_plt_template
,
276 sizeof (dyn_plt_template
));
277 dyndata
= (uintptr_t *)((uintptr_t)dyn_plt
+
278 sizeof (dyn_plt_template
));
282 * sethi %hi(dyndata), %g1
284 symvalue
= (Xword
)dyndata
;
285 if (do_reloc_rtld(R_SPARC_HI22
, (dyn_plt
+ 0x20),
286 &symvalue
, MSG_ORIG(MSG_SYM_LADYNDATA
),
287 MSG_ORIG(MSG_SPECFIL_DYNPLT
), lml
) == 0) {
294 * or %g1, %lo(dyndata), %g1
296 symvalue
= (Xword
)dyndata
;
297 if (do_reloc_rtld(R_SPARC_LO10
, (dyn_plt
+ 0x24),
298 &symvalue
, MSG_ORIG(MSG_SYM_LADYNDATA
),
299 MSG_ORIG(MSG_SPECFIL_DYNPLT
), lml
) == 0) {
308 symvalue
= (Xword
)((uintptr_t)&elf_plt_trace
-
309 (uintptr_t)(dyn_plt
+ 0x28));
310 if (do_reloc_rtld(R_SPARC_WDISP30
, (dyn_plt
+ 0x28),
311 &symvalue
, MSG_ORIG(MSG_SYM_ELFPLTTRACE
),
312 MSG_ORIG(MSG_SPECFIL_DYNPLT
), lml
) == 0) {
317 *dyndata
++ = (uintptr_t)rlmp
;
318 *dyndata
++ = (uintptr_t)dlmp
;
319 *(ulong_t
*)dyndata
++ = symndx
;
320 *(ulong_t
*)dyndata
++ = sb_flags
;
321 symp
= (Sym
*)dyndata
;
323 symp
->st_name
+= (Word
)STRTAB(dlmp
);
324 symp
->st_value
= (Addr
)to
;
326 iflush_range((void *)dyn_plt
, sizeof (dyn_plt_template
));
329 (void) elf_plt_write((uintptr_t)addr
, (uintptr_t)addr
, rptr
,
330 (uintptr_t)dyn_plt
, 0);
331 return ((caddr_t
)dyn_plt
);
335 * Function binding routine - invoked on the first call to a function through
336 * the procedure linkage table;
337 * passes first through an assembly language interface.
339 * Takes the address of the PLT entry where the call originated,
340 * the offset into the relocation table of the associated
341 * relocation entry and the address of the link map (rt_private_map struct)
344 * Returns the address of the function referenced after re-writing the PLT
345 * entry to invoke the function directly.
347 * On error, causes process to terminate with a signal.
350 elf_bndr(Rt_map
*lmp
, ulong_t pltoff
, caddr_t from
)
353 ulong_t addr
, vaddr
, reloff
, symval
, rsymndx
;
358 uint_t binfo
, sb_flags
= 0, dbg_class
;
366 * For compatibility with libthread (TI_VERSION 1) we track the entry
367 * value. A zero value indicates we have recursed into ld.so.1 to
368 * further process a locking request. Under this recursion we disable
369 * tsort and cleanup activities.
374 if ((lmflags
= lml
->lm_flags
) & LML_FLG_RTLDLM
) {
375 dbg_class
= dbg_desc
->d_class
;
376 dbg_desc
->d_class
= 0;
380 * Must calculate true plt relocation address from reloc.
381 * Take offset, subtract number of reserved PLT entries, and divide
382 * by PLT entry size, which should give the index of the plt
383 * entry (and relocation entry since they have been defined to be
384 * in the same order). Then we must multiply by the size of
385 * a relocation entry, which will give us the offset of the
386 * plt relocation entry from the start of them given by JMPREL(lm).
388 addr
= pltoff
- M_PLT_RESERVSZ
;
389 pltndx
= addr
/ M_PLT_ENTSIZE
;
392 * Perform some basic sanity checks. If we didn't get a load map
393 * or the plt offset is invalid then its possible someone has walked
394 * over the plt entries or jumped to plt[0] out of the blue.
396 if (!lmp
|| ((addr
% M_PLT_ENTSIZE
) != 0)) {
397 Conv_inv_buf_t inv_buf
;
399 eprintf(lml
, ERR_FATAL
, MSG_INTL(MSG_REL_PLTREF
),
400 conv_reloc_SPARC_type(R_SPARC_JMP_SLOT
, 0, &inv_buf
),
401 EC_NATPTR(lmp
), EC_XWORD(pltoff
), EC_NATPTR(from
));
404 reloff
= pltndx
* sizeof (Rela
);
407 * Use relocation entry to get symbol table entry and symbol name.
409 addr
= (ulong_t
)JMPREL(lmp
);
410 rptr
= (Rela
*)(addr
+ reloff
);
411 rsymndx
= ELF_R_SYM(rptr
->r_info
);
412 rsym
= (Sym
*)((ulong_t
)SYMTAB(lmp
) + (rsymndx
* SYMENT(lmp
)));
413 name
= (char *)(STRTAB(lmp
) + rsym
->st_name
);
416 * Determine the last link-map of this list, this'll be the starting
417 * point for any tsort() processing.
422 * Find definition for symbol. Initialize the symbol lookup, and
423 * symbol result, data structures.
425 SLOOKUP_INIT(sl
, name
, lmp
, lml
->lm_head
, ld_entry_cnt
, 0,
426 rsymndx
, rsym
, 0, LKUP_DEFT
);
427 SRESULT_INIT(sr
, name
);
429 if (lookup_sym(&sl
, &sr
, &binfo
, NULL
) == 0) {
430 eprintf(lml
, ERR_FATAL
, MSG_INTL(MSG_REL_NOSYM
), NAME(lmp
),
435 name
= (char *)sr
.sr_name
;
439 symval
= nsym
->st_value
;
441 if (!(FLAGS(nlmp
) & FLG_RT_FIXED
) &&
442 (nsym
->st_shndx
!= SHN_ABS
))
443 symval
+= ADDR(nlmp
);
444 if ((lmp
!= nlmp
) && ((FLAGS1(nlmp
) & FL1_RT_NOINIFIN
) == 0)) {
446 * Record that this new link map is now bound to the caller.
448 if (bind_one(lmp
, nlmp
, BND_REFER
) == 0)
452 if ((lml
->lm_tflags
| AFLAGS(lmp
) | AFLAGS(nlmp
)) &
453 LML_TFLG_AUD_SYMBIND
) {
454 ulong_t symndx
= (((uintptr_t)nsym
-
455 (uintptr_t)SYMTAB(nlmp
)) / SYMENT(nlmp
));
457 symval
= audit_symbind(lmp
, nlmp
, nsym
, symndx
, symval
,
461 if (FLAGS(lmp
) & FLG_RT_FIXED
)
467 if (!(rtld_flags
& RT_FL_NOBIND
)) {
468 if (((lml
->lm_tflags
| AFLAGS(lmp
)) &
469 (LML_TFLG_AUD_PLTENTER
| LML_TFLG_AUD_PLTEXIT
)) &&
470 AUDINFO(lmp
)->ai_dynplts
) {
472 ulong_t symndx
= (((uintptr_t)nsym
-
473 (uintptr_t)SYMTAB(nlmp
)) / SYMENT(nlmp
));
475 symval
= (ulong_t
)elf_plt_trace_write((caddr_t
)vaddr
,
476 rptr
, lmp
, nlmp
, nsym
, symndx
, pltndx
,
477 (caddr_t
)symval
, sb_flags
, &fail
);
482 * Write standard PLT entry to jump directly
483 * to newly bound function.
485 pbtype
= elf_plt_write((uintptr_t)vaddr
,
486 (uintptr_t)vaddr
, rptr
, symval
, pltndx
);
491 * Print binding information and rebuild PLT entry.
493 DBG_CALL(Dbg_bind_global(lmp
, (Addr
)from
, (Off
)(from
- ADDR(lmp
)),
494 pltndx
, pbtype
, nlmp
, (Addr
)symval
, nsym
->st_value
, name
, binfo
));
497 * Complete any processing for newly loaded objects. Note we don't
498 * know exactly where any new objects are loaded (we know the object
499 * that supplied the symbol, but others may have been loaded lazily as
500 * we searched for the symbol), so sorting starts from the last
501 * link-map know on entry to this routine.
504 load_completion(llmp
);
507 * Some operations like dldump() or dlopen()'ing a relocatable object
508 * result in objects being loaded on rtld's link-map, make sure these
509 * objects are initialized also.
511 if ((LIST(nlmp
)->lm_flags
& LML_FLG_RTLDLM
) && LIST(nlmp
)->lm_init
)
512 load_completion(nlmp
);
515 * Make sure the object to which we've bound has had it's .init fired.
516 * Cleanup before return to user code.
519 is_dep_init(nlmp
, lmp
);
523 if (lmflags
& LML_FLG_RTLDLM
)
524 dbg_desc
->d_class
= dbg_class
;
530 * Read and process the relocations for one link object, we assume all
531 * relocation sections for loadable segments are stored contiguously in
535 elf_reloc(Rt_map
*lmp
, uint_t plt
, int *in_nfavl
, APlist
**textrel
)
537 ulong_t relbgn
, relend
, relsiz
, basebgn
, pltbgn
, pltend
;
538 ulong_t dsymndx
, pltndx
, roffset
, rsymndx
, psymndx
= 0;
540 long reladd
, value
, pvalue
, relacount
= RELACOUNT(lmp
);
541 Sym
*symref
, *psymref
, *symdef
, *psymdef
;
545 int ret
= 1, noplt
= 0;
548 uint_t binfo
, pbinfo
;
549 APlist
*bound
= NULL
;
552 * If an object has any DT_REGISTER entries associated with
553 * it, they are processed now.
555 if ((plt
== 0) && (FLAGS(lmp
) & FLG_RT_REGSYMS
)) {
556 if (elf_regsyms(lmp
) == 0)
561 * Although only necessary for lazy binding, initialize the first
562 * procedure linkage table entry to go to elf_rtbndr(). dbx(1) seems
563 * to find this useful.
565 if ((plt
== 0) && PLTGOT(lmp
)) {
566 mmapobj_result_t
*mpp
;
569 * Make sure the segment is writable.
572 find_segment((caddr_t
)PLTGOT(lmp
), lmp
)) != NULL
) &&
573 ((mpp
->mr_prot
& PROT_WRITE
) == 0)) &&
574 ((set_prot(lmp
, mpp
, 1) == 0) ||
575 (aplist_append(textrel
, mpp
, AL_CNT_TEXTREL
) == NULL
)))
578 elf_plt_init(PLTGOT(lmp
), (caddr_t
)lmp
);
582 * Initialize the plt start and end addresses.
584 if ((pltbgn
= (ulong_t
)JMPREL(lmp
)) != 0)
585 pltend
= pltbgn
+ (ulong_t
)(PLTRELSZ(lmp
));
588 * If we've been called upon to promote an RTLD_LAZY object to an
589 * RTLD_NOW then we're only interested in scaning the .plt table.
596 * The relocation sections appear to the run-time linker as a
597 * single table. Determine the address of the beginning and end
598 * of this table. There are two different interpretations of
599 * the ABI at this point:
601 * - The REL table and its associated RELSZ indicate the
602 * concatenation of *all* relocation sections (this is the
603 * model our link-editor constructs).
605 * - The REL table and its associated RELSZ indicate the
606 * concatenation of all *but* the .plt relocations. These
607 * relocations are specified individually by the JMPREL and
610 * Determine from our knowledege of the relocation range and
611 * .plt range, the range of the total relocation table. Note
612 * that one other ABI assumption seems to be that the .plt
613 * relocations always follow any other relocations, the
614 * following range checking drops that assumption.
616 relbgn
= (ulong_t
)(REL(lmp
));
617 relend
= relbgn
+ (ulong_t
)(RELSZ(lmp
));
619 if (!relbgn
|| (relbgn
> pltbgn
))
621 if (!relbgn
|| (relend
< pltend
))
625 if (!relbgn
|| (relbgn
== relend
)) {
626 DBG_CALL(Dbg_reloc_run(lmp
, 0, plt
, DBG_REL_NONE
));
630 relsiz
= (ulong_t
)(RELENT(lmp
));
633 DBG_CALL(Dbg_reloc_run(lmp
, M_REL_SHT_TYPE
, plt
, DBG_REL_START
));
636 * If we're processing in lazy mode there is no need to scan the
639 if (pltbgn
&& ((MODE(lmp
) & RTLD_NOW
) == 0))
644 * Loop through relocations.
646 while (relbgn
< relend
) {
647 mmapobj_result_t
*mpp
;
651 rtype
= ELF_R_TYPE(((Rela
*)relbgn
)->r_info
, M_MACH
);
654 * If this is a RELATIVE relocation in a shared object (the
655 * common case), and if we are not debugging, then jump into one
656 * of the tighter relocation loops.
658 if ((rtype
== R_SPARC_RELATIVE
) &&
659 ((FLAGS(lmp
) & FLG_RT_FIXED
) == 0) && (DBG_ENABLED
== 0)) {
661 relbgn
= elf_reloc_relative_count(relbgn
,
662 relacount
, relsiz
, basebgn
, lmp
,
666 relbgn
= elf_reloc_relative(relbgn
, relend
,
667 relsiz
, basebgn
, lmp
, textrel
, 0);
669 if (relbgn
>= relend
)
671 rtype
= ELF_R_TYPE(((Rela
*)relbgn
)->r_info
, M_MACH
);
674 roffset
= ((Rela
*)relbgn
)->r_offset
;
676 reladd
= (long)(((Rela
*)relbgn
)->r_addend
);
677 rsymndx
= ELF_R_SYM(((Rela
*)relbgn
)->r_info
);
678 rel
= (Rela
*)relbgn
;
684 if (rtype
== R_SPARC_NONE
)
686 if (noplt
&& ((ulong_t
)rel
>= pltbgn
) &&
687 ((ulong_t
)rel
< pltend
)) {
692 if (rtype
!= R_SPARC_REGISTER
) {
694 * If this is a shared object, add the base address
697 if (!(FLAGS(lmp
) & FLG_RT_FIXED
))
701 * If this relocation is not against part of the image
702 * mapped into memory we skip it.
704 if ((mpp
= find_segment((caddr_t
)roffset
,
706 elf_reloc_bad(lmp
, (void *)rel
, rtype
, roffset
,
713 * If we're promoting .plts, try and determine if this one has
714 * already been written. An uninitialized .plts' second
715 * instruction is a branch. Note, elf_plt_write() optimizes
716 * .plt relocations, and it's possible that a relocated entry
717 * is a branch. If this is the case, we can't tell the
718 * difference between an uninitialized .plt and a relocated,
719 * .plt that uses a branch. In this case, we'll simply redo
720 * the relocation calculation, which is a bit sad.
723 ulong_t
*_roffset
= (ulong_t
*)roffset
;
726 if ((*_roffset
& (~(S_MASK(22)))) != M_BA_A
)
731 pltndx
= (ulong_t
)-1;
735 * If a symbol index is specified then get the symbol table
736 * entry, locate the symbol definition, and determine its
741 * If a Syminfo section is provided, determine if this
742 * symbol is deferred, and if so, skip this relocation.
744 if (sip
&& is_sym_deferred((ulong_t
)rel
, basebgn
, lmp
,
745 textrel
, sip
, rsymndx
))
749 * Get the local symbol table entry.
751 symref
= (Sym
*)((ulong_t
)SYMTAB(lmp
) +
752 (rsymndx
* SYMENT(lmp
)));
755 * If this is a local symbol, just use the base address.
756 * (we should have no local relocations in the
759 if (ELF_ST_BIND(symref
->st_info
) == STB_LOCAL
) {
764 * Special case TLS relocations.
766 if (rtype
== R_SPARC_TLS_DTPMOD32
) {
770 value
= TLSMODID(lmp
);
772 } else if (rtype
== R_SPARC_TLS_TPOFF32
) {
773 if ((value
= elf_static_tls(lmp
, symref
,
774 rel
, rtype
, 0, roffset
, 0)) == 0) {
781 * If the symbol index is equal to the previous
782 * symbol index relocation we processed then
783 * reuse the previous values. (Note that there
784 * have been cases where a relocation exists
785 * against a copy relocation symbol, our ld(1)
786 * should optimize this away, but make sure we
787 * don't use the same symbol information should
790 if ((rsymndx
== psymndx
) &&
791 (rtype
!= R_SPARC_COPY
)) {
794 DBG_CALL(Dbg_bind_weak(lmp
,
795 (Addr
)roffset
, (Addr
)
796 (roffset
- basebgn
), name
));
811 if ((LIST(_lmp
)->lm_tflags
|
813 LML_TFLG_AUD_SYMBIND
) {
814 value
= audit_symbind(lmp
, _lmp
,
816 symdef
, dsymndx
, value
,
824 * Lookup the symbol definition.
825 * Initialize the symbol lookup, and
826 * symbol result, data structures.
828 name
= (char *)(STRTAB(lmp
) +
831 SLOOKUP_INIT(sl
, name
, lmp
, 0,
832 ld_entry_cnt
, 0, rsymndx
, symref
,
833 rtype
, LKUP_STDRELOC
);
834 SRESULT_INIT(sr
, name
);
837 if (lookup_sym(&sl
, &sr
, &binfo
,
839 name
= (char *)sr
.sr_name
;
845 * If the symbol is not found and the
846 * reference was not to a weak symbol,
847 * report an error. Weak references
852 if (sl
.sl_bind
!= STB_WEAK
) {
853 if (elf_reloc_error(lmp
, name
,
864 DBG_CALL(Dbg_bind_weak(lmp
,
865 (Addr
)roffset
, (Addr
)
866 (roffset
- basebgn
), name
));
873 * If symbol was found in an object
874 * other than the referencing object
875 * then record the binding.
877 if ((lmp
!= _lmp
) && ((FLAGS1(_lmp
) &
878 FL1_RT_NOINIFIN
) == 0)) {
879 if (aplist_test(&bound
, _lmp
,
880 AL_CNT_RELBIND
) == 0) {
887 * Calculate the location of definition;
888 * symbol value plus base address of
889 * containing shared object.
892 value
= symdef
->st_size
;
894 value
= symdef
->st_value
;
896 if (!(FLAGS(_lmp
) & FLG_RT_FIXED
) &&
898 (symdef
->st_shndx
!= SHN_ABS
) &&
899 (ELF_ST_TYPE(symdef
->st_info
) !=
904 * Retain this symbol index and the
905 * value in case it can be used for the
906 * subsequent relocations.
908 if (rtype
!= R_SPARC_COPY
) {
917 if ((LIST(_lmp
)->lm_tflags
|
919 LML_TFLG_AUD_SYMBIND
) {
920 dsymndx
= (((uintptr_t)symdef
-
921 (uintptr_t)SYMTAB(_lmp
)) /
923 value
= audit_symbind(lmp
, _lmp
,
924 symdef
, dsymndx
, value
,
930 * If relocation is PC-relative, subtract
933 if (IS_PC_RELATIVE(rtype
))
937 * Special case TLS relocations.
939 if (rtype
== R_SPARC_TLS_DTPMOD32
) {
941 * Relocation value is the TLS modid.
943 value
= TLSMODID(_lmp
);
945 } else if (rtype
== R_SPARC_TLS_TPOFF32
) {
946 if ((value
= elf_static_tls(_lmp
,
947 symdef
, rel
, rtype
, name
, roffset
,
958 if (rtype
== R_SPARC_REGISTER
) {
960 * A register symbol associated with symbol
961 * index 0 is initialized (i.e. relocated) to
962 * a constant in the r_addend field rather than
967 } else if (rtype
== R_SPARC_TLS_DTPMOD32
) {
969 * TLS relocation value is the TLS modid.
971 value
= TLSMODID(lmp
);
978 DBG_CALL(Dbg_reloc_in(LIST(lmp
), ELF_DBG_RTLD
, M_MACH
,
979 M_REL_SHT_TYPE
, rel
, NULL
, 0, name
));
982 * Make sure the segment is writable.
984 if ((rtype
!= R_SPARC_REGISTER
) &&
985 ((mpp
->mr_prot
& PROT_WRITE
) == 0) &&
986 ((set_prot(lmp
, mpp
, 1) == 0) ||
987 (aplist_append(textrel
, mpp
, AL_CNT_TEXTREL
) == NULL
))) {
993 * Call relocation routine to perform required relocation.
996 case R_SPARC_REGISTER
:
998 * The v9 ABI 4.2.4 says that system objects may,
999 * but are not required to, use register symbols
1000 * to inidcate how they use global registers. Thus
1001 * at least %g6, %g7 must be allowed in addition
1005 if (roffset
== STO_SPARC_REGISTER_G1
) {
1006 set_sparc_g1(value
);
1007 } else if (roffset
== STO_SPARC_REGISTER_G2
) {
1008 set_sparc_g2(value
);
1009 } else if (roffset
== STO_SPARC_REGISTER_G3
) {
1010 set_sparc_g3(value
);
1011 } else if (roffset
== STO_SPARC_REGISTER_G4
) {
1012 set_sparc_g4(value
);
1013 } else if (roffset
== STO_SPARC_REGISTER_G5
) {
1014 set_sparc_g5(value
);
1015 } else if (roffset
== STO_SPARC_REGISTER_G6
) {
1016 set_sparc_g6(value
);
1017 } else if (roffset
== STO_SPARC_REGISTER_G7
) {
1018 set_sparc_g7(value
);
1020 eprintf(LIST(lmp
), ERR_FATAL
,
1021 MSG_INTL(MSG_REL_BADREG
), NAME(lmp
),
1027 DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp
), ELF_DBG_RTLD
,
1028 M_MACH
, (Xword
)roffset
, (Xword
)value
));
1031 if (elf_copy_reloc(name
, symref
, lmp
, (void *)roffset
,
1032 symdef
, _lmp
, (const void *)value
) == 0)
1035 case R_SPARC_JMP_SLOT
:
1036 pltndx
= ((ulong_t
)rel
-
1037 (uintptr_t)JMPREL(lmp
)) / relsiz
;
1039 if (FLAGS(lmp
) & FLG_RT_FIXED
)
1044 if (((LIST(lmp
)->lm_tflags
| AFLAGS(lmp
)) &
1045 (LML_TFLG_AUD_PLTENTER
| LML_TFLG_AUD_PLTEXIT
)) &&
1046 AUDINFO(lmp
)->ai_dynplts
) {
1048 ulong_t symndx
= (((uintptr_t)symdef
-
1049 (uintptr_t)SYMTAB(_lmp
)) / SYMENT(_lmp
));
1051 (void) elf_plt_trace_write((caddr_t
)vaddr
,
1052 (Rela
*)rel
, lmp
, _lmp
, symdef
, symndx
,
1053 pltndx
, (caddr_t
)value
, sb_flags
, &fail
);
1058 * Write standard PLT entry to jump directly
1059 * to newly bound function.
1061 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp
),
1062 ELF_DBG_RTLD
, (Xword
)roffset
,
1064 pbtype
= elf_plt_write((uintptr_t)vaddr
,
1065 (uintptr_t)vaddr
, (void *)rel
, value
,
1073 * Write the relocation out. If this relocation is a
1074 * common basic write, skip the doreloc() engine.
1076 if ((rtype
== R_SPARC_GLOB_DAT
) ||
1077 (rtype
== R_SPARC_32
)) {
1078 if (roffset
& 0x3) {
1079 Conv_inv_buf_t inv_buf
;
1081 eprintf(LIST(lmp
), ERR_FATAL
,
1082 MSG_INTL(MSG_REL_NONALIGN
),
1083 conv_reloc_SPARC_type(rtype
,
1085 NAME(lmp
), demangle(name
),
1089 *(uint_t
*)roffset
+= value
;
1091 if (do_reloc_rtld(rtype
, (uchar_t
*)roffset
,
1092 (Xword
*)&value
, name
,
1093 NAME(lmp
), LIST(lmp
)) == 0)
1098 * The value now contains the 'bit-shifted' value that
1099 * was or'ed into memory (this was set by
1102 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp
), ELF_DBG_RTLD
,
1103 (Xword
)roffset
, (Xword
)value
));
1106 * If this relocation is against a text segment, make
1107 * sure that the instruction cache is flushed.
1110 iflush_range((caddr_t
)roffset
, 0x4);
1114 ((LIST(lmp
)->lm_flags
& LML_FLG_TRC_WARN
) == 0))
1118 DBG_CALL(Dbg_bind_global(lmp
, (Addr
)roffset
,
1119 (Off
)(roffset
- basebgn
), pltndx
, pbtype
,
1120 _lmp
, (Addr
)value
, symdef
->st_value
, name
, binfo
));
1124 return (relocate_finish(lmp
, bound
, ret
));
1128 * Provide a machine specific interface to the conversion routine. By calling
1129 * the machine specific version, rather than the generic version, we insure that
1130 * the data tables/strings for all known machine versions aren't dragged into
1134 _conv_reloc_type(uint_t rel
)
1136 static Conv_inv_buf_t inv_buf
;
1138 return (conv_reloc_SPARC_type(rel
, 0, &inv_buf
));