4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 1988 AT&T
26 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
27 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
31 * x86 machine dependent and ELF file class dependent functions.
32 * Contains routines for performing function binding and symbol relocations.
37 #include <sys/elf_386.h>
43 #include <krtld/reloc.h>
48 #include "_inline_gen.h"
49 #include "_inline_reloc.h"
52 extern void elf_rtbndr(Rt_map
*, ulong_t
, caddr_t
);
/*
 * elf_mach_flags_check: validate the machine-dependent ELF header flags of
 * an object being considered for loading.  On i386 no processor-specific
 * e_flags are defined, so a nonzero e_flags marks the object as
 * incompatible; the rejection descriptor records SGS_REJ_BADFLAG and the
 * raw flag value.
 *
 * NOTE(review): garbled extraction — the return type line, function braces
 * and return statements are missing from this view, and statements are
 * wrapped mid-token with original line numbers fused into the text.
 * Presumably the function returns a pass/fail indication after filling in
 * the rejection descriptor — TODO confirm against the complete source.
 */
55 elf_mach_flags_check(Rej_desc
*rej
, Ehdr
*ehdr
)
58 * Check machine type and flags.
60 if (ehdr
->e_flags
!= 0) {
/* Record why the object was rejected, plus the offending e_flags value. */
61 rej
->rej_type
= SGS_REJ_BADFLAG
;
62 rej
->rej_info
= (uint_t
)ehdr
->e_flags
;
/*
 * ldso_plt_init: prepare ld.so.1's own PLT/GOT.  ld.so itself is never
 * analyzed, but dependencies it later maps (as if dlopen()ed) may bind
 * through it, so the reserved GOT entries are initialized here via
 * elf_plt_init().
 * NOTE(review): garbled extraction — interior lines (braces) are missing
 * from this view.
 */
69 ldso_plt_init(Rt_map
*lmp
)
72 * There is no need to analyze ld.so because we don't map in any of
73 * its dependencies. However we may map these dependencies in later
74 * (as if ld.so had dlopened them), so initialize the plt and the
75 * permission information.
78 elf_plt_init((PLTGOT(lmp
)), (caddr_t
)lmp
);
/*
 * dyn_plt_template: i386 machine-code stub copied into each per-plt
 * "dynamic plt" entry used for audit (la_pltenter/la_pltexit) tracing.
 * The 0x00000000 operand of the pushl (offset 4) and the 0xfffffffc
 * displacement of the jmp (offset 9) are patched at runtime by
 * elf_plt_trace_write() with the address of the per-entry trace data and
 * the PC-relative displacement to elf_plt_trace().  dyn_plt_ent_size
 * exports the template size for slot-arena arithmetic.
 */
81 static const uchar_t dyn_plt_template
[] = {
82 /* 0x00 */ 0x55, /* pushl %ebp */
83 /* 0x01 */ 0x8b, 0xec, /* movl %esp, %ebp */
84 /* 0x03 */ 0x68, 0x00, 0x00, 0x00, 0x00, /* pushl trace_fields */
85 /* 0x08 */ 0xe9, 0xfc, 0xff, 0xff, 0xff, 0xff /* jmp elf_plt_trace */
87 int dyn_plt_ent_size
= sizeof (dyn_plt_template
);
90 * the dynamic plt entry is:
/*
 * elf_plt_trace_write: build (once) the auditing glue code for a given plt
 * entry and point the caller's .got slot at it, so subsequent calls funnel
 * through elf_plt_trace() on behalf of interested audit libraries.  The
 * entry is a copy of dyn_plt_template patched with two relocations: an
 * R_386_32 at template offset 4 (absolute address of the trace data laid
 * down after the template) and an R_386_PC32 at offset 9 (displacement to
 * elf_plt_trace).  The trace data written is: rlmp, dlmp, symndx,
 * sb_flags, then a Sym whose st_name is biased into the defining object's
 * string table and whose st_value is the true destination `to`.  Returns
 * the dynamic plt address that the .got entry was rewritten to.
 *
 * NOTE(review): garbled extraction — local declarations, the
 * already-initialized fast path, error paths that set *fail, and closing
 * braces are missing from this view; statements are wrapped mid-token with
 * original line numbers fused into the text.  TODO confirm the missing
 * control flow against the complete source.
 */
105 elf_plt_trace_write(uint_t roffset
, Rt_map
*rlmp
, Rt_map
*dlmp
, Sym
*sym
,
106 uint_t symndx
, uint_t pltndx
, caddr_t to
, uint_t sb_flags
, int *fail
)
108 extern int elf_plt_trace();
114 * We only need to add the glue code if there is an auditing
115 * library that is interested in this binding.
/* Locate this plt's slot within the preallocated dynplt arena. */
117 dyn_plt
= (uchar_t
*)((uintptr_t)AUDINFO(rlmp
)->ai_dynplts
+
118 (pltndx
* dyn_plt_ent_size
));
121 * Have we initialized this dynamic plt entry yet? If we haven't do it
122 * now. Otherwise this function has been called before, but from a
123 * different plt (ie. from another shared object). In that case
124 * we just set the plt to point to the new dyn_plt.
129 Lm_list
*lml
= LIST(rlmp
);
/* Stamp a fresh copy of the template, then find the trace-data area. */
131 (void) memcpy((void *)dyn_plt
, dyn_plt_template
,
132 sizeof (dyn_plt_template
));
133 dyndata
= (uintptr_t *)((uintptr_t)dyn_plt
+
134 ROUND(sizeof (dyn_plt_template
), M_WORD_ALIGN
));
/* Patch the pushl operand (offset 4) with the trace-data address. */
140 symvalue
= (Word
)dyndata
;
141 if (do_reloc_rtld(R_386_32
, &dyn_plt
[4], &symvalue
,
142 MSG_ORIG(MSG_SYM_LADYNDATA
),
143 MSG_ORIG(MSG_SPECFIL_DYNPLT
), lml
) == 0) {
149 * jmps are relative, so I need to figure out the relative
150 * address to elf_plt_trace.
/* Patch the jmp displacement (offset 9) toward elf_plt_trace(). */
155 symvalue
= (ulong_t
)(elf_plt_trace
) - (ulong_t
)(dyn_plt
+ 9);
156 if (do_reloc_rtld(R_386_PC32
, &dyn_plt
[9], &symvalue
,
157 MSG_ORIG(MSG_SYM_ELFPLTTRACE
),
158 MSG_ORIG(MSG_SPECFIL_DYNPLT
), lml
) == 0) {
/* Lay down the trace data consumed by elf_plt_trace(). */
163 *dyndata
++ = (uintptr_t)rlmp
;
164 *dyndata
++ = (uintptr_t)dlmp
;
165 *dyndata
++ = (uint_t
)symndx
;
166 *dyndata
++ = (uint_t
)sb_flags
;
167 symp
= (Sym
*)dyndata
;
169 symp
->st_name
+= (Word
)STRTAB(dlmp
);
170 symp
->st_value
= (Addr
)to
;
/* Redirect the caller's .got slot to the glue code. */
173 got_entry
= (ulong_t
)roffset
;
174 *(ulong_t
*)got_entry
= (ulong_t
)dyn_plt
;
175 return ((caddr_t
)dyn_plt
);
179 * Function binding routine - invoked on the first call to a function through
180 * the procedure linkage table;
181 * passes first through an assembly language interface.
183 * Takes the offset into the relocation table of the associated
184 * relocation entry and the address of the link map (rt_private_map struct)
187 * Returns the address of the function referenced after re-writing the PLT
188 * entry to invoke the function directly.
190 * On error, causes process to terminate with a signal.
193 elf_bndr(Rt_map
*lmp
, ulong_t reloff
, caddr_t from
)
196 ulong_t addr
, symval
, rsymndx
;
200 uint_t binfo
, sb_flags
= 0, dbg_class
;
207 * For compatibility with libthread (TI_VERSION 1) we track the entry
208 * value. A zero value indicates we have recursed into ld.so.1 to
209 * further process a locking request. Under this recursion we disable
210 * tsort and cleanup activities.
215 if ((lmflags
= lml
->lm_flags
) & LML_FLG_RTLDLM
) {
216 dbg_class
= dbg_desc
->d_class
;
217 dbg_desc
->d_class
= 0;
221 * Perform some basic sanity checks. If we didn't get a load map or
222 * the relocation offset is invalid then its possible someone has walked
223 * over the .got entries or jumped to plt0 out of the blue.
225 if (!lmp
|| ((reloff
% sizeof (Rel
)) != 0)) {
226 Conv_inv_buf_t inv_buf
;
228 eprintf(lml
, ERR_FATAL
, MSG_INTL(MSG_REL_PLTREF
),
229 conv_reloc_386_type(R_386_JMP_SLOT
, 0, &inv_buf
),
230 EC_NATPTR(lmp
), EC_XWORD(reloff
), EC_NATPTR(from
));
235 * Use relocation entry to get symbol table entry and symbol name.
237 addr
= (ulong_t
)JMPREL(lmp
);
238 rptr
= (Rel
*)(addr
+ reloff
);
239 rsymndx
= ELF_R_SYM(rptr
->r_info
);
240 rsym
= (Sym
*)((ulong_t
)SYMTAB(lmp
) + (rsymndx
* SYMENT(lmp
)));
241 name
= (char *)(STRTAB(lmp
) + rsym
->st_name
);
244 * Determine the last link-map of this list, this'll be the starting
245 * point for any tsort() processing.
250 * Find definition for symbol. Initialize the symbol lookup, and
251 * symbol result, data structures.
253 SLOOKUP_INIT(sl
, name
, lmp
, lml
->lm_head
, ld_entry_cnt
, 0,
254 rsymndx
, rsym
, 0, LKUP_DEFT
);
255 SRESULT_INIT(sr
, name
);
257 if (lookup_sym(&sl
, &sr
, &binfo
, NULL
) == 0) {
258 eprintf(lml
, ERR_FATAL
, MSG_INTL(MSG_REL_NOSYM
), NAME(lmp
),
263 name
= (char *)sr
.sr_name
;
267 symval
= nsym
->st_value
;
269 if (!(FLAGS(nlmp
) & FLG_RT_FIXED
) &&
270 (nsym
->st_shndx
!= SHN_ABS
))
271 symval
+= ADDR(nlmp
);
272 if ((lmp
!= nlmp
) && ((FLAGS1(nlmp
) & FL1_RT_NOINIFIN
) == 0)) {
274 * Record that this new link map is now bound to the caller.
276 if (bind_one(lmp
, nlmp
, BND_REFER
) == 0)
280 if ((lml
->lm_tflags
| AFLAGS(lmp
) | AFLAGS(nlmp
)) &
281 LML_TFLG_AUD_SYMBIND
) {
282 uint_t symndx
= (((uintptr_t)nsym
-
283 (uintptr_t)SYMTAB(nlmp
)) / SYMENT(nlmp
));
284 symval
= audit_symbind(lmp
, nlmp
, nsym
, symndx
, symval
,
288 if (!(rtld_flags
& RT_FL_NOBIND
)) {
289 addr
= rptr
->r_offset
;
290 if (!(FLAGS(lmp
) & FLG_RT_FIXED
))
292 if (((lml
->lm_tflags
| AFLAGS(lmp
)) &
293 (LML_TFLG_AUD_PLTENTER
| LML_TFLG_AUD_PLTEXIT
)) &&
294 AUDINFO(lmp
)->ai_dynplts
) {
296 uint_t pltndx
= reloff
/ sizeof (Rel
);
297 uint_t symndx
= (((uintptr_t)nsym
-
298 (uintptr_t)SYMTAB(nlmp
)) / SYMENT(nlmp
));
300 symval
= (ulong_t
)elf_plt_trace_write(addr
, lmp
, nlmp
,
301 nsym
, symndx
, pltndx
, (caddr_t
)symval
, sb_flags
,
307 * Write standard PLT entry to jump directly
308 * to newly bound function.
310 *(ulong_t
*)addr
= symval
;
315 * Print binding information and rebuild PLT entry.
317 DBG_CALL(Dbg_bind_global(lmp
, (Addr
)from
, (Off
)(from
- ADDR(lmp
)),
318 (Xword
)(reloff
/ sizeof (Rel
)), PLT_T_FULL
, nlmp
, (Addr
)symval
,
319 nsym
->st_value
, name
, binfo
));
322 * Complete any processing for newly loaded objects. Note we don't
323 * know exactly where any new objects are loaded (we know the object
324 * that supplied the symbol, but others may have been loaded lazily as
325 * we searched for the symbol), so sorting starts from the last
326 * link-map know on entry to this routine.
329 load_completion(llmp
);
332 * Some operations like dldump() or dlopen()'ing a relocatable object
333 * result in objects being loaded on rtld's link-map, make sure these
334 * objects are initialized also.
336 if ((LIST(nlmp
)->lm_flags
& LML_FLG_RTLDLM
) && LIST(nlmp
)->lm_init
)
337 load_completion(nlmp
);
340 * Make sure the object to which we've bound has had it's .init fired.
341 * Cleanup before return to user code.
344 is_dep_init(nlmp
, lmp
);
348 if (lmflags
& LML_FLG_RTLDLM
)
349 dbg_desc
->d_class
= dbg_class
;
355 * Read and process the relocations for one link object, we assume all
356 * relocation sections for loadable segments are stored contiguously in
360 elf_reloc(Rt_map
*lmp
, uint_t plt
, int *in_nfavl
, APlist
**textrel
)
362 ulong_t relbgn
, relend
, relsiz
, basebgn
, pltbgn
, pltend
;
363 ulong_t _pltbgn
, _pltend
;
364 ulong_t dsymndx
, roffset
, rsymndx
, psymndx
= 0;
367 Sym
*symref
, *psymref
, *symdef
, *psymdef
;
371 int ret
= 1, noplt
= 0;
372 int relacount
= RELACOUNT(lmp
), plthint
= 0;
374 uint_t binfo
, pbinfo
;
375 APlist
*bound
= NULL
;
378 * Although only necessary for lazy binding, initialize the first
379 * global offset entry to go to elf_rtbndr(). dbx(1) seems
380 * to find this useful.
382 if ((plt
== 0) && PLTGOT(lmp
)) {
383 mmapobj_result_t
*mpp
;
386 * Make sure the segment is writable.
389 find_segment((caddr_t
)PLTGOT(lmp
), lmp
)) != NULL
) &&
390 ((mpp
->mr_prot
& PROT_WRITE
) == 0)) &&
391 ((set_prot(lmp
, mpp
, 1) == 0) ||
392 (aplist_append(textrel
, mpp
, AL_CNT_TEXTREL
) == NULL
)))
395 elf_plt_init(PLTGOT(lmp
), (caddr_t
)lmp
);
399 * Initialize the plt start and end addresses.
401 if ((pltbgn
= (ulong_t
)JMPREL(lmp
)) != 0)
402 pltend
= pltbgn
+ (ulong_t
)(PLTRELSZ(lmp
));
404 relsiz
= (ulong_t
)(RELENT(lmp
));
408 plthint
= PLTRELSZ(lmp
) / relsiz
;
411 * If we've been called upon to promote an RTLD_LAZY object to an
412 * RTLD_NOW then we're only interested in scaning the .plt table.
413 * An uninitialized .plt is the case where the associated got entry
414 * points back to the plt itself. Determine the range of the real .plt
415 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
423 if (!relbgn
|| (relbgn
== relend
))
427 * Initialize the symbol lookup, and symbol result, data
430 SLOOKUP_INIT(sl
, MSG_ORIG(MSG_SYM_PLT
), lmp
, lmp
, ld_entry_cnt
,
431 elf_hash(MSG_ORIG(MSG_SYM_PLT
)), 0, 0, 0, LKUP_DEFT
);
432 SRESULT_INIT(sr
, MSG_ORIG(MSG_SYM_PLT
));
434 if (elf_find_sym(&sl
, &sr
, &binfo
, NULL
) == 0)
438 _pltbgn
= symdef
->st_value
;
439 if (!(FLAGS(lmp
) & FLG_RT_FIXED
) &&
440 (symdef
->st_shndx
!= SHN_ABS
))
442 _pltend
= _pltbgn
+ (((PLTRELSZ(lmp
) / relsiz
)) *
443 M_PLT_ENTSIZE
) + M_PLT_RESERVSZ
;
447 * The relocation sections appear to the run-time linker as a
448 * single table. Determine the address of the beginning and end
449 * of this table. There are two different interpretations of
450 * the ABI at this point:
452 * o The REL table and its associated RELSZ indicate the
453 * concatenation of *all* relocation sections (this is the
454 * model our link-editor constructs).
456 * o The REL table and its associated RELSZ indicate the
457 * concatenation of all *but* the .plt relocations. These
458 * relocations are specified individually by the JMPREL and
461 * Determine from our knowledege of the relocation range and
462 * .plt range, the range of the total relocation table. Note
463 * that one other ABI assumption seems to be that the .plt
464 * relocations always follow any other relocations, the
465 * following range checking drops that assumption.
467 relbgn
= (ulong_t
)(REL(lmp
));
468 relend
= relbgn
+ (ulong_t
)(RELSZ(lmp
));
470 if (!relbgn
|| (relbgn
> pltbgn
))
472 if (!relbgn
|| (relend
< pltend
))
476 if (!relbgn
|| (relbgn
== relend
)) {
477 DBG_CALL(Dbg_reloc_run(lmp
, 0, plt
, DBG_REL_NONE
));
480 DBG_CALL(Dbg_reloc_run(lmp
, M_REL_SHT_TYPE
, plt
, DBG_REL_START
));
483 * If we're processing a dynamic executable in lazy mode there is no
484 * need to scan the .rel.plt table, however if we're processing a shared
485 * object in lazy mode the .got addresses associated to each .plt must
486 * be relocated to reflect the location of the shared object.
488 if (pltbgn
&& ((MODE(lmp
) & RTLD_NOW
) == 0) &&
489 (FLAGS(lmp
) & FLG_RT_FIXED
))
494 * Loop through relocations.
496 while (relbgn
< relend
) {
497 mmapobj_result_t
*mpp
;
500 rtype
= ELF_R_TYPE(((Rel
*)relbgn
)->r_info
, M_MACH
);
503 * If this is a RELATIVE relocation in a shared object (the
504 * common case), and if we are not debugging, then jump into a
505 * tighter relocation loop (elf_reloc_relative).
507 if ((rtype
== R_386_RELATIVE
) &&
508 ((FLAGS(lmp
) & FLG_RT_FIXED
) == 0) && (DBG_ENABLED
== 0)) {
510 relbgn
= elf_reloc_relative_count(relbgn
,
511 relacount
, relsiz
, basebgn
, lmp
,
515 relbgn
= elf_reloc_relative(relbgn
, relend
,
516 relsiz
, basebgn
, lmp
, textrel
, 0);
518 if (relbgn
>= relend
)
520 rtype
= ELF_R_TYPE(((Rel
*)relbgn
)->r_info
, M_MACH
);
523 roffset
= ((Rel
*)relbgn
)->r_offset
;
526 * If this is a shared object, add the base address to offset.
528 if (!(FLAGS(lmp
) & FLG_RT_FIXED
)) {
530 * If we're processing lazy bindings, we have to step
531 * through the plt entries and add the base address
532 * to the corresponding got entry.
534 if (plthint
&& (plt
== 0) &&
535 (rtype
== R_386_JMP_SLOT
) &&
536 ((MODE(lmp
) & RTLD_NOW
) == 0)) {
537 relbgn
= elf_reloc_relative_count(relbgn
,
538 plthint
, relsiz
, basebgn
, lmp
, textrel
, 0);
545 rsymndx
= ELF_R_SYM(((Rel
*)relbgn
)->r_info
);
552 if (rtype
== R_386_NONE
)
554 if (noplt
&& ((ulong_t
)rel
>= pltbgn
) &&
555 ((ulong_t
)rel
< pltend
)) {
561 * If we're promoting plts, determine if this one has already
564 if (plt
&& ((*(ulong_t
*)roffset
< _pltbgn
) ||
565 (*(ulong_t
*)roffset
> _pltend
)))
569 * If this relocation is not against part of the image
570 * mapped into memory we skip it.
572 if ((mpp
= find_segment((caddr_t
)roffset
, lmp
)) == NULL
) {
573 elf_reloc_bad(lmp
, (void *)rel
, rtype
, roffset
,
580 * If a symbol index is specified then get the symbol table
581 * entry, locate the symbol definition, and determine its
586 * If a Syminfo section is provided, determine if this
587 * symbol is deferred, and if so, skip this relocation.
589 if (sip
&& is_sym_deferred((ulong_t
)rel
, basebgn
, lmp
,
590 textrel
, sip
, rsymndx
))
594 * Get the local symbol table entry.
596 symref
= (Sym
*)((ulong_t
)SYMTAB(lmp
) +
597 (rsymndx
* SYMENT(lmp
)));
600 * If this is a local symbol, just use the base address.
601 * (we should have no local relocations in the
604 if (ELF_ST_BIND(symref
->st_info
) == STB_LOCAL
) {
609 * Special case TLS relocations.
611 if (rtype
== R_386_TLS_DTPMOD32
) {
615 value
= TLSMODID(lmp
);
617 } else if (rtype
== R_386_TLS_TPOFF
) {
618 if ((value
= elf_static_tls(lmp
, symref
,
619 rel
, rtype
, 0, roffset
, 0)) == 0) {
626 * If the symbol index is equal to the previous
627 * symbol index relocation we processed then
628 * reuse the previous values. (Note that there
629 * have been cases where a relocation exists
630 * against a copy relocation symbol, our ld(1)
631 * should optimize this away, but make sure we
632 * don't use the same symbol information should
635 if ((rsymndx
== psymndx
) &&
636 (rtype
!= R_386_COPY
)) {
639 DBG_CALL(Dbg_bind_weak(lmp
,
640 (Addr
)roffset
, (Addr
)
641 (roffset
- basebgn
), name
));
657 if ((LIST(_lmp
)->lm_tflags
|
659 LML_TFLG_AUD_SYMBIND
) {
660 value
= audit_symbind(lmp
, _lmp
,
662 symdef
, dsymndx
, value
,
670 * Lookup the symbol definition.
671 * Initialize the symbol lookup, and
672 * symbol result, data structures.
674 name
= (char *)(STRTAB(lmp
) +
677 SLOOKUP_INIT(sl
, name
, lmp
, 0,
678 ld_entry_cnt
, 0, rsymndx
, symref
,
679 rtype
, LKUP_STDRELOC
);
680 SRESULT_INIT(sr
, name
);
683 if (lookup_sym(&sl
, &sr
, &binfo
,
685 name
= (char *)sr
.sr_name
;
691 * If the symbol is not found and the
692 * reference was not to a weak symbol,
693 * report an error. Weak references
698 if (sl
.sl_bind
!= STB_WEAK
) {
699 if (elf_reloc_error(lmp
, name
,
710 DBG_CALL(Dbg_bind_weak(lmp
,
711 (Addr
)roffset
, (Addr
)
712 (roffset
- basebgn
), name
));
719 * If symbol was found in an object
720 * other than the referencing object
721 * then record the binding.
723 if ((lmp
!= _lmp
) && ((FLAGS1(_lmp
) &
724 FL1_RT_NOINIFIN
) == 0)) {
725 if (aplist_test(&bound
, _lmp
,
726 AL_CNT_RELBIND
) == 0) {
733 * Calculate the location of definition;
734 * symbol value plus base address of
735 * containing shared object.
738 value
= symdef
->st_size
;
740 value
= symdef
->st_value
;
742 if (!(FLAGS(_lmp
) & FLG_RT_FIXED
) &&
744 (symdef
->st_shndx
!= SHN_ABS
) &&
745 (ELF_ST_TYPE(symdef
->st_info
) !=
750 * Retain this symbol index and the
751 * value in case it can be used for the
752 * subsequent relocations.
754 if (rtype
!= R_386_COPY
) {
763 if ((LIST(_lmp
)->lm_tflags
|
765 LML_TFLG_AUD_SYMBIND
) {
766 dsymndx
= (((uintptr_t)symdef
-
767 (uintptr_t)SYMTAB(_lmp
)) /
769 value
= audit_symbind(lmp
, _lmp
,
770 symdef
, dsymndx
, value
,
776 * If relocation is PC-relative, subtract
779 if (IS_PC_RELATIVE(rtype
))
783 * Special case TLS relocations.
785 if (rtype
== R_386_TLS_DTPMOD32
) {
787 * Relocation value is the TLS modid.
789 value
= TLSMODID(_lmp
);
791 } else if (rtype
== R_386_TLS_TPOFF
) {
792 if ((value
= elf_static_tls(_lmp
,
793 symdef
, rel
, rtype
, name
, roffset
,
804 if (rtype
== R_386_TLS_DTPMOD32
) {
806 * TLS relocation value is the TLS modid.
808 value
= TLSMODID(lmp
);
815 DBG_CALL(Dbg_reloc_in(LIST(lmp
), ELF_DBG_RTLD
, M_MACH
,
816 M_REL_SHT_TYPE
, rel
, NULL
, 0, name
));
819 * Make sure the segment is writable.
821 if (((mpp
->mr_prot
& PROT_WRITE
) == 0) &&
822 ((set_prot(lmp
, mpp
, 1) == 0) ||
823 (aplist_append(textrel
, mpp
, AL_CNT_TEXTREL
) == NULL
))) {
829 * Call relocation routine to perform required relocation.
833 if (elf_copy_reloc(name
, symref
, lmp
, (void *)roffset
,
834 symdef
, _lmp
, (const void *)value
) == 0)
838 if (((LIST(lmp
)->lm_tflags
| AFLAGS(lmp
)) &
839 (LML_TFLG_AUD_PLTENTER
| LML_TFLG_AUD_PLTEXIT
)) &&
840 AUDINFO(lmp
)->ai_dynplts
) {
842 int pltndx
= (((ulong_t
)rel
-
843 (uintptr_t)JMPREL(lmp
)) / relsiz
);
844 int symndx
= (((uintptr_t)symdef
-
845 (uintptr_t)SYMTAB(_lmp
)) / SYMENT(_lmp
));
847 (void) elf_plt_trace_write(roffset
, lmp
, _lmp
,
848 symdef
, symndx
, pltndx
, (caddr_t
)value
,
854 * Write standard PLT entry to jump directly
855 * to newly bound function.
857 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp
),
858 ELF_DBG_RTLD
, (Xword
)roffset
,
860 *(ulong_t
*)roffset
= value
;
865 * Write the relocation out.
867 if (do_reloc_rtld(rtype
, (uchar_t
*)roffset
,
868 (Word
*)&value
, name
, NAME(lmp
), LIST(lmp
)) == 0)
871 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp
), ELF_DBG_RTLD
,
872 (Xword
)roffset
, (Xword
)value
));
876 ((LIST(lmp
)->lm_flags
& LML_FLG_TRC_WARN
) == 0))
880 DBG_CALL(Dbg_bind_global(lmp
, (Addr
)roffset
,
881 (Off
)(roffset
- basebgn
), (Xword
)(-1), PLT_T_FULL
,
882 _lmp
, (Addr
)value
, symdef
->st_value
, name
, binfo
));
886 return (relocate_finish(lmp
, bound
, ret
));
890 * Initialize the first few got entries so that function calls go to
893 * GOT[GOT_XLINKMAP] = the address of the link map
894 * GOT[GOT_XRTLD] = the address of rtbinder
/*
 * elf_plt_init: seed the reserved GOT entries of a freshly mapped object
 * so that lazy function bindings enter the runtime linker:
 * GOT[M_GOT_XLINKMAP] receives the object's link-map address and
 * GOT[M_GOT_XRTLD] the address of elf_rtbndr (the assembly binding stub).
 *
 * NOTE(review): garbled extraction — the store of the link-map pointer
 * through _got (between the two offset computations, original line 904)
 * is missing from this view; presumably `*_got = (uint_t)lmp;` — TODO
 * confirm against the complete source.
 */
897 elf_plt_init(void *got
, caddr_t l
)
901 Rt_map
*lmp
= (Rt_map
*)l
;
903 _got
= (uint_t
*)got
+ M_GOT_XLINKMAP
;
905 _got
= (uint_t
*)got
+ M_GOT_XRTLD
;
906 *_got
= (uint_t
)elf_rtbndr
;
910 * For SVR4 Intel compatability. USL uses /usr/lib/libc.so.1 as the run-time
911 * linker, so the interpreter's address will differ from /usr/lib/ld.so.1.
912 * Further, USL has special _iob[] and _ctype[] processing that makes up for the
913 * fact that these arrays do not have associated copy relocations. So we try
914 * and make up for that here. Any relocations found will be added to the global
915 * copy relocation list and will be processed in setup().
/*
 * _elf_copy_reloc: SVR4/USL compatibility helper.  Verifies that `name`
 * (the special _iob[]/_ctype[] symbols, per the caller) is referenced by
 * the dynamic executable and defined in libc.so.1, computes the reference
 * and definition addresses (adjusted by load base for non-fixed objects),
 * fabricates an R_386_COPY Rel for debugging output, and hands off to
 * elf_copy_reloc() to size-check and queue the copy relocation.
 *
 * NOTE(review): garbled extraction — local declarations, the early-return
 * bodies after the failed lookups/strcmp, and the base-address adjustment
 * statements after the FLG_RT_FIXED tests are missing from this view;
 * statements are wrapped mid-token with original line numbers fused in.
 */
918 _elf_copy_reloc(const char *name
, Rt_map
*rlmp
, Rt_map
*dlmp
)
920 Sym
*symref
, *symdef
;
929 * Determine if the special symbol exists as a reference in the dynamic
930 * executable, and that an associated definition exists in libc.so.1.
932 * Initialize the symbol lookup, and symbol result, data structures.
934 SLOOKUP_INIT(sl
, name
, rlmp
, rlmp
, ld_entry_cnt
, 0, 0, 0, 0,
936 SRESULT_INIT(sr
, name
);
938 if (lookup_sym(&sl
, &sr
, &binfo
, NULL
) == 0)
/* Second lookup: find the definition starting from dlmp. */
942 SLOOKUP_INIT(sl
, name
, rlmp
, dlmp
, ld_entry_cnt
, 0, 0, 0, 0,
944 SRESULT_INIT(sr
, name
);
946 if (lookup_sym(&sl
, &sr
, &binfo
, NULL
) == 0)
/* Only honor a definition that comes from libc.so.1 itself. */
952 if (strcmp(NAME(sr
.sr_dmap
), MSG_ORIG(MSG_PTH_LIBC
)))
956 * Determine the reference and definition addresses.
958 ref
= (void *)(symref
->st_value
);
959 if (!(FLAGS(rlmp
) & FLG_RT_FIXED
))
961 def
= (void *)(symdef
->st_value
);
962 if (!(FLAGS(sr
.sr_dmap
) & FLG_RT_FIXED
))
966 * Set up a relocation entry for debugging and call the generic copy
967 * relocation function to provide symbol size error checking and to
968 * record the copy relocation that must be performed.
970 rel
.r_offset
= (Addr
)ref
;
971 rel
.r_info
= (Word
)R_386_COPY
;
972 DBG_CALL(Dbg_reloc_in(LIST(rlmp
), ELF_DBG_RTLD
, M_MACH
, M_REL_SHT_TYPE
,
973 &rel
, NULL
, 0, name
));
975 return (elf_copy_reloc((char *)name
, symref
, rlmp
, (void *)ref
, symdef
,
/*
 * elf_copy_gen: if the interpreter is /usr/lib/libc.so.1 (USL-style SVR4
 * setup) and is not this runtime linker itself (its mapped address differs
 * from the recorded r_ldbase), synthesize copy relocations for the _ctype
 * and _iob special symbols via _elf_copy_reloc() against the executable's
 * first dependency.
 * NOTE(review): garbled extraction — the failure-handling bodies of the
 * two `== 0` tests, the return statements, and closing braces are missing
 * from this view.
 */
980 elf_copy_gen(Rt_map
*lmp
)
982 if (interp
&& ((ulong_t
)interp
->i_faddr
!=
983 r_debug
.rtd_rdebug
.r_ldbase
) &&
984 !(strcmp(interp
->i_name
, MSG_ORIG(MSG_PTH_LIBC
)))) {
986 DBG_CALL(Dbg_reloc_run(lmp
, M_REL_SHT_TYPE
, 0,
989 if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_CTYPE
), lmp
,
990 (Rt_map
*)NEXT(lmp
)) == 0)
992 if (_elf_copy_reloc(MSG_ORIG(MSG_SYM_IOB
), lmp
,
993 (Rt_map
*)NEXT(lmp
)) == 0)
1000 * Plt writing interface to allow debugging initialization to be generic.
/*
 * elf_plt_write: plt-writing interface kept machine-specific so debugging
 * initialization can stay generic.  Computes the got slot address from the
 * object's base `addr` plus the relocation's r_offset, stores the resolved
 * symbol value there, bumps the full-plt debug counter, and reports
 * PLT_T_FULL (a direct rewrite).
 * NOTE(review): garbled extraction — the local declaration of pltaddr,
 * any trailing parameters after symval, and the use (if any) of `vaddr`
 * are not visible in this view.
 */
1004 elf_plt_write(uintptr_t addr
, uintptr_t vaddr
, void *rptr
, uintptr_t symval
,
1007 Rel
*rel
= (Rel
*)rptr
;
1010 pltaddr
= addr
+ rel
->r_offset
;
1011 *(ulong_t
*)pltaddr
= (ulong_t
)symval
;
1012 DBG_CALL(pltcntfull
++);
1013 return (PLT_T_FULL
);
1017 * Provide a machine specific interface to the conversion routine. By calling
1018 * the machine specific version, rather than the generic version, we insure that
1019 * the data tables/strings for all known machine versions aren't dragged into
/*
 * _conv_reloc_type: machine-specific wrapper over conv_reloc_386_type()
 * so that only the i386 conversion tables/strings are linked into this
 * binary.  Formats relocation type `rel` using a function-local static
 * conversion buffer — the result is therefore not reentrant-safe.
 */
1023 _conv_reloc_type(uint_t rel
)
1025 static Conv_inv_buf_t inv_buf
;
1027 return (conv_reloc_386_type(rel
, 0, &inv_buf
));