/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */
/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include <sys/elf_amd64.h>
#include <krtld/reloc.h>
#include "_inline_gen.h"
#include "_inline_reloc.h"
extern void	elf_rtbndr(Rt_map *, ulong_t, caddr_t);
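/*
 * Note: elf_rtbndr is the assembly-language entry point reached through the
 * reserved first .plt entry on the first call to a lazily bound function.
 * It gathers the relocation index and link-map pointer pushed by plt0 and
 * hands them to elf_bndr() below.
 */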
int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_flags != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}
void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x55,				/* pushq %rbp */
/* 0x01 */	0x48, 0x89, 0xe5,		/* movq %rsp, %rbp */
/* 0x04 */	0x48, 0x83, 0xec, 0x10,		/* subq $0x10, %rsp */
/* 0x08 */	0x4c, 0x8d, 0x1d, 0x00,		/* leaq trace_fields(%rip), %r11 */
		0x00, 0x00, 0x00,
/* 0x0f */	0x4c, 0x89, 0x5d, 0xf8,		/* movq %r11, -0x8(%rbp) */
/* 0x13 */	0x49, 0xbb, 0x00, 0x00,		/* movq $elf_plt_trace, %r11 */
		0x00, 0x00, 0x00, 0x00,
		0x00, 0x00,
/* 0x1d */	0x41, 0xff, 0xe3		/* jmp *%r11 */
/* 0x20 */
};
/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *	reloc		offset	Addend	symbol
 *	R_AMD64_PC32	0x0b	-4	trace_fields
 *	R_AMD64_64	0x15	0	elf_plt_trace
 */

#define	TRCREL1OFF	0x0b
#define	TRCREL2OFF	0x15
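/*
 * A quick sanity check of those patch offsets against the template above:
 * the leaq at 0x08 encodes as 4c 8d 1d <disp32>, so its 4-byte displacement
 * begins at 0x08 + 3 = 0x0b (TRCREL1OFF); the movq at 0x13 encodes as
 * 49 bb <imm64>, so its 8-byte immediate begins at 0x13 + 2 = 0x15
 * (TRCREL2OFF).
 */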
int	dyn_plt_ent_size = sizeof (dyn_plt_template);
/*
 * the dynamic plt entry is:
 *
 *	pushq	%rbp
 *	movq	%rsp, %rbp
 *	subq	$0x10, %rsp
 *	leaq	trace_fields(%rip), %r11
 *	movq	%r11, -0x8(%rbp)
 *	movq	$elf_plt_trace, %r11
 *	jmp	*%r11
 * dyn data:
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	uint_t		symndx
 *	uint_t		sb_flags
 *	Sym		symdef
 */
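/*
 * Note: the dyn data block is what elf_plt_trace_write() populates below.
 * It sits immediately after the code template, rounded up to M_WORD_ALIGN,
 * and is the address the leaq above materializes into %r11 for
 * elf_plt_trace().
 */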
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
	extern int	elf_plt_trace();
	ulong_t		got_entry;
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;
	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));
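	/*
	 * (ai_dynplts is a per-object array with one dyn_plt_ent_size slot
	 * per .plt entry, so the pltndx scaling above selects this entry's
	 * private glue block.)
	 */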
	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't do it
	 * now.  Otherwise this function has been called before, but from a
	 * different plt (ie. from another shared object).  In that case
	 * we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));
		/*
		 * relocate:
		 *	leaq	trace_fields(%rip), %r11
		 *	R_AMD64_PC32	0x0b	-4	trace_fields
		 */
		symvalue = (Xword)((uintptr_t)dyndata -
		    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
		if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}
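		/*
		 * (The -4 above reflects that a PC32 displacement is
		 * measured from the end of the 4-byte field being patched,
		 * i.e. from &dyn_plt[TRCREL1OFF] + 4, matching the -4
		 * addend listed in the relocation table comment.)
		 */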
		/*
		 * relocating:
		 *	movq	$elf_plt_trace, %r11
		 *	R_AMD64_64	0x15	0	elf_plt_trace
		 */
		symvalue = (Xword)elf_plt_trace;
		if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}
		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
		dyndata++;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_value = (Addr)to;
	}
	got_entry = (ulong_t)roffset;
	*(ulong_t *)got_entry = (ulong_t)dyn_plt;
	return ((caddr_t)dyn_plt);
}
/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table; passes first through an assembly language
 * interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for this object.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
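/*
 * In outline, lazy binding on amd64 proceeds as follows: the call lands on
 * an unresolved .plt entry, which jumps through its .got slot back to plt0;
 * plt0 enters elf_rtbndr, which calls elf_bndr() with the relocation index;
 * elf_bndr() resolves the symbol and writes the resolved address (or the
 * auditing glue built by elf_plt_trace_write()) back into the .got slot, so
 * subsequent calls go direct.
 */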
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	Sresult		sr;
	int		entry, lmflags;
	Lm_list		*lml;
	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}
	/*
	 * Perform some basic sanity checks.  If we didn't get a load map or
	 * the relocation offset is invalid then it's possible someone has
	 * walked over the .got entries or jumped to plt0 out of the blue.
	 */
	if ((!lmp) || (pltndx >=
	    ((ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp)))) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * (ulong_t)RELENT(lmp);
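	/*
	 * (Each .rela.plt entry is RELENT(lmp) bytes - sizeof (Rela), i.e.
	 * 24 bytes on amd64 - so reloff is the byte offset of this entry
	 * within the JMPREL table.)
	 */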
	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);
	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;
	/*
	 * Find definition for symbol.  Initialize the symbol lookup, and
	 * symbol result, data structures.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	name = (char *)sr.sr_name;
	nlmp = sr.sr_dmap;
	nsym = sr.sr_sym;
	symval = nsym->st_value;

	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}
	if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
	    LML_TFLG_AUD_SYMBIND) {
		uint_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}
	if (!(rtld_flags & RT_FL_NOBIND)) {
		addr = rptr->r_offset;
		if (!(FLAGS(lmp) & FLG_RT_FIXED))
			addr += ADDR(lmp);
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			uint_t	pltndx = reloff / sizeof (Rela);
			uint_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
			    nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
			    &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			*(ulong_t *)addr = symval;
		}
	}
	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
	    nsym->st_value, name, binfo));
	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);
	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);
	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}
	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}
/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn;
	ulong_t		pltbgn, pltend, _pltbgn, _pltend;
	ulong_t		roffset, rsymndx, psymndx = 0;
	ulong_t		dsymndx;
	uchar_t		rtype;
	long		reladd, value, pvalue;
	Sym		*symref, *psymref, *symdef, *psymdef;
	Syminfo		*sip = SYMINFO(lmp);
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	int		relacount = RELACOUNT(lmp), plthint = 0;
	Rela		*rel;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;
	/*
	 * Although only necessary for lazy binding, initialize the first
	 * global offset entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}
	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	if (PLTRELSZ(lmp))
		plthint = PLTRELSZ(lmp) / relsiz;
	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 * An uninitialized .plt is the case where the associated got entry
	 * points back to the plt itself.  Determine the range of the real .plt
	 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
	 */
	if (plt) {
		Slookup	sl;
		Sresult	sr;

		relbgn = pltbgn;
		relend = pltend;
		if (!relbgn || (relbgn == relend))
			return (1);

		/*
		 * Initialize the symbol lookup, and symbol result, data
		 * structures.
		 */
		SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
		    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
		SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

		if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
			return (1);

		symdef = sr.sr_sym;
		_pltbgn = symdef->st_value;
		if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
		    (symdef->st_shndx != SHN_ABS))
			_pltbgn += basebgn;
		_pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
		    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;
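		/*
		 * (An entry whose got value still lies within
		 * [_pltbgn, _pltend] has never been bound - its got slot
		 * still points back into the .plt - which is the test the
		 * promotion loop applies below before skipping an entry.)
		 */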
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and end
		 * of this table.  There are two different interpretations of
		 * the ABI at this point:
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of *all* relocation sections (this is the
		 *	model our link-editor constructs).
		 *
		 *   o	The REL table and its associated RELSZ indicate the
		 *	concatenation of all *but* the .plt relocations.  These
		 *	relocations are specified individually by the JMPREL and
		 *	PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}
	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
	/*
	 * If we're processing a dynamic executable in lazy mode there is no
	 * need to scan the .rel.plt table, however if we're processing a shared
	 * object in lazy mode the .got addresses associated to each .plt must
	 * be relocated to reflect the location of the shared object.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
	    (FLAGS(lmp) & FLG_RT_FIXED))
		noplt = 1;
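	/*
	 * (noplt is only set for a fixed-address executable: its .got plt
	 * slots already hold usable plt addresses, so the loop below can
	 * step over the whole .rel.plt range in one move.)
	 */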
	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into a
		 * tighter relocation loop (elf_reloc_relative).
		 */
		if ((rtype == R_AMD64_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp,
				    textrel, 0);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel, 0);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		}
		roffset = ((Rela *)relbgn)->r_offset;

		/*
		 * If this is a shared object, add the base address to offset.
		 */
		if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
			/*
			 * If we're processing lazy bindings, we have to step
			 * through the plt entries and add the base address
			 * to the corresponding got entry.
			 */
			if (plthint && (plt == 0) &&
			    (rtype == R_AMD64_JUMP_SLOT) &&
			    ((MODE(lmp) & RTLD_NOW) == 0)) {
				relbgn = elf_reloc_relative_count(relbgn,
				    plthint, relsiz, basebgn, lmp, textrel, 1);
				plthint = 0;
				continue;
			}
			roffset += basebgn;
		}
= (long)(((Rela
*)relbgn
)->r_addend
);
568 rsymndx
= ELF_R_SYM(((Rela
*)relbgn
)->r_info
);
569 rel
= (Rela
*)relbgn
;
		if (rtype == R_AMD64_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}
		/*
		 * If we're promoting plts, determine if this one has already
		 * been bound.
		 */
		if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
		    (*(ulong_t *)roffset > _pltend)))
			continue;
		/*
		 * If this relocation is not against part of the image
		 * mapped into memory we skip it.
		 */
		if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
			elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
			    rsymndx);
			continue;
		}
		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * If a Syminfo section is provided, determine if this
			 * symbol is deferred, and if so, skip this relocation.
			 */
			if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
			    textrel, sip, rsymndx))
				continue;

			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));
			/*
			 * If this is a local symbol, just use the base address.
			 * (we should have no local relocations in the
			 * executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_AMD64_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					/* LINTED */
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(lmp) | AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup, and
					 * symbol result, data structure.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}
					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may not be satisfied.
					 */
					if (symdef == 0) {
						if (sl.sl_bind != STB_WEAK) {
							if (elf_reloc_error(lmp, name,
							    rel, binfo))
								continue;

							ret = 0;
							break;

						} else {
							psymndx = rsymndx;
							psymdef = 0;

							DBG_CALL(Dbg_bind_weak(lmp,
							    (Addr)roffset, (Addr)
							    (roffset - basebgn), name));
							continue;
						}
					}
					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}
					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);
					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_AMD64_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(lmp) | AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}
				/*
				 * If relocation is PC-relative, subtract
				 * the offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;
				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_AMD64_DTPMOD64) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if ((rtype == R_AMD64_TPOFF64) ||
				    (rtype == R_AMD64_TPOFF32)) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			if (rtype == R_AMD64_DTPMOD64) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;

			name = NULL;
		}
		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));
		/*
		 * Make sure the segment is writable.
		 */
		if (((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}
		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_AMD64_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_AMD64_JUMP_SLOT:
			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				int	pltndx = (((ulong_t)rel -
				    (uintptr_t)JMPREL(lmp)) / relsiz);
				int	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write(roffset, lmp, _lmp,
				    symdef, symndx, pltndx, (caddr_t)value,
				    sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				*(ulong_t *)roffset = value;
			}
			break;
		default:
			/*
			 * Write the relocation out.
			 */
			if (do_reloc_rtld(rtype, (uchar_t *)roffset,
			    (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
				ret = 0;

			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));
		}
		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}
	return (relocate_finish(lmp, bound, ret));
}
/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *	GOT[GOT_XLINKMAP] =	the address of the link map
 *	GOT[GOT_XRTLD] =	the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
	uint64_t	*_got;
	/* LINTED */
	Rt_map		*lmp = (Rt_map *)l;

	_got = (uint64_t *)got + M_GOT_XLINKMAP;
	*_got = (uint64_t)lmp;
	_got = (uint64_t *)got + M_GOT_XRTLD;
	*_got = (uint64_t)elf_rtbndr;
}
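/*
 * (Per the amd64 psABI, GOT[0] is reserved for the _DYNAMIC address; the
 * two slots filled above are the reserved GOT[1] and GOT[2] entries that
 * plt0 pushes and jumps through when entering elf_rtbndr.)
 */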
/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	pltaddr;

	pltaddr = addr + rel->r_offset;
	*(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}
/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}