usr/src/cmd/sgs/rtld/sparc/sparc_elf.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1988 AT&T
 * All Rights Reserved
 *
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */
/*
 * SPARC machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */
#include	<stdio.h>
#include	<sys/elf.h>
#include	<sys/elf_SPARC.h>
#include	<sys/mman.h>
#include	<dlfcn.h>
#include	<synch.h>
#include	<string.h>
#include	<debug.h>
#include	<reloc.h>
#include	<conv.h>
#include	"_rtld.h"
#include	"_audit.h"
#include	"_elf.h"
#include	"_inline_gen.h"
#include	"_inline_reloc.h"
#include	"msg.h"
extern void	iflush_range(caddr_t, size_t);
extern void	plt_full_range(uintptr_t, uintptr_t);
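
/*
 * Both helpers above are defined elsewhere in rtld's SPARC-specific sources:
 * iflush_range() flushes the instruction cache over the given address range
 * so that freshly written PLT words become visible to the instruction
 * stream, and plt_full_range() rewrites a PLT entry with the long-form
 * sequence used when the destination is out of branch range (see
 * elf_plt_write() below).
 */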
int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
	/*
	 * Check machine type and flags.
	 */
	if (ehdr->e_machine != EM_SPARC) {
		if (ehdr->e_machine != EM_SPARC32PLUS) {
			rej->rej_type = SGS_REJ_MACH;
			rej->rej_info = (uint_t)ehdr->e_machine;
			return (0);
		}
		if ((ehdr->e_flags & EF_SPARC_32PLUS) == 0) {
			rej->rej_type = SGS_REJ_MISFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
		if ((ehdr->e_flags & ~at_flags) & EF_SPARC_32PLUS_MASK) {
			rej->rej_type = SGS_REJ_BADFLAG;
			rej->rej_info = (uint_t)ehdr->e_flags;
			return (0);
		}
	} else if ((ehdr->e_flags & ~EF_SPARCV9_MM) != 0) {
		rej->rej_type = SGS_REJ_BADFLAG;
		rej->rej_info = (uint_t)ehdr->e_flags;
		return (0);
	}
	return (1);
}
void
ldso_plt_init(Rt_map *lmp)
{
	/*
	 * There is no need to analyze ld.so because we don't map in any of
	 * its dependencies.  However we may map these dependencies in later
	 * (as if ld.so had dlopened them), so initialize the plt and the
	 * permission information.
	 */
	if (PLTGOT(lmp))
		elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}
/*
 * elf_plt_write() will test to see how far away our destination
 * address lies.  If it is close enough that a branch can
 * be used instead of a jmpl - we will fill the plt in with
 * a single branch.  The branches are much quicker than
 * a jmpl instruction - see bug#4356879 for further
 * details.
 *
 * NOTE: we pass in both a 'pltaddr' and a 'vpltaddr' since
 *	librtld/dldump update PLTs whose physical
 *	address is not the same as the 'virtual' runtime
 *	address.
 */
Pltbindtype
/* ARGSUSED4 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
	Rela		*rel = (Rela *)rptr;
	uintptr_t	vpltaddr, pltaddr;
	long		disp;

	pltaddr = addr + rel->r_offset;
	vpltaddr = vaddr + rel->r_offset;
	disp = symval - vpltaddr - 4;

	/*
	 * Test if the destination address is close enough to use
	 * a ba,a... instruction to reach it.
	 */
	if (S_INRANGE(disp, 23) && !(rtld_flags & RT_FL_NOBAPLT)) {
		uint_t		*pltent, bainstr;
		Pltbindtype	rc;

		pltent = (uint_t *)pltaddr;

		/*
		 * The
		 *
		 *	ba,a,pt %icc, <dest>
		 *
		 * is the most efficient of the PLT's.  If we
		 * are within +-20 bits *and* running on a
		 * v8plus architecture - use that branch.
		 */
		if ((at_flags & EF_SPARC_32PLUS) &&
		    S_INRANGE(disp, 20)) {
			bainstr = M_BA_A_PT;	/* ba,a,pt %icc,<dest> */
			bainstr |= (S_MASK(19) & (disp >> 2));
			rc = PLT_T_21D;
			DBG_CALL(pltcnt21d++);
		} else {
			/*
			 * Otherwise - we fall back to the good old
			 *
			 *	ba,a <dest>
			 *
			 * which still beats a jmpl instruction.
			 */
			bainstr = M_BA_A;	/* ba,a <dest> */
			bainstr |= (S_MASK(22) & (disp >> 2));
			rc = PLT_T_24D;
			DBG_CALL(pltcnt24d++);
		}

		pltent[2] = M_NOP;		/* nop instr */
		pltent[1] = bainstr;

		iflush_range((char *)(&pltent[1]), 4);
		pltent[0] = M_NOP;		/* nop instr */
		iflush_range((char *)(&pltent[0]), 4);
		return (rc);
	}

	/*
	 * The PLT destination is not in reach of
	 * a branch instruction - so we fall back
	 * to a 'jmpl' sequence.
	 */
	plt_full_range(pltaddr, symval);
	DBG_CALL(pltcntfull++);
	return (PLT_T_FULL);
}
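
/*
 * For reference (a sketch, not taken from the assembly sources): a PLT
 * entry rewritten by the branch path above ends up as
 *
 *	nop				! pltent[0]
 *	ba,a[,pt %icc,] <dest>		! pltent[1]
 *	nop				! pltent[2]
 *
 * while plt_full_range() is expected to install the traditional
 * sethi %hi(dest), %g1 / jmpl %g1 + %lo(dest), %g0 / nop sequence.  The
 * branch word is written and flushed before the first word is replaced,
 * presumably so that a thread already executing through the old entry
 * never sees a half-updated one.
 */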
/*
 * Local storage space created on the stack for this glue
 * code includes space for:
 *	0x4	pointer to dyn_data
 *	0x4	size prev stack frame
 */
static const uchar_t dyn_plt_template[] = {
/* 0x00 */	0x80, 0x90, 0x00, 0x1e,	/* tst	 %fp */
/* 0x04 */	0x02, 0x80, 0x00, 0x04,	/* be	 0x14 */
/* 0x08 */	0x82, 0x27, 0x80, 0x0e,	/* sub	 %sp, %fp, %g1 */
/* 0x0c */	0x10, 0x80, 0x00, 0x03,	/* ba	 0x20 */
/* 0x10 */	0x01, 0x00, 0x00, 0x00,	/* nop */
/* 0x14 */	0x82, 0x10, 0x20, 0x60,	/* mov	 0x60, %g1 */
/* 0x18 */	0x9d, 0xe3, 0xbf, 0x98,	/* save	 %sp, -0x68, %sp */
/* 0x1c */	0xc2, 0x27, 0xbf, 0xf8,	/* st	 %g1, [%fp + -0x8] */
/* 0x20 */	0x03, 0x00, 0x00, 0x00,	/* sethi %hi(val), %g1 */
/* 0x24 */	0x82, 0x10, 0x60, 0x00,	/* or	 %g1, %lo(val), %g1 */
/* 0x28 */	0x40, 0x00, 0x00, 0x00,	/* call	 <rel_addr> */
/* 0x2c */	0xc2, 0x27, 0xbf, 0xfc	/* st	 %g1, [%fp + -0x4] */
};
int	dyn_plt_ent_size = sizeof (dyn_plt_template) +
	sizeof (uintptr_t) +	/* reflmp */
	sizeof (uintptr_t) +	/* deflmp */
	sizeof (ulong_t) +	/* symndx */
	sizeof (ulong_t) +	/* sb_flags */
	sizeof (Sym);		/* symdef */
/*
 * the dynamic plt entry is:
 *
 *	tst	%fp
 *	be	1f
 *	nop
 *	sub	%sp, %fp, %g1
 *	ba	2f
 *	nop
 * 1:
 *	mov	SA(MINFRAME), %g1	! if %fp is null this is the
 *					!   'minimum stack'.  %fp is null
 *					!   on the initial stack frame
 * 2:
 *	save	%sp, -(SA(MINFRAME) + 2 * CLONGSIZE), %sp
 *	st	%g1, [%fp + -0x8]	! store prev_stack size in [%fp - 8]
 *	sethi	%hi(dyn_data), %g1
 *	or	%g1, %lo(dyn_data), %g1
 *	call	elf_plt_trace
 *	st	%g1, [%fp + -0x4]	! store dyn_data ptr in [%fp - 4]
 * dyn data:
 *	uintptr_t	reflmp
 *	uintptr_t	deflmp
 *	ulong_t		symndx
 *	ulong_t		sb_flags
 *	Sym		symdef
 */
static caddr_t
elf_plt_trace_write(caddr_t addr, Rela *rptr, Rt_map *rlmp, Rt_map *dlmp,
    Sym *sym, ulong_t symndx, ulong_t pltndx, caddr_t to, ulong_t sb_flags,
    int *fail)
{
	extern ulong_t	elf_plt_trace();
	uchar_t		*dyn_plt;
	uintptr_t	*dyndata;

	/*
	 * If both pltenter & pltexit have been disabled there
	 * is no reason to even create the glue code.
	 */
	if ((sb_flags & (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) ==
	    (LA_SYMB_NOPLTENTER | LA_SYMB_NOPLTEXIT)) {
		(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr,
		    rptr, (uintptr_t)to, pltndx);
		return (to);
	}

	/*
	 * We only need to add the glue code if there is an auditing
	 * library that is interested in this binding.
	 */
	dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
	    (pltndx * dyn_plt_ent_size));

	/*
	 * Have we initialized this dynamic plt entry yet?  If we haven't,
	 * do it now.  Otherwise this function has been called before, but
	 * from a different plt (ie. from another shared object).  In that
	 * case we just set the plt to point to the new dyn_plt.
	 */
	if (*dyn_plt == 0) {
		Sym	*symp;
		Xword	symvalue;
		Lm_list	*lml = LIST(rlmp);

		(void) memcpy((void *)dyn_plt, dyn_plt_template,
		    sizeof (dyn_plt_template));
		dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
		    sizeof (dyn_plt_template));

		/*
		 * relocating:
		 *	sethi	%hi(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc_rtld(R_SPARC_HI22, (dyn_plt + 0x20),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	or	%g1, %lo(dyndata), %g1
		 */
		symvalue = (Xword)dyndata;
		if (do_reloc_rtld(R_SPARC_LO10, (dyn_plt + 0x24),
		    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		/*
		 * relocating:
		 *	call	elf_plt_trace
		 */
		symvalue = (Xword)((uintptr_t)&elf_plt_trace -
		    (uintptr_t)(dyn_plt + 0x28));
		if (do_reloc_rtld(R_SPARC_WDISP30, (dyn_plt + 0x28),
		    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
		    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
			*fail = 1;
			return (0);
		}

		*dyndata++ = (uintptr_t)rlmp;
		*dyndata++ = (uintptr_t)dlmp;
		*(ulong_t *)dyndata++ = symndx;
		*(ulong_t *)dyndata++ = sb_flags;
		symp = (Sym *)dyndata;
		*symp = *sym;
		symp->st_name += (Word)STRTAB(dlmp);
		symp->st_value = (Addr)to;

		iflush_range((void *)dyn_plt, sizeof (dyn_plt_template));
	}

	(void) elf_plt_write((uintptr_t)addr, (uintptr_t)addr, rptr,
	    (uintptr_t)dyn_plt, 0);
	return ((caddr_t)dyn_plt);
}
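
/*
 * Note that the caller's PLT entry is redirected to the glue code above
 * rather than to the final destination, so each call is funnelled through
 * elf_plt_trace(), which gives an interested auditing library its
 * pltenter/pltexit hooks before and after control reaches the bound
 * function.
 */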
/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the address of the PLT entry where the call originated,
 * the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltoff, caddr_t from)
{
	Rt_map		*nlmp, *llmp;
	ulong_t		addr, vaddr, reloff, symval, rsymndx;
	char		*name;
	Rela		*rptr;
	Sym		*rsym, *nsym;
	Xword		pltndx;
	uint_t		binfo, sb_flags = 0, dbg_class;
	Slookup		sl;
	Sresult		sr;
	Pltbindtype	pbtype;
	int		entry, lmflags;
	Lm_list		*lml;

	/*
	 * For compatibility with libthread (TI_VERSION 1) we track the entry
	 * value.  A zero value indicates we have recursed into ld.so.1 to
	 * further process a locking request.  Under this recursion we disable
	 * tsort and cleanup activities.
	 */
	entry = enter(0);

	lml = LIST(lmp);
	if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
		dbg_class = dbg_desc->d_class;
		dbg_desc->d_class = 0;
	}

	/*
	 * Must calculate true plt relocation address from reloc.
	 * Take offset, subtract number of reserved PLT entries, and divide
	 * by PLT entry size, which should give the index of the plt
	 * entry (and relocation entry since they have been defined to be
	 * in the same order).  Then we must multiply by the size of
	 * a relocation entry, which will give us the offset of the
	 * plt relocation entry from the start of them given by JMPREL(lm).
	 */
	addr = pltoff - M_PLT_RESERVSZ;
	pltndx = addr / M_PLT_ENTSIZE;
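
	/*
	 * For illustration only (the actual constants come from the machine
	 * dependent headers): if M_PLT_RESERVSZ covered four reserved
	 * 12-byte entries (0x30) and M_PLT_ENTSIZE were 12, a pltoff of 0x48
	 * would give addr = 0x18 and pltndx = 2, i.e. the third non-reserved
	 * PLT entry and the third entry in the JMPREL relocation table.
	 */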
	/*
	 * Perform some basic sanity checks.  If we didn't get a load map
	 * or the plt offset is invalid then it's possible someone has walked
	 * over the plt entries or jumped to plt[0] out of the blue.
	 */
	if (!lmp || ((addr % M_PLT_ENTSIZE) != 0)) {
		Conv_inv_buf_t	inv_buf;

		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
		    conv_reloc_SPARC_type(R_SPARC_JMP_SLOT, 0, &inv_buf),
		    EC_NATPTR(lmp), EC_XWORD(pltoff), EC_NATPTR(from));
		rtldexit(lml, 1);
	}
	reloff = pltndx * sizeof (Rela);

	/*
	 * Use relocation entry to get symbol table entry and symbol name.
	 */
	addr = (ulong_t)JMPREL(lmp);
	rptr = (Rela *)(addr + reloff);
	rsymndx = ELF_R_SYM(rptr->r_info);
	rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
	name = (char *)(STRTAB(lmp) + rsym->st_name);

	/*
	 * Determine the last link-map of this list, this'll be the starting
	 * point for any tsort() processing.
	 */
	llmp = lml->lm_tail;

	/*
	 * Find definition for symbol.  Initialize the symbol lookup, and
	 * symbol result, data structures.
	 */
	SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
	    rsymndx, rsym, 0, LKUP_DEFT);
	SRESULT_INIT(sr, name);

	if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
		eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
		    demangle(name));
		rtldexit(lml, 1);
	}

	name = (char *)sr.sr_name;
	nlmp = sr.sr_dmap;
	nsym = sr.sr_sym;

	symval = nsym->st_value;

	if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
	    (nsym->st_shndx != SHN_ABS))
		symval += ADDR(nlmp);
	if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
		/*
		 * Record that this new link map is now bound to the caller.
		 */
		if (bind_one(lmp, nlmp, BND_REFER) == 0)
			rtldexit(lml, 1);
	}

	if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
	    LML_TFLG_AUD_SYMBIND) {
		ulong_t	symndx = (((uintptr_t)nsym -
		    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

		symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
		    &sb_flags);
	}

	if (FLAGS(lmp) & FLG_RT_FIXED)
		vaddr = 0;
	else
		vaddr = ADDR(lmp);

	pbtype = PLT_T_NONE;
	if (!(rtld_flags & RT_FL_NOBIND)) {
		if (((lml->lm_tflags | AFLAGS(lmp)) &
		    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
		    AUDINFO(lmp)->ai_dynplts) {
			int	fail = 0;
			ulong_t	symndx = (((uintptr_t)nsym -
			    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

			symval = (ulong_t)elf_plt_trace_write((caddr_t)vaddr,
			    rptr, lmp, nlmp, nsym, symndx, pltndx,
			    (caddr_t)symval, sb_flags, &fail);
			if (fail)
				rtldexit(lml, 1);
		} else {
			/*
			 * Write standard PLT entry to jump directly
			 * to newly bound function.
			 */
			pbtype = elf_plt_write((uintptr_t)vaddr,
			    (uintptr_t)vaddr, rptr, symval, pltndx);
		}
	}

	/*
	 * Print binding information and rebuild PLT entry.
	 */
	DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
	    pltndx, pbtype, nlmp, (Addr)symval, nsym->st_value, name, binfo));

	/*
	 * Complete any processing for newly loaded objects.  Note we don't
	 * know exactly where any new objects are loaded (we know the object
	 * that supplied the symbol, but others may have been loaded lazily as
	 * we searched for the symbol), so sorting starts from the last
	 * link-map known on entry to this routine.
	 */
	if (entry)
		load_completion(llmp);

	/*
	 * Some operations like dldump() or dlopen()'ing a relocatable object
	 * result in objects being loaded on rtld's link-map, make sure these
	 * objects are initialized also.
	 */
	if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
		load_completion(nlmp);

	/*
	 * Make sure the object to which we've bound has had its .init fired.
	 * Cleanup before return to user code.
	 */
	if (entry) {
		is_dep_init(nlmp, lmp);
		leave(lml, 0);
	}

	if (lmflags & LML_FLG_RTLDLM)
		dbg_desc->d_class = dbg_class;

	return (symval);
}
/*
 * Read and process the relocations for one link object, we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
	ulong_t		relbgn, relend, relsiz, basebgn, pltbgn, pltend;
	ulong_t		dsymndx, pltndx, roffset, rsymndx, psymndx = 0;
	uchar_t		rtype;
	long		reladd, value, pvalue, relacount = RELACOUNT(lmp);
	Sym		*symref, *psymref, *symdef, *psymdef;
	Syminfo		*sip;
	char		*name, *pname;
	Rt_map		*_lmp, *plmp;
	int		ret = 1, noplt = 0;
	Rela		*rel;
	Pltbindtype	pbtype;
	uint_t		binfo, pbinfo;
	APlist		*bound = NULL;

	/*
	 * If an object has any DT_REGISTER entries associated with
	 * it, they are processed now.
	 */
	if ((plt == 0) && (FLAGS(lmp) & FLG_RT_REGSYMS)) {
		if (elf_regsyms(lmp) == 0)
			return (0);
	}

	/*
	 * Although only necessary for lazy binding, initialize the first
	 * procedure linkage table entry to go to elf_rtbndr().  dbx(1) seems
	 * to find this useful.
	 */
	if ((plt == 0) && PLTGOT(lmp)) {
		mmapobj_result_t	*mpp;

		/*
		 * Make sure the segment is writable.
		 */
		if ((((mpp =
		    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
			return (0);

		elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
	}

	/*
	 * Initialize the plt start and end addresses.
	 */
	if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
		pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
	/*
	 * If we've been called upon to promote an RTLD_LAZY object to an
	 * RTLD_NOW then we're only interested in scanning the .plt table.
	 */
	if (plt) {
		relbgn = pltbgn;
		relend = pltend;
	} else {
		/*
		 * The relocation sections appear to the run-time linker as a
		 * single table.  Determine the address of the beginning and
		 * end of this table.  There are two different interpretations
		 * of the ABI at this point:
		 *
		 *	- The REL table and its associated RELSZ indicate the
		 *	  concatenation of *all* relocation sections (this is
		 *	  the model our link-editor constructs).
		 *
		 *	- The REL table and its associated RELSZ indicate the
		 *	  concatenation of all *but* the .plt relocations.
		 *	  These relocations are specified individually by the
		 *	  JMPREL and PLTRELSZ entries.
		 *
		 * Determine from our knowledge of the relocation range and
		 * .plt range, the range of the total relocation table.  Note
		 * that one other ABI assumption seems to be that the .plt
		 * relocations always follow any other relocations, the
		 * following range checking drops that assumption.
		 */
		relbgn = (ulong_t)(REL(lmp));
		relend = relbgn + (ulong_t)(RELSZ(lmp));
		if (pltbgn) {
			if (!relbgn || (relbgn > pltbgn))
				relbgn = pltbgn;
			if (!relbgn || (relend < pltend))
				relend = pltend;
		}
	}
	if (!relbgn || (relbgn == relend)) {
		DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
		return (1);
	}

	relsiz = (ulong_t)(RELENT(lmp));
	basebgn = ADDR(lmp);

	DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
	/*
	 * If we're processing in lazy mode there is no need to scan the
	 * .rela.plt table.
	 */
	if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0))
		noplt = 1;

	sip = SYMINFO(lmp);

	/*
	 * Loop through relocations.
	 */
	while (relbgn < relend) {
		mmapobj_result_t	*mpp;
		uint_t			sb_flags = 0;
		Addr			vaddr;

		rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

		/*
		 * If this is a RELATIVE relocation in a shared object (the
		 * common case), and if we are not debugging, then jump into
		 * one of the tighter relocation loops.
		 */
		if ((rtype == R_SPARC_RELATIVE) &&
		    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
			if (relacount) {
				relbgn = elf_reloc_relative_count(relbgn,
				    relacount, relsiz, basebgn, lmp,
				    textrel, 0);
				relacount = 0;
			} else {
				relbgn = elf_reloc_relative(relbgn, relend,
				    relsiz, basebgn, lmp, textrel, 0);
			}
			if (relbgn >= relend)
				break;
			rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
		}

		roffset = ((Rela *)relbgn)->r_offset;

		reladd = (long)(((Rela *)relbgn)->r_addend);
		rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
		rel = (Rela *)relbgn;
		relbgn += relsiz;

		/*
		 * Optimizations.
		 */
		if (rtype == R_SPARC_NONE)
			continue;
		if (noplt && ((ulong_t)rel >= pltbgn) &&
		    ((ulong_t)rel < pltend)) {
			relbgn = pltend;
			continue;
		}

		if (rtype != R_SPARC_REGISTER) {
			/*
			 * If this is a shared object, add the base address
			 * to offset.
			 */
			if (!(FLAGS(lmp) & FLG_RT_FIXED))
				roffset += basebgn;

			/*
			 * If this relocation is not against part of the image
			 * mapped into memory we skip it.
			 */
			if ((mpp = find_segment((caddr_t)roffset,
			    lmp)) == NULL) {
				elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
				    rsymndx);
				continue;
			}
		}

		/*
		 * If we're promoting .plts, try and determine if this one has
		 * already been written.  An uninitialized .plt's second
		 * instruction is a branch.  Note, elf_plt_write() optimizes
		 * .plt relocations, and it's possible that a relocated entry
		 * is a branch.  If this is the case, we can't tell the
		 * difference between an uninitialized .plt and a relocated
		 * .plt that uses a branch.  In this case, we'll simply redo
		 * the relocation calculation, which is a bit sad.
		 */
		if (plt) {
			ulong_t	*_roffset = (ulong_t *)roffset;

			_roffset++;
			if ((*_roffset & (~(S_MASK(22)))) != M_BA_A)
				continue;
		}

		binfo = 0;
		pltndx = (ulong_t)-1;
		pbtype = PLT_T_NONE;

		/*
		 * If a symbol index is specified then get the symbol table
		 * entry, locate the symbol definition, and determine its
		 * address.
		 */
		if (rsymndx) {
			/*
			 * If a Syminfo section is provided, determine if this
			 * symbol is deferred, and if so, skip this relocation.
			 */
			if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
			    textrel, sip, rsymndx))
				continue;

			/*
			 * Get the local symbol table entry.
			 */
			symref = (Sym *)((ulong_t)SYMTAB(lmp) +
			    (rsymndx * SYMENT(lmp)));

			/*
			 * If this is a local symbol, just use the base
			 * address (we should have no local relocations in
			 * the executable).
			 */
			if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
				value = basebgn;
				name = NULL;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_SPARC_TLS_DTPMOD32) {
					/*
					 * Use the TLS modid.
					 */
					value = TLSMODID(lmp);

				} else if (rtype == R_SPARC_TLS_TPOFF32) {
					if ((value = elf_static_tls(lmp, symref,
					    rel, rtype, 0, roffset, 0)) == 0) {
						ret = 0;
						break;
					}
				}
			} else {
				/*
				 * If the symbol index is equal to the previous
				 * symbol index relocation we processed then
				 * reuse the previous values.  (Note that there
				 * have been cases where a relocation exists
				 * against a copy relocation symbol, our ld(1)
				 * should optimize this away, but make sure we
				 * don't use the same symbol information should
				 * this case exist).
				 */
				if ((rsymndx == psymndx) &&
				    (rtype != R_SPARC_COPY)) {
					/* LINTED */
					if (psymdef == 0) {
						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					}
					/* LINTED */
					value = pvalue;
					/* LINTED */
					name = pname;
					symdef = psymdef;
					/* LINTED */
					symref = psymref;
					/* LINTED */
					_lmp = plmp;
					/* LINTED */
					binfo = pbinfo;

					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						value = audit_symbind(lmp, _lmp,
						    /* LINTED */
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				} else {
					Slookup		sl;
					Sresult		sr;

					/*
					 * Lookup the symbol definition.
					 * Initialize the symbol lookup, and
					 * symbol result, data structures.
					 */
					name = (char *)(STRTAB(lmp) +
					    symref->st_name);

					SLOOKUP_INIT(sl, name, lmp, 0,
					    ld_entry_cnt, 0, rsymndx, symref,
					    rtype, LKUP_STDRELOC);
					SRESULT_INIT(sr, name);
					symdef = NULL;

					if (lookup_sym(&sl, &sr, &binfo,
					    in_nfavl)) {
						name = (char *)sr.sr_name;
						_lmp = sr.sr_dmap;
						symdef = sr.sr_sym;
					}

					/*
					 * If the symbol is not found and the
					 * reference was not to a weak symbol,
					 * report an error.  Weak references
					 * may be unresolved.
					 */
					/* BEGIN CSTYLED */
					if (symdef == 0) {
					    if (sl.sl_bind != STB_WEAK) {
						if (elf_reloc_error(lmp, name,
						    rel, binfo))
							continue;

						ret = 0;
						break;

					    } else {
						psymndx = rsymndx;
						psymdef = 0;

						DBG_CALL(Dbg_bind_weak(lmp,
						    (Addr)roffset, (Addr)
						    (roffset - basebgn), name));
						continue;
					    }
					}
					/* END CSTYLED */

					/*
					 * If symbol was found in an object
					 * other than the referencing object
					 * then record the binding.
					 */
					if ((lmp != _lmp) && ((FLAGS1(_lmp) &
					    FL1_RT_NOINIFIN) == 0)) {
						if (aplist_test(&bound, _lmp,
						    AL_CNT_RELBIND) == 0) {
							ret = 0;
							break;
						}
					}

					/*
					 * Calculate the location of definition;
					 * symbol value plus base address of
					 * containing shared object.
					 */
					if (IS_SIZE(rtype))
						value = symdef->st_size;
					else
						value = symdef->st_value;

					if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
					    !(IS_SIZE(rtype)) &&
					    (symdef->st_shndx != SHN_ABS) &&
					    (ELF_ST_TYPE(symdef->st_info) !=
					    STT_TLS))
						value += ADDR(_lmp);

					/*
					 * Retain this symbol index and the
					 * value in case it can be used for the
					 * subsequent relocations.
					 */
					if (rtype != R_SPARC_COPY) {
						psymndx = rsymndx;
						pvalue = value;
						pname = name;
						psymdef = symdef;
						psymref = symref;
						plmp = _lmp;
						pbinfo = binfo;
					}
					if ((LIST(_lmp)->lm_tflags |
					    AFLAGS(_lmp)) &
					    LML_TFLG_AUD_SYMBIND) {
						dsymndx = (((uintptr_t)symdef -
						    (uintptr_t)SYMTAB(_lmp)) /
						    SYMENT(_lmp));
						value = audit_symbind(lmp, _lmp,
						    symdef, dsymndx, value,
						    &sb_flags);
					}
				}

				/*
				 * If relocation is PC-relative, subtract
				 * offset address.
				 */
				if (IS_PC_RELATIVE(rtype))
					value -= roffset;

				/*
				 * Special case TLS relocations.
				 */
				if (rtype == R_SPARC_TLS_DTPMOD32) {
					/*
					 * Relocation value is the TLS modid.
					 */
					value = TLSMODID(_lmp);

				} else if (rtype == R_SPARC_TLS_TPOFF32) {
					if ((value = elf_static_tls(_lmp,
					    symdef, rel, rtype, name, roffset,
					    value)) == 0) {
						ret = 0;
						break;
					}
				}
			}
		} else {
			/*
			 * Special cases.
			 */
			if (rtype == R_SPARC_REGISTER) {
				/*
				 * A register symbol associated with symbol
				 * index 0 is initialized (i.e. relocated) to
				 * a constant in the r_addend field rather than
				 * to a symbol value.
				 */
				value = 0;

			} else if (rtype == R_SPARC_TLS_DTPMOD32) {
				/*
				 * TLS relocation value is the TLS modid.
				 */
				value = TLSMODID(lmp);
			} else
				value = basebgn;

			name = NULL;
		}
		DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
		    M_REL_SHT_TYPE, rel, NULL, 0, name));

		/*
		 * Make sure the segment is writable.
		 */
		if ((rtype != R_SPARC_REGISTER) &&
		    ((mpp->mr_prot & PROT_WRITE) == 0) &&
		    ((set_prot(lmp, mpp, 1) == 0) ||
		    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
			ret = 0;
			break;
		}

		/*
		 * Call relocation routine to perform required relocation.
		 */
		switch (rtype) {
		case R_SPARC_REGISTER:
			/*
			 * The v9 ABI 4.2.4 says that system objects may,
			 * but are not required to, use register symbols
			 * to indicate how they use global registers.  Thus
			 * at least %g6, %g7 must be allowed in addition
			 * to %g2 and %g3.
			 */
			value += reladd;
			if (roffset == STO_SPARC_REGISTER_G1) {
				set_sparc_g1(value);
			} else if (roffset == STO_SPARC_REGISTER_G2) {
				set_sparc_g2(value);
			} else if (roffset == STO_SPARC_REGISTER_G3) {
				set_sparc_g3(value);
			} else if (roffset == STO_SPARC_REGISTER_G4) {
				set_sparc_g4(value);
			} else if (roffset == STO_SPARC_REGISTER_G5) {
				set_sparc_g5(value);
			} else if (roffset == STO_SPARC_REGISTER_G6) {
				set_sparc_g6(value);
			} else if (roffset == STO_SPARC_REGISTER_G7) {
				set_sparc_g7(value);
			} else {
				eprintf(LIST(lmp), ERR_FATAL,
				    MSG_INTL(MSG_REL_BADREG), NAME(lmp),
				    EC_ADDR(roffset));
				ret = 0;
				break;
			}

			DBG_CALL(Dbg_reloc_apply_reg(LIST(lmp), ELF_DBG_RTLD,
			    M_MACH, (Xword)roffset, (Xword)value));
			break;
		case R_SPARC_COPY:
			if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
			    symdef, _lmp, (const void *)value) == 0)
				ret = 0;
			break;
		case R_SPARC_JMP_SLOT:
			pltndx = ((ulong_t)rel -
			    (uintptr_t)JMPREL(lmp)) / relsiz;

			if (FLAGS(lmp) & FLG_RT_FIXED)
				vaddr = 0;
			else
				vaddr = ADDR(lmp);

			if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
			    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
			    AUDINFO(lmp)->ai_dynplts) {
				int	fail = 0;
				ulong_t	symndx = (((uintptr_t)symdef -
				    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

				(void) elf_plt_trace_write((caddr_t)vaddr,
				    (Rela *)rel, lmp, _lmp, symdef, symndx,
				    pltndx, (caddr_t)value, sb_flags, &fail);
				if (fail)
					ret = 0;
			} else {
				/*
				 * Write standard PLT entry to jump directly
				 * to newly bound function.
				 */
				DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
				    ELF_DBG_RTLD, (Xword)roffset,
				    (Xword)value));
				pbtype = elf_plt_write((uintptr_t)vaddr,
				    (uintptr_t)vaddr, (void *)rel, value,
				    pltndx);
			}
			break;
		default:
			value += reladd;

			/*
			 * Write the relocation out.  If this relocation is a
			 * common basic write, skip the doreloc() engine.
			 */
			if ((rtype == R_SPARC_GLOB_DAT) ||
			    (rtype == R_SPARC_32)) {
				if (roffset & 0x3) {
					Conv_inv_buf_t	inv_buf;

					eprintf(LIST(lmp), ERR_FATAL,
					    MSG_INTL(MSG_REL_NONALIGN),
					    conv_reloc_SPARC_type(rtype,
					    0, &inv_buf),
					    NAME(lmp), demangle(name),
					    EC_OFF(roffset));
					ret = 0;
				} else
					*(uint_t *)roffset += value;
			} else {
				if (do_reloc_rtld(rtype, (uchar_t *)roffset,
				    (Xword *)&value, name,
				    NAME(lmp), LIST(lmp)) == 0)
					ret = 0;
			}

			/*
			 * The value now contains the 'bit-shifted' value that
			 * was or'ed into memory (this was set by
			 * do_reloc_rtld()).
			 */
			DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
			    (Xword)roffset, (Xword)value));

			/*
			 * If this relocation is against a text segment, make
			 * sure that the instruction cache is flushed.
			 */
			if (textrel)
				iflush_range((caddr_t)roffset, 0x4);
		}

		if ((ret == 0) &&
		    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
			break;

		if (binfo) {
			DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
			    (Off)(roffset - basebgn), pltndx, pbtype,
			    _lmp, (Addr)value, symdef->st_value, name, binfo));
		}
	}

	return (relocate_finish(lmp, bound, ret));
}
/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
	static Conv_inv_buf_t	inv_buf;

	return (conv_reloc_SPARC_type(rel, 0, &inv_buf));
}