/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Redirection ld.so.  Based on the 4.x binary compatibility ld.so, used
 * to redirect aliases for ld.so to the real one.
 */

/*
 * Import data structures
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/fcntl.h>
#include <sys/stat.h>
#include <sys/sysconfig.h>
#include <sys/auxv.h>
#include <sys/elf.h>
#include <sys/link.h>

#include "alias_boot.h"

/*
 * Local manifest constants and macros.
 */
#define	ALIGN(x, a)	((uintptr_t)(x) & ~((a) - 1))
#define	ROUND(x, a)	(((uintptr_t)(x) + ((a) - 1)) & ~((a) - 1))
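
/*
 * For example, with a 0x2000-byte page, ALIGN(0x30123, 0x2000) truncates
 * down to 0x30000 while ROUND(0x30123, 0x2000) rounds up to 0x32000.
 */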

#define	EMPTY		strings[EMPTY_S]
#define	LDSO		strings[LDSO_S]
#define	ZERO		strings[ZERO_S]
#define	CLOSE		(*(funcs[CLOSE_F]))
#define	FSTATAT		(*(funcs[FSTATAT_F]))
#define	MMAP		(*(funcs[MMAP_F]))
#define	MUNMAP		(*(funcs[MUNMAP_F]))
#define	OPENAT		(*(funcs[OPENAT_F]))
#define	PANIC		(*(funcs[PANIC_F]))
#define	SYSCONFIG	(*(funcs[SYSCONFIG_F]))
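
/*
 * The *_S string indices and *_F function indices used above are expected
 * to be defined in alias_boot.h (included above); the strings[] and funcs[]
 * vectors handed to __rtld() must be laid out in that same order.
 */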

/*
 * Alias ld.so entry point -- receives a bootstrap structure and a vector
 * of strings.  The vector is "well-known" to us, and consists of pointers
 * to string constants.  This aliasing bootstrap requires no relocation in
 * order to run, save for the pointers of constant strings.  This second
 * parameter provides this.  Note that this program is carefully coded in
 * order to maintain the "no bootstrapping" requirement -- it calls only
 * local functions, uses no intrinsics, etc.
 */
void *
__rtld(Elf32_Boot *ebp, const char *strings[], int (*funcs[])())
{
	int i, p;			/* working */
	int j;				/* working */
	long page_size = 0;		/* size of a page */
	const char *program_name = EMPTY; /* our name */
	int ldfd;			/* fd assigned to ld.so */
	int dzfd = 0;			/* fd assigned to /dev/zero */
	Elf32_Ehdr *ehdr;		/* ELF header of ld.so */
	Elf32_Phdr *phdr;		/* first Phdr in file */
	Elf32_Phdr *pptr;		/* working Phdr */
	Elf32_Phdr *lph = NULL;		/* last loadable Phdr */
	Elf32_Phdr *fph = NULL;		/* first loadable Phdr */
	caddr_t maddr;			/* pointer to mapping claim */
	Elf32_Off mlen;			/* total mapping claim */
	caddr_t faddr;			/* first program mapping of ld.so */
	Elf32_Off foff;			/* file offset for segment mapping */
	Elf32_Off flen;			/* file length for segment mapping */
	caddr_t addr;			/* working mapping address */
	caddr_t zaddr;			/* /dev/zero working mapping addr */
	struct stat sb;			/* stat buffer for sizing */
	auxv_t *ap;			/* working aux pointer */

	/*
	 * Discover things about our environment: auxiliary vector (if
	 * any), arguments, program name, and the like.
	 */
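	/*
	 * Each Elf32_Boot entry carries a tag and a value; the vector is
	 * scanned here for what we need, and is later extended in place
	 * (page size, ld.so base, /dev/zero fd) before being handed on.
	 */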
	while (ebp->eb_tag != NULL) {
		switch (ebp->eb_tag) {
		case EB_ARGV:
			program_name = *((char **)ebp->eb_un.eb_ptr);
			break;
		case EB_AUXV:
			for (ap = (auxv_t *)ebp->eb_un.eb_ptr;
			    ap->a_type != AT_NULL; ap++)
				if (ap->a_type == AT_PAGESZ) {
					page_size = ap->a_un.a_val;
					break;
				}
			break;
		}
		ebp++;
	}

	/*
	 * If we didn't get a page size from looking in the auxiliary
	 * vector, we need to get one now.
	 */
	if (page_size == 0) {
		page_size = SYSCONFIG(_CONFIG_PAGESIZE);
		ebp->eb_tag = EB_PAGESIZE, (ebp++)->eb_un.eb_val =
		    (Elf32_Word)page_size;
	}

	/*
	 * Map in the real ld.so.  Note that we're mapping it as
	 * an ELF database, not as a program -- we just want to walk its
	 * data structures.  Further mappings will actually establish the
	 * program in the address space.
	 */
	if ((ldfd = OPENAT(AT_FDCWD, LDSO, O_RDONLY)) == -1)
		PANIC(program_name);
	if (FSTATAT(ldfd, NULL, &sb, 0) == -1)
		PANIC(program_name);
	ehdr = (Elf32_Ehdr *)MMAP(0, sb.st_size, PROT_READ | PROT_EXEC,
	    MAP_SHARED, ldfd, 0);
	if (ehdr == (Elf32_Ehdr *)-1)
		PANIC(program_name);

	/*
	 * Validate the file we're looking at, ensure it has the correct
	 * ELF structures, such as: ELF magic numbers, coded for SPARC,
	 * and so forth.
	 */
	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
	    ehdr->e_ident[EI_MAG3] != ELFMAG3)
		PANIC(program_name);
	if (ehdr->e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr->e_ident[EI_DATA] != ELFDATA2MSB)
		PANIC(program_name);
	if (ehdr->e_type != ET_DYN)
		PANIC(program_name);
	if ((ehdr->e_machine != EM_SPARC) &&
	    (ehdr->e_machine != EM_SPARC32PLUS))
		PANIC(program_name);
	if (ehdr->e_version > EV_CURRENT)
		PANIC(program_name);

	/*
	 * Point at program headers and start figuring out what to load.
	 */
	phdr = (Elf32_Phdr *)((caddr_t)ehdr + ehdr->e_phoff);
	for (p = 0, pptr = phdr; p < (int)ehdr->e_phnum; p++,
	    pptr = (Elf32_Phdr *)((caddr_t)pptr + ehdr->e_phentsize))
		if (pptr->p_type == PT_LOAD) {
			if (fph == NULL) {
				fph = pptr;
			} else if (pptr->p_vaddr <= lph->p_vaddr)
				PANIC(program_name);
			lph = pptr;
		}

	/*
	 * We'd better have at least one loadable segment.
	 */
	if (fph == NULL)
		PANIC(program_name);

	/*
	 * Map enough address space to hold the program (as opposed to the
	 * file) represented by ld.so.  The amount to be assigned is the
	 * range between the end of the last loadable segment and the
	 * beginning of the first PLUS the alignment of the first segment.
	 * mmap() can assign us any page-aligned address, but the relocations
	 * assume the alignments included in the program header.  As an
	 * optimization, however, let's assume that mmap() will actually
	 * give us an aligned address -- since if it does, we can save
	 * an munmap() later on.  If it doesn't -- then go try it again.
	 */
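	/*
	 * As a hypothetical example: with fph->p_vaddr == 0, lph->p_vaddr ==
	 * 0x3e000, lph->p_memsz == 0x5000 and an 0x2000-byte page, the claim
	 * below works out to mlen == ROUND(0x43000, 0x2000) == 0x44000 bytes.
	 */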
	mlen = ROUND((lph->p_vaddr + lph->p_memsz) -
	    ALIGN(fph->p_vaddr, page_size), page_size);
	maddr = (caddr_t)MMAP(0, mlen, PROT_READ | PROT_EXEC,
	    MAP_SHARED, ldfd, 0);
	if (maddr == (caddr_t)-1)
		PANIC(program_name);
	faddr = (caddr_t)ROUND(maddr, fph->p_align);

	/*
	 * Check to see whether alignment skew was really needed.
	 */
	if (faddr != maddr) {
		(void) MUNMAP(maddr, mlen);
		mlen = ROUND((lph->p_vaddr + lph->p_memsz) -
		    ALIGN(fph->p_vaddr, fph->p_align) + fph->p_align,
		    page_size);
		maddr = (caddr_t)MMAP(0, mlen, PROT_READ | PROT_EXEC,
		    MAP_SHARED, ldfd, 0);
		if (maddr == (caddr_t)-1)
			PANIC(program_name);
		faddr = (caddr_t)ROUND(maddr, fph->p_align);
	}

	/*
	 * We have the address space reserved, so map each loadable segment.
	 */
	for (p = 0, pptr = phdr; p < (int)ehdr->e_phnum; p++,
	    pptr = (Elf32_Phdr *)((caddr_t)pptr + ehdr->e_phentsize)) {

		/*
		 * Skip non-loadable segments or segments that don't occupy
		 * any memory.
		 */
		if ((pptr->p_type != PT_LOAD) || (pptr->p_memsz == 0))
			continue;

		/*
		 * Determine the file offset to which the mapping will be
		 * directed (must be aligned) and how much to map (might
		 * be more than the file in the case of .bss.)
		 */
		foff = ALIGN(pptr->p_offset, page_size);
		flen = pptr->p_memsz + (pptr->p_offset - foff);

		/*
		 * Set address of this segment relative to our base.
		 */
		addr = (caddr_t)ALIGN(faddr + pptr->p_vaddr, page_size);

		/*
		 * If this is the first program header, record our base
		 * address for later use.
		 */
		if (pptr == fph) {
			ebp->eb_tag = EB_LDSO_BASE;
			(ebp++)->eb_un.eb_ptr = (Elf32_Addr)addr;
		}

		/*
		 * Unmap anything from the last mapping address to this
		 * one.
		 */
		if (addr - maddr) {
			(void) MUNMAP(maddr, addr - maddr);
			mlen -= addr - maddr;
		}

		/*
		 * Determine the mapping protection from the section
		 * attributes.
		 */
		i = 0;
		if (pptr->p_flags & PF_R)
			i |= PROT_READ;
		if (pptr->p_flags & PF_W)
			i |= PROT_WRITE;
		if (pptr->p_flags & PF_X)
			i |= PROT_EXEC;
		if ((caddr_t)MMAP((caddr_t)addr, flen, i,
		    MAP_FIXED | MAP_PRIVATE, ldfd, foff) == (caddr_t)-1)
			PANIC(program_name);

		/*
		 * If the memory occupancy of the segment overflows the
		 * definition in the file, we need to "zero out" the
		 * end of the mapping we've established, and if necessary,
		 * map some more space from /dev/zero.
		 */
		if (pptr->p_memsz > pptr->p_filesz) {
			foff = (uintptr_t)faddr + pptr->p_vaddr +
			    pptr->p_filesz;
			zaddr = (caddr_t)ROUND(foff, page_size);
			for (j = 0; j < (int)(zaddr - foff); j++)
				*((char *)foff + j) = 0;
			j = (faddr + pptr->p_vaddr + pptr->p_memsz) - zaddr;
			if (j > 0) {
				if (dzfd == 0) {
					dzfd = OPENAT(AT_FDCWD, ZERO, O_RDWR);
					if (dzfd == -1)
						PANIC(program_name);
				}
				if ((caddr_t)MMAP((caddr_t)zaddr, j, i,
				    MAP_FIXED | MAP_PRIVATE, dzfd,
				    0) == (caddr_t)-1)
					PANIC(program_name);
			}
		}

		/*
		 * Update the mapping claim pointer.
		 */
		maddr = addr + ROUND(flen, page_size);
		mlen -= maddr - addr;
	}

	/*
	 * Unmap any final reservation.
	 */
	if (mlen != 0)
		(void) MUNMAP(maddr, mlen);

	/*
	 * Clean up file descriptor space we've consumed.  Pass along
	 * the /dev/zero file descriptor we got -- every cycle counts.
	 */
	(void) CLOSE(ldfd);
	if (dzfd != 0)
		ebp->eb_tag = EB_DEVZERO, (ebp++)->eb_un.eb_val = dzfd;

	/*
	 * The call itself.  Note that we start 2 instruction words in.
	 * The ELF ld.so contains an "entry vector" of branch instructions,
	 * which, for our interest are:
	 *	+0:	ba, a	<normal startup>
	 *	+4:	ba, a	<compatibility startup>
	 *	+8:	ba, a	<alias startup>
	 * By starting at the alias startup, the ELF ld.so knows
	 * that a pointer to "eb" is available to it and further knows
	 * how to calculate the offset to the program's arguments and
	 * other structures.  We do the "call" by returning to our
	 * bootstrap and then jumping to the address that we return.
	 */
	ebp->eb_tag = EB_NULL, ebp->eb_un.eb_val = 0;
	return ((void *)(ehdr->e_entry + faddr + 8));
}