Install sources with MKSRC=yes
[minix3.git] / libexec / ld.elf_so / map_object_fallback.c
blobac24443ee71fc75bc5e54dfcd2c5b8d825942793
1 /* $NetBSD: map_object.c,v 1.52 2013/08/03 13:17:05 skrll Exp $ */
3 /*
4 * Copyright 1996 John D. Polstra.
5 * Copyright 1996 Matt Thomas <matt@3am-software.com>
6 * Copyright 2002 Charles M. Hannum <root@ihack.net>
7 * All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by John Polstra.
20 * 4. The name of the author may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 #ifndef lint
37 __RCSID("$NetBSD: map_object.c,v 1.52 2013/08/03 13:17:05 skrll Exp $");
38 #endif /* not lint */
40 #include <errno.h>
41 #include <stddef.h>
42 #include <stdlib.h>
43 #include <string.h>
44 #include <unistd.h>
45 #include <sys/stat.h>
46 #include <sys/types.h>
47 #include <sys/mman.h>
49 #include "debug.h"
50 #include "rtld.h"
52 #define MINIXVERBOSE 0
54 #if MINIXVERBOSE
55 #include <stdio.h>
56 #endif
58 static int protflags(int); /* Elf flags -> mmap protection */
60 #define EA_UNDEF (~(Elf_Addr)0)
/*
 * Read "size" bytes from "fd" at offset "off" into "addr".
 *
 * Any pread() error is fatal: the loader cannot proceed without the
 * object's bytes, so report via _rtld_error() and exit(1).
 *
 * NOTE(review): a short read (e.g. file smaller than the request) is
 * deliberately tolerated — callers read a full page of header even for
 * smaller files; do not turn this into a hard s != size check.
 */
static void
Pread(void *addr, size_t size, int fd, off_t off)
{
	ssize_t s;	/* pread() returns ssize_t; int would truncate */

	if ((s = pread(fd, addr, size, off)) < 0) {
		_rtld_error("pread failed");
		exit(1);
	}

#if MINIXVERBOSE
	/* %zx/%llx/%p match size_t, off_t and pointer; plain %lx was UB */
	fprintf(stderr, "read 0x%zx bytes from offset 0x%llx to addr %p\n",
	    size, (unsigned long long)off, addr);
#endif
}
75 /* minix-without-mmap version of _rtld_map_object() */
76 Obj_Entry *
77 _rtld_map_object_fallback(const char *path, int fd, const struct stat *sb)
79 Obj_Entry *obj;
80 Elf_Ehdr *ehdr;
81 Elf_Phdr *phdr;
82 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
83 Elf_Phdr *phtls;
84 #endif
85 size_t phsize;
86 Elf_Phdr *phlimit;
87 Elf_Phdr *segs[2];
88 int nsegs;
89 caddr_t mapbase = MAP_FAILED;
90 size_t mapsize = 0;
91 int mapflags;
92 Elf_Off base_offset;
93 #ifdef MAP_ALIGNED
94 Elf_Addr base_alignment;
95 #endif
96 Elf_Addr base_vaddr;
97 Elf_Addr base_vlimit;
98 Elf_Addr text_vlimit;
99 int text_flags;
100 caddr_t base_addr;
101 Elf_Off data_offset;
102 Elf_Addr data_vaddr;
103 Elf_Addr data_vlimit;
104 int data_flags;
105 caddr_t data_addr;
106 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
107 Elf_Addr tls_vaddr = 0; /* Noise GCC */
108 #endif
109 Elf_Addr phdr_vaddr;
110 size_t phdr_memsz;
111 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
112 caddr_t gap_addr;
113 size_t gap_size;
114 #endif
115 int i;
116 #ifdef RTLD_LOADER
117 Elf_Addr clear_vaddr;
118 caddr_t clear_addr;
119 size_t nclear;
120 #endif
122 if (sb != NULL && sb->st_size < (off_t)sizeof (Elf_Ehdr)) {
123 _rtld_error("%s: not ELF file (too short)", path);
124 return NULL;
127 obj = _rtld_obj_new();
128 obj->path = xstrdup(path);
129 obj->pathlen = strlen(path);
130 if (sb != NULL) {
131 obj->dev = sb->st_dev;
132 obj->ino = sb->st_ino;
135 #ifdef __minix
136 ehdr = mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
137 MAP_PREALLOC|MAP_ANON, -1, (off_t)0);
138 Pread(ehdr, _rtld_pagesz, fd, 0);
139 #if MINIXVERBOSE
140 fprintf(stderr, "minix mmap for header: 0x%lx\n", ehdr);
141 #endif
142 #else
143 ehdr = mmap(NULL, _rtld_pagesz, PROT_READ, MAP_FILE | MAP_SHARED, fd,
144 (off_t)0);
145 #endif
146 obj->ehdr = ehdr;
147 if (ehdr == MAP_FAILED) {
148 _rtld_error("%s: read error: %s", path, xstrerror(errno));
149 goto bad;
151 /* Make sure the file is valid */
152 if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0) {
153 _rtld_error("%s: not ELF file (magic number bad)", path);
154 goto bad;
156 if (ehdr->e_ident[EI_CLASS] != ELFCLASS) {
157 _rtld_error("%s: invalid ELF class %x; expected %x", path,
158 ehdr->e_ident[EI_CLASS], ELFCLASS);
159 goto bad;
161 /* Elf_e_ident includes class */
162 if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
163 ehdr->e_version != EV_CURRENT ||
164 ehdr->e_ident[EI_DATA] != ELFDEFNNAME(MACHDEP_ENDIANNESS)) {
165 _rtld_error("%s: unsupported file version", path);
166 goto bad;
168 if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
169 _rtld_error("%s: unsupported file type", path);
170 goto bad;
172 switch (ehdr->e_machine) {
173 ELFDEFNNAME(MACHDEP_ID_CASES)
174 default:
175 _rtld_error("%s: unsupported machine", path);
176 goto bad;
180 * We rely on the program header being in the first page. This is
181 * not strictly required by the ABI specification, but it seems to
182 * always true in practice. And, it simplifies things considerably.
184 assert(ehdr->e_phentsize == sizeof(Elf_Phdr));
185 assert(ehdr->e_phoff + ehdr->e_phnum * sizeof(Elf_Phdr) <=
186 _rtld_pagesz);
189 * Scan the program header entries, and save key information.
191 * We rely on there being exactly two load segments, text and data,
192 * in that order.
194 phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
195 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
196 phtls = NULL;
197 #endif
198 phsize = ehdr->e_phnum * sizeof(phdr[0]);
199 obj->phdr = NULL;
200 phdr_vaddr = EA_UNDEF;
201 phdr_memsz = 0;
202 phlimit = phdr + ehdr->e_phnum;
203 nsegs = 0;
204 while (phdr < phlimit) {
205 switch (phdr->p_type) {
206 case PT_INTERP:
207 obj->interp = (void *)(uintptr_t)phdr->p_vaddr;
208 dbg(("%s: PT_INTERP %p", obj->path, obj->interp));
209 break;
211 case PT_LOAD:
212 if (nsegs < 2)
213 segs[nsegs] = phdr;
214 ++nsegs;
216 dbg(("%s: %s %p phsize %" PRImemsz, obj->path, "PT_LOAD",
217 (void *)(uintptr_t)phdr->p_vaddr, phdr->p_memsz));
218 break;
220 case PT_PHDR:
221 phdr_vaddr = phdr->p_vaddr;
222 phdr_memsz = phdr->p_memsz;
223 dbg(("%s: %s %p phsize %" PRImemsz, obj->path, "PT_PHDR",
224 (void *)(uintptr_t)phdr->p_vaddr, phdr->p_memsz));
225 break;
227 case PT_DYNAMIC:
228 obj->dynamic = (void *)(uintptr_t)phdr->p_vaddr;
229 dbg(("%s: %s %p phsize %" PRImemsz, obj->path, "PT_DYNAMIC",
230 (void *)(uintptr_t)phdr->p_vaddr, phdr->p_memsz));
231 break;
233 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
234 case PT_TLS:
235 phtls = phdr;
236 dbg(("%s: %s %p phsize %" PRImemsz, obj->path, "PT_TLS",
237 (void *)(uintptr_t)phdr->p_vaddr, phdr->p_memsz));
238 break;
239 #endif
240 #ifdef __ARM_EABI__
241 case PT_ARM_EXIDX:
242 obj->exidx_start = (void *)(uintptr_t)phdr->p_vaddr;
243 obj->exidx_sz = phdr->p_memsz;
244 break;
245 #endif
248 ++phdr;
250 phdr = (Elf_Phdr *) ((caddr_t)ehdr + ehdr->e_phoff);
251 obj->entry = (void *)(uintptr_t)ehdr->e_entry;
252 if (!obj->dynamic) {
253 _rtld_error("%s: not dynamically linked", path);
254 goto bad;
256 if (nsegs != 2) {
257 _rtld_error("%s: wrong number of segments (%d != 2)", path,
258 nsegs);
259 goto bad;
263 * Map the entire address space of the object as a file
264 * region to stake out our contiguous region and establish a
265 * base for relocation. We use a file mapping so that
266 * the kernel will give us whatever alignment is appropriate
267 * for the platform we're running on.
269 * We map it using the text protection, map the data segment
270 * into the right place, then map an anon segment for the bss
271 * and unmap the gaps left by padding to alignment.
274 #ifdef MAP_ALIGNED
275 base_alignment = segs[0]->p_align;
276 #endif
277 base_offset = round_down(segs[0]->p_offset);
278 base_vaddr = round_down(segs[0]->p_vaddr);
279 base_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_memsz);
280 text_vlimit = round_up(segs[0]->p_vaddr + segs[0]->p_memsz);
281 text_flags = protflags(segs[0]->p_flags);
282 data_offset = round_down(segs[1]->p_offset);
283 data_vaddr = round_down(segs[1]->p_vaddr);
284 data_vlimit = round_up(segs[1]->p_vaddr + segs[1]->p_filesz);
285 data_flags = protflags(segs[1]->p_flags);
286 #ifdef RTLD_LOADER
287 clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
288 #endif
290 obj->textsize = text_vlimit - base_vaddr;
291 obj->vaddrbase = base_vaddr;
292 obj->isdynamic = ehdr->e_type == ET_DYN;
294 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
295 if (phtls != NULL) {
296 ++_rtld_tls_dtv_generation;
297 obj->tlsindex = ++_rtld_tls_max_index;
298 obj->tlssize = phtls->p_memsz;
299 obj->tlsalign = phtls->p_align;
300 obj->tlsinitsize = phtls->p_filesz;
301 tls_vaddr = phtls->p_vaddr;
303 #endif
305 obj->phdr_loaded = false;
306 for (i = 0; i < nsegs; i++) {
307 if (phdr_vaddr != EA_UNDEF &&
308 segs[i]->p_vaddr <= phdr_vaddr &&
309 segs[i]->p_memsz >= phdr_memsz) {
310 obj->phdr_loaded = true;
311 break;
313 if (segs[i]->p_offset <= ehdr->e_phoff &&
314 segs[i]->p_memsz >= phsize) {
315 phdr_vaddr = segs[i]->p_vaddr + ehdr->e_phoff;
316 phdr_memsz = phsize;
317 obj->phdr_loaded = true;
318 break;
321 if (obj->phdr_loaded) {
322 obj->phdr = (void *)(uintptr_t)phdr_vaddr;
323 obj->phsize = phdr_memsz;
324 } else {
325 Elf_Phdr *buf;
326 buf = xmalloc(phsize);
327 if (buf == NULL) {
328 _rtld_error("%s: cannot allocate program header", path);
329 goto bad;
331 memcpy(buf, phdr, phsize);
332 obj->phdr = buf;
333 obj->phsize = phsize;
335 dbg(("%s: phdr %p phsize %zu (%s)", obj->path, obj->phdr, obj->phsize,
336 obj->phdr_loaded ? "loaded" : "allocated"));
338 /* Unmap header if it overlaps the first load section. */
339 if (base_offset < _rtld_pagesz) {
340 munmap(ehdr, _rtld_pagesz);
341 obj->ehdr = MAP_FAILED;
345 * Calculate log2 of the base section alignment.
347 mapflags = 0;
348 #ifdef MAP_ALIGNED
349 if (base_alignment > _rtld_pagesz) {
350 unsigned int log2 = 0;
351 for (; base_alignment > 1; base_alignment >>= 1)
352 log2++;
353 mapflags = MAP_ALIGNED(log2);
355 #endif
357 #ifdef RTLD_LOADER
358 base_addr = obj->isdynamic ? NULL : (caddr_t)base_vaddr;
359 #else
360 base_addr = NULL;
361 #endif
362 mapsize = base_vlimit - base_vaddr;
364 #ifndef __minix
365 mapbase = mmap(base_addr, mapsize, text_flags,
366 mapflags | MAP_FILE | MAP_PRIVATE, fd, base_offset);
367 #else
368 mapbase = mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
369 MAP_ANON | MAP_PREALLOC, -1, 0);
370 #if MINIXVERBOSE
371 fprintf(stderr, "minix mmap for whole block: 0x%lx-0x%lx\n", mapbase, mapbase+mapsize);
372 #endif
373 Pread(mapbase, obj->textsize, fd, 0);
374 #endif
375 if (mapbase == MAP_FAILED) {
376 _rtld_error("mmap of entire address space failed: %s",
377 xstrerror(errno));
378 goto bad;
381 /* Overlay the data segment onto the proper region. */
382 data_addr = mapbase + (data_vaddr - base_vaddr);
383 #ifdef __minix
384 Pread(data_addr, data_vlimit - data_vaddr, fd, data_offset);
385 #else
386 if (mmap(data_addr, data_vlimit - data_vaddr, data_flags,
387 MAP_FILE | MAP_PRIVATE | MAP_FIXED, fd, data_offset) ==
388 MAP_FAILED) {
389 _rtld_error("mmap of data failed: %s", xstrerror(errno));
390 goto bad;
393 /* Overlay the bss segment onto the proper region. */
394 if (mmap(mapbase + data_vlimit - base_vaddr, base_vlimit - data_vlimit,
395 data_flags, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0) ==
396 MAP_FAILED) {
397 _rtld_error("mmap of bss failed: %s", xstrerror(errno));
398 goto bad;
401 /* Unmap the gap between the text and data. */
402 gap_addr = mapbase + round_up(text_vlimit - base_vaddr);
403 gap_size = data_addr - gap_addr;
404 if (gap_size != 0 && mprotect(gap_addr, gap_size, PROT_NONE) == -1) {
405 _rtld_error("mprotect of text -> data gap failed: %s",
406 xstrerror(errno));
407 goto bad;
409 #endif
411 #ifdef RTLD_LOADER
412 /* Clear any BSS in the last page of the data segment. */
413 clear_addr = mapbase + (clear_vaddr - base_vaddr);
414 if ((nclear = data_vlimit - clear_vaddr) > 0)
415 memset(clear_addr, 0, nclear);
417 /* Non-file portion of BSS mapped above. */
418 #endif
420 #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
421 if (phtls != NULL)
422 obj->tlsinit = mapbase + tls_vaddr;
423 #endif
425 obj->mapbase = mapbase;
426 obj->mapsize = mapsize;
427 obj->relocbase = mapbase - base_vaddr;
429 if (obj->dynamic)
430 obj->dynamic = (void *)(obj->relocbase + (Elf_Addr)(uintptr_t)obj->dynamic);
431 if (obj->entry)
432 obj->entry = (void *)(obj->relocbase + (Elf_Addr)(uintptr_t)obj->entry);
433 if (obj->interp)
434 obj->interp = (void *)(obj->relocbase + (Elf_Addr)(uintptr_t)obj->interp);
435 if (obj->phdr_loaded)
436 obj->phdr = (void *)(obj->relocbase + (Elf_Addr)(uintptr_t)obj->phdr);
437 #ifdef __ARM_EABI__
438 if (obj->exidx_start)
439 obj->exidx_start = (void *)(obj->relocbase + (Elf_Addr)(uintptr_t)obj->exidx_start);
440 #endif
442 return obj;
444 bad:
445 if (obj->ehdr != MAP_FAILED)
446 munmap(obj->ehdr, _rtld_pagesz);
447 if (mapbase != MAP_FAILED)
448 munmap(mapbase, mapsize);
449 _rtld_obj_free(obj);
450 return NULL;
/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
protflags(int elfflags)
{
	int prot = 0;

	if (elfflags & PF_R)
		prot |= PROT_READ;
#ifdef RTLD_LOADER
	/* Only the loader itself needs segments writable for relocation. */
	if (elfflags & PF_W)
		prot |= PROT_WRITE;
#endif
	if (elfflags & PF_X)
		prot |= PROT_EXEC;
#if defined(__minix)
	/* Minix has to map it writable so we can do relocations
	 * as we don't have mprotect() yet.
	 */
	prot |= PROT_WRITE;
#endif /* defined(__minix) */
	return prot;
}