elf/dl-load.c (glibc/history.git)
/* _dl_map_object -- Map in a shared object's segments from the file.
   Copyright (C) 1995, 1996 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   Cambridge, MA 02139, USA.  */
#include <link.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include "dynamic-link.h"
/* On some systems, no flag bits are given to specify file mapping.  */
#ifndef MAP_FILE
#define MAP_FILE	0
#endif

/* The right way to map in the shared library files is MAP_COPY, which
   makes a virtual copy of the data at the time of the mmap call; this
   guarantees the mapped pages will be consistent even if the file is
   overwritten.  Some losing VM systems like Linux's lack MAP_COPY.  All we
   get is MAP_PRIVATE, which copies each page when it is modified; this
   means if the file is overwritten, we may at some point get some pages
   from the new version after starting with pages from the old version.  */
#ifndef MAP_COPY
#define MAP_COPY	MAP_PRIVATE
#endif
#include <endian.h>
#if BYTE_ORDER == BIG_ENDIAN
#define byteorder ELFDATA2MSB
#define byteorder_name "big-endian"
#elif BYTE_ORDER == LITTLE_ENDIAN
#define byteorder ELFDATA2LSB
#define byteorder_name "little-endian"
#else
#error "Unknown BYTE_ORDER " BYTE_ORDER
#define byteorder ELFDATANONE
#endif

#define STRING(x) #x
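
/* File descriptor for the zero-fill device, opened lazily in
   _dl_map_object_from_fd and used when mapping the all-zero (bss)
   portions of loaded segments; -1 until it is first needed.  */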
int _dl_zerofd = -1;
/* Try to open NAME in one of the directories in DIRPATH.
   Return the fd, or -1.  If successful, fill in *REALNAME
   with the malloc'd full directory name.  */
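/* DIRPATH is a colon- or semicolon-separated list of directories; an
   empty element means the current directory.  For example, searching
   for "libm.so" in "/lib:/usr/lib" tries "/lib/libm.so" and then
   "/usr/lib/libm.so".  */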
static int
open_path (const char *name, size_t namelen,
	   const char *dirpath,
	   char **realname)
{
  char *buf;
  const char *p;
  int fd;

  p = dirpath;
  if (p == NULL || *p == '\0')
    {
      errno = ENOENT;
      return -1;
    }

  buf = alloca (strlen (dirpath) + 1 + namelen);
  do
    {
      dirpath = p;
      p = strpbrk (dirpath, ":;");
      if (p == NULL)
	p = strchr (dirpath, '\0');

      if (p == dirpath)
	/* Two adjacent colons, or a colon at the beginning or the end of
	   the path means to search the current directory.  */
	(void) memcpy (buf, name, namelen);
      else
	{
	  /* Construct the pathname to try.  */
	  (void) memcpy (buf, dirpath, p - dirpath);
	  buf[p - dirpath] = '/';
	  (void) memcpy (&buf[(p - dirpath) + 1], name, namelen);
	}

      fd = open (buf, O_RDONLY);
      if (fd != -1)
	{
	  *realname = strdup (buf);
	  return fd;
	}
      if (errno != ENOENT && errno != EACCES)
	/* The file exists and is readable, but something went wrong.  */
	return -1;
    }
  while (*p++ != '\0');

  return -1;
}
/* Map in the shared object file NAME.  */

struct link_map *
_dl_map_object (struct link_map *loader, const char *name)
{
  int fd;
  char *realname;
  struct link_map *l;

  /* Look for this name among those already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (name, l->l_libname))
      {
	/* The object is already loaded.
	   Just bump its reference count and return it.  */
	++l->l_opencount;
	return l;
      }

  if (strchr (name, '/') == NULL)
    {
      /* Search for NAME in several places.  */

      size_t namelen = strlen (name) + 1;

      inline void trypath (const char *dirpath)
	{
	  fd = open_path (name, namelen, dirpath, &realname);
	}
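
      /* Search order: the loading object's DT_RPATH first, then
	 LD_LIBRARY_PATH (skipped when _dl_secure is set for a
	 privileged program), and finally the default path from
	 rtld.c.  */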
      fd = -1;
      if (loader && loader->l_info[DT_RPATH])
	trypath ((const char *) (loader->l_addr +
				 loader->l_info[DT_STRTAB]->d_un.d_ptr +
				 loader->l_info[DT_RPATH]->d_un.d_val));
      if (fd == -1 && ! _dl_secure)
	trypath (getenv ("LD_LIBRARY_PATH"));
      if (fd == -1)
	{
	  extern const char *_dl_rpath;	/* Set in rtld.c.  */
	  trypath (_dl_rpath);
	}
    }
  else
    {
      fd = open (name, O_RDONLY);
      if (fd != -1)
	realname = strdup (name);
    }

  if (fd == -1)
    _dl_signal_error (errno, name, "cannot open shared object file");

  return _dl_map_object_from_fd (name, fd, realname);
}
/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD.  */

struct link_map *
_dl_map_object_from_fd (const char *name, int fd, char *realname)
{
  struct link_map *l = NULL;
  const size_t pagesize = getpagesize ();
  void *file_mapping = NULL;
  size_t mapping_size = 0;

#define LOSE(s) lose (0, (s))
  void lose (int code, const char *msg)
    {
      (void) close (fd);
      if (file_mapping)
	munmap (file_mapping, mapping_size);
      _dl_signal_error (code, l ? l->l_name : name, msg);
    }

  inline caddr_t map_segment (Elf32_Addr mapstart, size_t len,
			      int prot, int fixed, off_t offset)
    {
      caddr_t mapat = mmap ((caddr_t) mapstart, len, prot,
			    fixed|MAP_COPY|MAP_FILE,
			    fd, offset);
      if (mapat == (caddr_t) -1)
	lose (errno, "failed to map segment from shared object");
      return mapat;
    }
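
  /* FILE_MAPPING/MAPPING_SIZE are a read-only window onto the start of
     the file, grown on demand by `map' below so that the ELF header and
     program headers can be examined in place; the window is unmapped once
     the headers have been read (and by `lose' on error).  */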
  /* Make sure LOCATION is mapped in.  */
  void *map (off_t location, size_t size)
    {
      if ((off_t) mapping_size <= location + (off_t) size)
	{
	  void *result;
	  if (file_mapping)
	    munmap (file_mapping, mapping_size);
	  mapping_size = (location + size + 1 + pagesize - 1);
	  mapping_size &= ~(pagesize - 1);
	  result = mmap (file_mapping, mapping_size, PROT_READ,
			 MAP_COPY|MAP_FILE, fd, 0);
	  if (result == (void *) -1)
	    lose (errno, "cannot map file data");
	  file_mapping = result;
	}
      return file_mapping + location;
    }
  const Elf32_Ehdr *header;
  const Elf32_Phdr *phdr;
  const Elf32_Phdr *ph;
  int type;

  /* Look again to see if the real name matched another already loaded.  */
  for (l = _dl_loaded; l; l = l->l_next)
    if (! strcmp (realname, l->l_name))
      {
	/* The object is already loaded.
	   Just bump its reference count and return it.  */
	close (fd);
	free (realname);
	++l->l_opencount;
	return l;
      }

  /* Map in the first page to read the header.  */
  header = map (0, sizeof *header);
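
  /* The first four bytes of e_ident are the ELF magic number
     ("\177ELF"); the test below compares them as a single 32-bit word
     assembled in host byte order.  */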
  /* Check the header for basic validity.  */
  if (*(Elf32_Word *) &header->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
      ((ELFMAG0 << (EI_MAG0 * 8)) |
       (ELFMAG1 << (EI_MAG1 * 8)) |
       (ELFMAG2 << (EI_MAG2 * 8)) |
       (ELFMAG3 << (EI_MAG3 * 8)))
#else
      ((ELFMAG0 << (EI_MAG3 * 8)) |
       (ELFMAG1 << (EI_MAG2 * 8)) |
       (ELFMAG2 << (EI_MAG1 * 8)) |
       (ELFMAG3 << (EI_MAG0 * 8)))
#endif
      )
    LOSE ("invalid ELF header");
  if (header->e_ident[EI_CLASS] != ELFCLASS32)
    LOSE ("ELF file class not 32-bit");
  if (header->e_ident[EI_DATA] != byteorder)
    LOSE ("ELF file data encoding not " byteorder_name);
  if (header->e_ident[EI_VERSION] != EV_CURRENT)
    LOSE ("ELF file version ident not " STRING(EV_CURRENT));
  if (header->e_version != EV_CURRENT)
    LOSE ("ELF file version not " STRING(EV_CURRENT));
  if (! elf_machine_matches_host (header->e_machine))
    LOSE ("ELF file machine architecture not " ELF_MACHINE_NAME);
  if (header->e_phentsize != sizeof (Elf32_Phdr))
    LOSE ("ELF file's phentsize not the expected size");
  /* Enter the new object in the list of loaded objects.  */
  l = _dl_new_object (realname, name, lt_loaded);
  l->l_opencount = 1;

  if (_dl_zerofd == -1)
    {
      _dl_zerofd = _dl_sysdep_open_zero_fill ();
      if (_dl_zerofd == -1)
	_dl_signal_error (errno, NULL, "cannot open zero fill device");
    }

  /* Extract the remaining details we need from the ELF header
     and then map in the program header table.  */
  l->l_entry = header->e_entry;
  type = header->e_type;
  l->l_phnum = header->e_phnum;
  phdr = map (header->e_phoff, l->l_phnum * sizeof (Elf32_Phdr));

  {
    /* Scan the program header table, collecting its load commands.  */
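    /* For each PT_LOAD entry we record: MAPSTART/MAPEND, the page-aligned
       bounds of the region mapped from the file; DATAEND, the end of the
       file-backed data (p_vaddr + p_filesz); ALLOCEND, the end of the whole
       segment including zero fill (p_vaddr + p_memsz); MAPOFF, the
       page-aligned file offset; and PROT, the mmap protection bits derived
       from p_flags.  */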
    struct loadcmd
      {
	Elf32_Addr mapstart, mapend, dataend, allocend;
	off_t mapoff;
	int prot;
      } loadcmds[l->l_phnum], *c;
    size_t nloadcmds = 0;

    l->l_ld = 0;
    l->l_phdr = 0;
    l->l_addr = 0;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
      switch (ph->p_type)
	{
	  /* These entries tell us where to find things once the file's
	     segments are mapped in.  We record the addresses it says
	     verbatim, and later correct for the run-time load address.  */
	case PT_DYNAMIC:
	  l->l_ld = (void *) ph->p_vaddr;
	  break;
	case PT_PHDR:
	  l->l_phdr = (void *) ph->p_vaddr;
	  break;

	case PT_LOAD:
	  /* A load command tells us to map in part of the file.
	     We record the load commands and process them all later.  */
	  if (ph->p_align % pagesize != 0)
	    LOSE ("ELF load command alignment not page-aligned");
	  if ((ph->p_vaddr - ph->p_offset) % ph->p_align)
	    LOSE ("ELF load command address/offset not properly aligned");
	  {
	    struct loadcmd *c = &loadcmds[nloadcmds++];
	    c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
	    c->mapend = ((ph->p_vaddr + ph->p_filesz + pagesize - 1)
			 & ~(pagesize - 1));
	    c->dataend = ph->p_vaddr + ph->p_filesz;
	    c->allocend = ph->p_vaddr + ph->p_memsz;
	    c->mapoff = ph->p_offset & ~(ph->p_align - 1);
	    c->prot = 0;
	    if (ph->p_flags & PF_R)
	      c->prot |= PROT_READ;
	    if (ph->p_flags & PF_W)
	      c->prot |= PROT_WRITE;
	    if (ph->p_flags & PF_X)
	      c->prot |= PROT_EXEC;
	    break;
	  }
	}

    /* We are done reading the file's headers now.  Unmap them.  */
    munmap (file_mapping, mapping_size);
    /* Now process the load commands and map segments into memory.  */
    c = loadcmds;

    if (type == ET_DYN || type == ET_REL)
      {
	/* This is a position-independent shared object.  We can let the
	   kernel map it anywhere it likes, but we must have space for all
	   the segments in their specified positions relative to the first.
	   So we map the first segment without MAP_FIXED, but with its
	   extent increased to cover all the segments.  Then we remove
	   access from the excess portion, and there is known to be
	   sufficient space there to remap the later segments.  */
	caddr_t mapat;
	mapat = map_segment (c->mapstart,
			     loadcmds[nloadcmds - 1].allocend - c->mapstart,
			     c->prot, 0, c->mapoff);
	l->l_addr = (Elf32_Addr) mapat - c->mapstart;

	/* Change protection on the excess portion to disallow all access;
	   the portions we do not remap later will be inaccessible as if
	   unallocated.  Then jump into the normal segment-mapping loop to
	   handle the portion of the segment past the end of the file
	   mapping.  */
	mprotect (mapat + c->mapend,
		  loadcmds[nloadcmds - 1].allocend - c->mapend,
		  PROT_NONE);

	goto postmap;
      }
    while (c < &loadcmds[nloadcmds])
      {
	if (c->mapend > c->mapstart)
	  /* Map the segment contents from the file.  */
	  map_segment (l->l_addr + c->mapstart, c->mapend - c->mapstart,
		       c->prot, MAP_FIXED, c->mapoff);

      postmap:
	if (c->allocend > c->dataend)
	  {
	    /* Extra zero pages should appear at the end of this segment,
	       after the data mapped from the file.  */
	    Elf32_Addr zero, zeroend, zeropage;

	    zero = l->l_addr + c->dataend;
	    zeroend = l->l_addr + c->allocend;
	    zeropage = (zero + pagesize - 1) & ~(pagesize - 1);

	    if (zeroend < zeropage)
	      /* All the extra data is in the last page of the segment.
		 We can just zero it.  */
	      zeropage = zeroend;

	    if (zeropage > zero)
	      {
		/* Zero the final part of the last page of the segment.  */
		if ((c->prot & PROT_WRITE) == 0)
		  {
		    /* Dag nab it.  */
		    if (mprotect ((caddr_t) (zero & ~(pagesize - 1)),
				  pagesize, c->prot|PROT_WRITE) < 0)
		      lose (errno, "cannot change memory protections");
		  }
		memset ((void *) zero, 0, zeropage - zero);
		if ((c->prot & PROT_WRITE) == 0)
		  mprotect ((caddr_t) (zero & ~(pagesize - 1)),
			    pagesize, c->prot);
	      }

	    if (zeroend > zeropage)
	      {
		/* Map the remaining zero pages in from the zero fill FD.  */
		caddr_t mapat;
		mapat = mmap ((caddr_t) zeropage, zeroend - zeropage, c->prot,
			      MAP_ANON|MAP_PRIVATE|MAP_FIXED,
			      _dl_zerofd, 0);
		if (mapat == (caddr_t) -1)
		  lose (errno, "cannot map zero-fill pages");
	      }
	  }

	++c;
      }
  }
  l->l_type = type == ET_EXEC ? lt_executable : lt_library;

  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
	LOSE ("object file has no dynamic section");
    }
  else
    (Elf32_Addr) l->l_ld += l->l_addr;
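
  /* If there was no PT_PHDR entry, fall back on the e_phoff field of the
     now-mapped ELF header to locate the program header table; in either
     case relocate the value by the run-time load address.  */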
  if (l->l_phdr == 0)
    l->l_phdr = (void *) ((const Elf32_Ehdr *) l->l_addr)->e_phoff;
  (Elf32_Addr) l->l_phdr += l->l_addr;

  elf_get_dynamic_info (l->l_ld, l->l_info);
  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  return l;
}