glibc/history.git: elf/dl-open.c

/* Load a shared object at runtime, relocate it, and run its initializer.
   Copyright (C) 1996-2007, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <dlfcn.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
#include <sys/param.h>
#include <bits/libc-lock.h>
#include <ldsodefs.h>
#include <bp-sym.h>
#include <caller.h>
#include <sysdep-cancel.h>
#include <tls.h>

#include <dl-dst.h>


extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
				    void (*dl_main) (const ElfW(Phdr) *phdr,
						     ElfW(Word) phnum,
						     ElfW(Addr) *user_entry));
weak_extern (BP_SYM (_dl_sysdep_start))

extern int __libc_multiple_libcs;	/* Defined in init-first.c.  */

/* Uncomment the following for debugging.  */
/* #define SCOPE_DEBUG 1 */
#ifdef SCOPE_DEBUG
static void show_scope (struct link_map *new);
#endif

/* We must be careful not to leave ourselves in an inconsistent state.
   Thus we catch any error and re-raise it after cleaning up.  */

struct dl_open_args
{
  const char *file;
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* This is the caller of _dl_open().  */
  const void *caller_dl_open;
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;
  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
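
/* The arguments are collected in one struct because dl_open_worker below
   is invoked through _dl_catch_error, which forwards only a single
   void * argument to the worker.  */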

static int
add_to_global (struct link_map *new)
{
  struct link_map **new_global;
  unsigned int to_add = 0;
  unsigned int cnt;

  /* Count the objects we have to put in the global scope.  */
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new object and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of libc is
     used.  But this means the block from the main map cannot be used
     in a realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the global scope.  */

  struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
  if (ns->_ns_global_scope_alloc == 0)
    {
      /* This is the first dynamic object given global scope.  */
      ns->_ns_global_scope_alloc
	= ns->_ns_main_searchlist->r_nlist + to_add + 8;
      new_global = (struct link_map **)
	malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
      if (new_global == NULL)
	{
	  ns->_ns_global_scope_alloc = 0;
	nomem:
	  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
			    N_("cannot extend global scope"));
	  return 1;
	}

      /* Copy over the old entries.  */
      ns->_ns_main_searchlist->r_list
	= memcpy (new_global, ns->_ns_main_searchlist->r_list,
		  (ns->_ns_main_searchlist->r_nlist
		   * sizeof (struct link_map *)));
    }
  else if (ns->_ns_main_searchlist->r_nlist + to_add
	   > ns->_ns_global_scope_alloc)
    {
      /* We have to extend the existing array of link maps in the
	 main map.  */
      struct link_map **old_global
	= GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
      size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);

      new_global = (struct link_map **)
	malloc (new_nalloc * sizeof (struct link_map *));
      if (new_global == NULL)
	goto nomem;

      memcpy (new_global, old_global,
	      ns->_ns_global_scope_alloc * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_nalloc;
      ns->_ns_main_searchlist->r_list = new_global;

      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }

  /* Now add the new entries.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;
	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;
	}
    }
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;

  return 0;
}
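
/* Illustration (a sketch, not exercised by this file; "libplugin.so" is a
   placeholder name): user code can promote an already loaded object into
   the global scope, which reaches add_to_global via the "already open"
   path of dl_open_worker below:

     void *h = dlopen ("libplugin.so", RTLD_NOW | RTLD_LOCAL);
     ...
     dlopen ("libplugin.so", RTLD_NOLOAD | RTLD_GLOBAL);
*/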

int
_dl_scope_free (void *old)
{
  struct dl_scope_free_list *fsl;
#define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))

  if (RTLD_SINGLE_THREAD_P)
    free (old);
  else if ((fsl = GL(dl_scope_free_list)) == NULL)
    {
      GL(dl_scope_free_list) = fsl = malloc (sizeof (*fsl));
      if (fsl == NULL)
	{
	  THREAD_GSCOPE_WAIT ();
	  free (old);
	  return 1;
	}
      else
	{
	  fsl->list[0] = old;
	  fsl->count = 1;
	}
    }
  else if (fsl->count < DL_SCOPE_FREE_LIST_SIZE)
    fsl->list[fsl->count++] = old;
  else
    {
      THREAD_GSCOPE_WAIT ();
      while (fsl->count > 0)
	free (fsl->list[--fsl->count]);
      return 1;
    }
  return 0;
}
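
/* As implemented above: once other threads may be walking scope lists
   (i.e. not RTLD_SINGLE_THREAD_P), an old scope array cannot be freed
   right away; it is parked on dl_scope_free_list and only released after
   THREAD_GSCOPE_WAIT guarantees no thread still uses it.  */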

static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Check whether _dl_open() has been called from a valid DSO.  */
  if (__check_caller (args->caller_dl_open,
		      allow_libc|allow_libdl|allow_ldso) != 0)
    _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l;
      for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
	for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
	  if (caller_dlopen >= (const void *) l->l_map_start
	      && caller_dlopen < (const void *) l->l_map_end
	      && (l->l_contiguous
		  || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
	    {
	      assert (ns == l->l_ns);
	      call_map = l;
	      goto found_caller;
	    }

    found_caller:
      if (args->nsid == __LM_ID_CALLER)
	{
#ifndef SHARED
	  /* In statically linked apps there might be no loaded object.  */
	  if (call_map == NULL)
	    args->nsid = LM_ID_BASE;
	  else
#endif
	    args->nsid = call_map->l_ns;
	}
    }

  assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

  /* Maybe we have to expand a DST.  */
  if (__builtin_expect (dst != NULL, 0))
    {
      size_t len = strlen (file);

      /* Determine how much space we need.  We have to allocate the
	 memory locally.  */
      size_t required = DL_DST_REQUIRED (call_map, file, len,
					 _dl_dst_count (dst, 0));

      /* Get space for the new file name.  */
      char *new_file = (char *) alloca (required + 1);

      /* Generate the new file name.  */
      _dl_dst_substitute (call_map, file, new_file, 0);

      /* If the substitution failed don't try to load.  */
      if (*new_file == '\0')
	_dl_signal_error (0, "dlopen", NULL,
			  N_("empty dynamic string token substitution"));

      /* Now we have a new file name.  */
      file = new_file;

      /* It does not matter whether call_map is set even if we
	 computed it only because of the DST.  Since the path contains
	 a slash the value is not used.  See dl-load.c.  */
    }
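
  /* For illustration (the file name is a placeholder): a caller may use
     dynamic string tokens such as $ORIGIN, $LIB or $PLATFORM, e.g.

	 dlopen ("$ORIGIN/../lib/libplugin.so", RTLD_NOW);

     $ORIGIN expands relative to the object that called dlopen, which is
     why the caller's link map had to be determined above.  */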

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, 0, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__builtin_expect (mode & __RTLD_SPROF, 0))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  */
  if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
    {
      /* Let the user know about the opencount.  */
      if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global namespace
	 but it is not so far, add it now.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	(void) add_to_global (new);

      assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);

      return;
    }
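
  /* Reopening an already loaded object therefore only bumps
     l_direct_opencount; it is torn down again only once dlclose has
     brought this count back to zero and nothing else keeps it alive.  */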

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
				     0, 0);

#ifdef SCOPE_DEBUG
  show_scope (new);
#endif

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
    {
      struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
      /* Do not call the functions for any auditing object.  */
      if (head->l_auditing == 0)
	{
	  struct audit_ifaces *afct = GLRO(dl_audit);
	  for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
	    {
	      if (afct->activity != NULL)
		afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);

	      afct = afct->next;
	    }
	}
    }
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_initialize (0, args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Relocate the objects loaded.  We do this in reverse order so that copy
     relocs of earlier objects overwrite the data written by later objects.  */

  struct link_map *l = new;
  while (l->l_next)
    l = l->l_next;
  while (1)
    {
      if (! l->l_real->l_relocated)
	{
#ifdef SHARED
	  if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
	    {
	      /* If this is the shared object we want to profile, make
		 sure profiling is started.  We can find out whether this
		 is necessary or not by observing the `_dl_profile_map'
		 variable.  If it was NULL but is not NULL afterwards we
		 must start the profiling.  */
	      struct link_map *old_profile_map = GL(dl_profile_map);

	      _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	      if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
		{
		  /* We must prepare the profiling.  */
		  _dl_start_profile ();

		  /* Prevent unloading the object.  */
		  GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
		}
	    }
	  else
#endif
	    _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
	}

      if (l == new)
	break;
      l = l->l_prev;
    }

  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  bool any_tls = false;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  struct r_scope_elem **runp = imap->l_scope;
	  size_t cnt = 0;

	  while (*runp != NULL)
	    {
	      if (*runp == &new->l_searchlist)
		break;
	      ++cnt;
	      ++runp;
	    }

	  if (*runp != NULL)
	    /* Avoid duplicates.  */
	    continue;

	  if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
	    {
	      /* The 'r_scope' array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

#define SCOPE_ELEMS(imap) \
  (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < SCOPE_ELEMS (imap))
		{
		  new_size = SCOPE_ELEMS (imap);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;
	}
      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      else if (! imap->l_init_called
	       /* Only if the module defines thread local data.  */
	       && __builtin_expect (imap->l_tls_blocksize > 0, 0))
	{
	  /* Now that we know the object is loaded successfully add
	     modules containing TLS data to the slot info table.  We
	     might have to increase its size.  */
	  _dl_add_to_slotinfo (imap);

	  if (imap->l_need_tls_init)
	    {
	      /* For static TLS we have to allocate the memory here
		 and now.  This includes allocating memory in the DTV.
		 But we cannot change any DTV other than our own.  So,
		 if we cannot guarantee that there is room in the DTV
		 we don't even try it and fail the load.

		 XXX We could track the minimum DTV slots allocated in
		 all threads.  */
	      if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
		_dl_signal_error (0, "dlopen", NULL, N_("\
cannot load any more object with static TLS"));

	      imap->l_need_tls_init = 0;
#ifdef SHARED
	      /* Update the slot information data for at least the
		 generation of the DSO we are allocating data for.  */
	      _dl_update_slotinfo (imap->l_tls_modid);
#endif

	      GL(dl_init_static_tls) (imap);
	      assert (imap->l_need_tls_init == 0);
	    }

	  /* We have to bump the generation counter.  */
	  any_tls = true;
	}
    }

  /* Bump the generation number if necessary.  */
  if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
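
  /* For illustration (the name is a placeholder): a newly dlopened module
     defining thread-local storage, e.g.

	 __thread int tls_counter;

     takes the TLS branch above: it is entered into the slot info table and
     causes the generation counter bump, so running threads notice the new
     module on their next access to its TLS data.  */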

  /* Run the initializer functions of new objects.  */
  _dl_init (new, args->argc, args->argv, args->env);

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    /* Move the object into the global namespace.  */
    if (add_to_global (new) != 0)
      /* It failed.  */
      return;

  /* Mark the object as not deletable if the RTLD_NODELETE flag was
     passed.  */
  if (__builtin_expect (mode & RTLD_NODELETE, 0))
    new->l_flags_1 |= DF_1_NODELETE;

#ifndef SHARED
  /* We must be the static _dl_open in libc.a.  A static program that
     has loaded a dynamic object now has competition.  */
  __libc_multiple_libcs = 1;
#endif

  /* Let the user know about the opencount.  */
  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}


void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
    {
      /* Find a new namespace.  */
      for (nsid = 1; nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (nsid == DL_NNS)
	{
	  /* No more namespaces available.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}

      if (nsid == GL(dl_nns))
	++GL(dl_nns);

      _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placement only causes problems.  Also don't allow loading
     into a namespace used for auditing.  */
  else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
	   && (GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));
#ifndef SHARED
  else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
	   && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
	   && GL(dl_nns) == 0)
    GL(dl_nns) = 1;
#endif

  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.caller_dl_open = RETURN_ADDRESS (0);
  args.map = NULL;
  args.nsid = nsid;
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  const char *objname;
  const char *errstring;
  bool malloced;
  int errcode = _dl_catch_error (&objname, &errstring, &malloced,
				 dl_open_worker, &args);
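
  /* Any error raised inside dl_open_worker via _dl_signal_error unwinds
     back to this _dl_catch_error call; it is reported through OBJNAME and
     ERRSTRING rather than by a normal return, and handled below.  */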

#ifndef MAP_COPY
  /* We must munmap() the cache file.  */
  _dl_unload_cache ();
#endif

  /* See if an error occurred during loading.  */
  if (__builtin_expect (errstring != NULL, 0))
    {
      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  /* Maybe some of the modules which were loaded use TLS.
	     Since it will be removed in the following _dl_close call
	     we have to mark the dtv array as having gaps to fill the
	     holes.  This is a pessimistic assumption which won't hurt
	     if not true.  There is no need to do this when we are
	     loading the auditing DSOs since TLS has not yet been set
	     up.  */
	  if ((mode & __RTLD_AUDIT) == 0)
	    GL(dl_tls_dtv_gaps) = true;

	  _dl_close_worker (args.map);
	}

      assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Make a local copy of the error string so that we can release the
	 memory allocated for it.  */
      size_t len_errstring = strlen (errstring) + 1;
      char *local_errstring;
      if (objname == errstring + len_errstring)
	{
	  size_t total_len = len_errstring + strlen (objname) + 1;
	  local_errstring = alloca (total_len);
	  memcpy (local_errstring, errstring, total_len);
	  objname = local_errstring + len_errstring;
	}
      else
	{
	  local_errstring = alloca (len_errstring);
	  memcpy (local_errstring, errstring, len_errstring);
	}

      if (malloced)
	free ((char *) errstring);

      /* Reraise the error.  */
      _dl_signal_error (errcode, objname, NULL, local_errstring);
    }

  assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

#ifndef SHARED
  DL_STATIC_INIT (args.map);
#endif

  return args.map;
}


#ifdef SCOPE_DEBUG
#include <unistd.h>

static void
show_scope (struct link_map *new)
{
  int scope_cnt;

  for (scope_cnt = 0; new->l_scope[scope_cnt] != NULL; ++scope_cnt)
    {
      char numbuf[2];
      unsigned int cnt;

      numbuf[0] = '0' + scope_cnt;
      numbuf[1] = '\0';
      _dl_printf ("scope %s:", numbuf);

      for (cnt = 0; cnt < new->l_scope[scope_cnt]->r_nlist; ++cnt)
	if (*new->l_scope[scope_cnt]->r_list[cnt]->l_name)
	  _dl_printf (" %s", new->l_scope[scope_cnt]->r_list[cnt]->l_name);
	else
	  _dl_printf (" <main>");

      _dl_printf ("\n");
    }
}
#endif

#ifdef IS_IN_rtld
/* Return non-zero if ADDR lies within one of L's segments.  */
int
internal_function
_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
{
  int n = l->l_phnum;
  const ElfW(Addr) reladdr = addr - l->l_addr;

  while (--n >= 0)
    if (l->l_phdr[n].p_type == PT_LOAD
	&& reladdr - l->l_phdr[n].p_vaddr >= 0
	&& reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
      return 1;
  return 0;
}
#endif
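
/* Illustrative usage (a minimal sketch, not part of this file; the library
   and symbol names are only examples): the public dlopen, dlmopen and
   dlclose entry points end up in _dl_open above.

     #define _GNU_SOURCE
     #include <dlfcn.h>
     #include <stdio.h>

     int
     main (void)
     {
       // Open in the base namespace with immediate binding, local scope.
       void *h = dlopen ("libm.so.6", RTLD_NOW | RTLD_LOCAL);
       if (h == NULL)
	 {
	   fprintf (stderr, "dlopen: %s\n", dlerror ());
	   return 1;
	 }

       double (*cosine) (double) = (double (*) (double)) dlsym (h, "cos");
       if (cosine != NULL)
	 printf ("cos(0) = %f\n", cosine (0.0));

       // dlmopen (LM_ID_NEWLM, ...) asks _dl_open for a fresh namespace.
       void *h2 = dlmopen (LM_ID_NEWLM, "libm.so.6", RTLD_NOW);
       if (h2 != NULL)
	 dlclose (h2);

       dlclose (h);
       return 0;
     }

   Build with something like "gcc example.c -ldl" against a glibc of this
   vintage, where the dl* functions live in libdl.  */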