/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2014 Joyent, Inc.  All rights reserved.
 */
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/cred.h>
#include <sys/vmsystm.h>
#include <sys/machsystm.h>
#include <sys/debug.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <sys/vmparam.h>
#include <sys/vfs.h>
#include <sys/elf.h>
#include <sys/machelf.h>
#include <sys/corectl.h>
#include <sys/exec.h>
#include <sys/exechdr.h>
#include <sys/autoconf.h>
#include <sys/mem.h>
#include <vm/seg_dev.h>
#include <sys/mmapobj.h>
#include <sys/atomic.h>
/*
 * Theory statement:
 *
 * The main driving force behind mmapobj is to interpret and map ELF files
 * inside of the kernel instead of having the linker be responsible for this.
 *
 * mmapobj also supports the AOUT 4.x binary format as well as flat files in
 * a read only manner.
 *
 * When interpreting and mapping an ELF file, mmapobj will map each PT_LOAD
 * or PT_SUNWBSS segment according to the ELF standard.  Refer to the "Linker
 * and Libraries Guide" for more information about the standard and mapping
 * rules.
 *
 * Having mmapobj interpret and map objects will allow the kernel to make the
 * best decision for where to place the mappings for said objects.  Thus, we
 * can make optimizations inside of the kernel for specific platforms or cache
 * mapping information to make mapping objects faster.  The cache is ignored
 * if ASLR is enabled.
 *
 * The lib_va_hash will be one such optimization.  For each ELF object that
 * mmapobj is asked to interpret, we will attempt to cache the information
 * about the PT_LOAD and PT_SUNWBSS sections to speed up future mappings of
 * the same objects.  We will cache up to LIBVA_CACHED_SEGS (see below) program
 * headers which should cover a majority of the libraries out there without
 * wasting space.  In order to make sure that the cached information is valid,
 * we check the passed in vnode's mtime and ctime to make sure the vnode
 * has not been modified since the last time we used it.
 *
 * In addition, the lib_va_hash may contain a preferred starting VA for the
 * object which can be useful for platforms which support a shared context.
 * This will increase the likelihood that library text can be shared among
 * many different processes.  We limit the reserved VA space for 32 bit objects
 * in order to minimize fragmenting the process's address space.
 *
 * In addition to the above, the mmapobj interface allows for padding to be
 * requested before the first mapping and after the last mapping created.
 * When padding is requested, no additional optimizations will be made for
 * that request.
 */
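
/*
 * Reader's sketch of the ELF call flow through this file (the mmapobj()
 * entry point itself lives outside the portion shown here):
 *
 *	mmapobj() -> doelfwork() -> process_phdrs()
 *	    -> mmapobj_alloc_start_addr() / mmapobj_lookup_start_addr()
 *	    -> mmapobj_map_elf() -> mmapobj_map_ptload()
 *
 * Failures at any stage unwind through mmapobj_unmap().
 */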
/*
 * Threshold to prevent allocating too much kernel memory to read in the
 * program headers for an object.  If it requires more than below,
 * we will use a KM_NOSLEEP allocation to allocate memory to hold all of the
 * program headers which could possibly fail.  If less memory than below is
 * needed, then we use a KM_SLEEP allocation and are willing to wait for the
 * memory if we need to.
 */
size_t mmapobj_alloc_threshold = 65536;
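
/*
 * For scale (an illustrative calculation, not a limit taken from the code):
 * with 56-byte Elf64_Phdr entries, an object needs more than 1170 program
 * headers (1170 * 56 = 65520 bytes) before doelfwork() below switches from
 * a KM_SLEEP to a KM_NOSLEEP allocation for the phdr buffer.
 */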
/* Debug stats for test coverage */
#ifdef DEBUG
struct mobj_stats {
	uint_t	mobjs_unmap_called;
	uint_t	mobjs_remap_devnull;
	uint_t	mobjs_lookup_start;
	uint_t	mobjs_alloc_start;
	uint_t	mobjs_alloc_vmem;
	uint_t	mobjs_add_collision;
	uint_t	mobjs_get_addr;
	uint_t	mobjs_map_flat_no_padding;
	uint_t	mobjs_map_flat_padding;
	uint_t	mobjs_map_ptload_text;
	uint_t	mobjs_map_ptload_initdata;
	uint_t	mobjs_map_ptload_preread;
	uint_t	mobjs_map_ptload_unaligned_text;
	uint_t	mobjs_map_ptload_unaligned_map_fail;
	uint_t	mobjs_map_ptload_unaligned_read_fail;
	uint_t	mobjs_zfoddiff;
	uint_t	mobjs_zfoddiff_nowrite;
	uint_t	mobjs_zfodextra;
	uint_t	mobjs_ptload_failed;
	uint_t	mobjs_map_elf_no_holes;
	uint_t	mobjs_unmap_hole;
	uint_t	mobjs_nomem_header;
	uint_t	mobjs_inval_header;
	uint_t	mobjs_overlap_header;
	uint_t	mobjs_np2_align;
	uint_t	mobjs_np2_align_overflow;
	uint_t	mobjs_exec_padding;
	uint_t	mobjs_exec_addr_mapped;
	uint_t	mobjs_exec_addr_devnull;
	uint_t	mobjs_exec_addr_in_use;
	uint_t	mobjs_lvp_found;
	uint_t	mobjs_no_loadable_yet;
	uint_t	mobjs_nothing_to_map;
	uint_t	mobjs_e2big;
	uint_t	mobjs_dyn_pad_align;
	uint_t	mobjs_dyn_pad_noalign;
	uint_t	mobjs_alloc_start_fail;
	uint_t	mobjs_lvp_nocache;
	uint_t	mobjs_extra_padding;
	uint_t	mobjs_lvp_not_needed;
	uint_t	mobjs_no_mem_map_sz;
	uint_t	mobjs_check_exec_failed;
	uint_t	mobjs_lvp_used;
	uint_t	mobjs_wrong_model;
	uint_t	mobjs_noexec_fs;
	uint_t	mobjs_e2big_et_rel;
	uint_t	mobjs_et_rel_mapped;
	uint_t	mobjs_unknown_elf_type;
	uint_t	mobjs_phent32_too_small;
	uint_t	mobjs_phent64_too_small;
	uint_t	mobjs_inval_elf_class;
	uint_t	mobjs_too_many_phdrs;
	uint_t	mobjs_no_phsize;
	uint_t	mobjs_phsize_large;
	uint_t	mobjs_phsize_xtralarge;
	uint_t	mobjs_fast_wrong_model;
	uint_t	mobjs_fast_e2big;
	uint_t	mobjs_fast;
	uint_t	mobjs_fast_success;
	uint_t	mobjs_fast_not_now;
	uint_t	mobjs_small_file;
	uint_t	mobjs_read_error;
	uint_t	mobjs_unsupported;
	uint_t	mobjs_flat_e2big;
	uint_t	mobjs_phent_align32;
	uint_t	mobjs_phent_align64;
	uint_t	mobjs_lib_va_find_hit;
	uint_t	mobjs_lib_va_find_delay_delete;
	uint_t	mobjs_lib_va_find_delete;
	uint_t	mobjs_lib_va_add_delay_delete;
	uint_t	mobjs_lib_va_add_delete;
	uint_t	mobjs_lib_va_create_failure;
	uint_t	mobjs_min_align;
#if defined(__sparc)
	uint_t	mobjs_aout_uzero_fault;
	uint_t	mobjs_aout_64bit_try;
	uint_t	mobjs_aout_noexec;
	uint_t	mobjs_aout_e2big;
	uint_t	mobjs_aout_lib;
	uint_t	mobjs_aout_fixed;
	uint_t	mobjs_aout_zfoddiff;
	uint_t	mobjs_aout_map_bss;
	uint_t	mobjs_aout_bss_fail;
	uint_t	mobjs_aout_nlist;
	uint_t	mobjs_aout_addr_in_use;
#endif
} mobj_stats;

#define	MOBJ_STAT_ADD(stat)		((mobj_stats.mobjs_##stat)++)
#else
#define	MOBJ_STAT_ADD(stat)
#endif
/*
 * Check if addr is at or above the address space reserved for the stack.
 * The stack is at the top of the address space for all sparc processes
 * and 64 bit x86 processes.  For 32 bit x86, the stack is not at the top
 * of the address space and thus this check will always return false for
 * 32 bit x86 processes.
 */
#if defined(__sparc)
#define	OVERLAPS_STACK(addr, p)						\
	(addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK)))
#elif defined(__amd64)
#define	OVERLAPS_STACK(addr, p)						\
	((p->p_model == DATAMODEL_LP64) &&				\
	(addr >= (p->p_usrstack - ((p->p_stk_ctl + PAGEOFFSET) & PAGEMASK))))
#elif defined(__i386)
#define	OVERLAPS_STACK(addr, p)	0
#endif
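
/*
 * Reader's note (not from the original source): p_stk_ctl is the process
 * stack size limit in bytes; rounding it up to a page boundary with
 * (p_stk_ctl + PAGEOFFSET) & PAGEMASK gives the full reserved stack region
 * below p_usrstack, so any address at or above that region is rejected.
 */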
/* lv_flags values - bitmap */
#define	LV_ELF32	0x1		/* 32 bit ELF file */
#define	LV_ELF64	0x2		/* 64 bit ELF file */
#define	LV_DEL		0x4		/* delete when lv_refcnt hits zero */
/*
 * Note: lv_num_segs will denote how many segments this file has and will
 * only be set after the lv_mps array has been filled out.
 * lv_mps can only be valid if lv_num_segs is non-zero.
 */
struct lib_va {
	struct lib_va		*lv_next;
	caddr_t			lv_base_va;	/* start va for library */
	ssize_t			lv_len;		/* total va span of library */
	size_t			lv_align;	/* minimum alignment */
	uint64_t		lv_nodeid;	/* filesystem node id */
	uint64_t		lv_fsid;	/* filesystem id */
	timestruc_t		lv_ctime;	/* last time file was changed */
	timestruc_t		lv_mtime;	/* or modified */
	mmapobj_result_t	lv_mps[LIBVA_CACHED_SEGS]; /* cached pheaders */
	int			lv_num_segs;	/* # segs for this file */
	int			lv_flags;
	uint_t			lv_refcnt;	/* number of holds on struct */
};
#define	LIB_VA_SIZE	1024
#define	LIB_VA_MASK	(LIB_VA_SIZE - 1)
#define	LIB_VA_MUTEX_SHIFT	3

#if (LIB_VA_SIZE & (LIB_VA_SIZE - 1))
#error	"LIB_VA_SIZE is not a power of 2"
#endif

static struct lib_va *lib_va_hash[LIB_VA_SIZE];
static kmutex_t lib_va_hash_mutex[LIB_VA_SIZE >> LIB_VA_MUTEX_SHIFT];

#define	LIB_VA_HASH_MUTEX(index)					\
	(&lib_va_hash_mutex[index >> LIB_VA_MUTEX_SHIFT])

#define	LIB_VA_HASH(nodeid)						\
	(((nodeid) ^ ((nodeid) << 7) ^ ((nodeid) << 13)) & LIB_VA_MASK)
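
/*
 * Reader's note (not from the original source): the hash XORs nodeid with
 * two shifted copies of itself so that runs of consecutive inode numbers
 * spread across the 1024 buckets, and LIB_VA_MUTEX_SHIFT of 3 means each
 * of the 128 mutexes protects a group of 8 adjacent buckets.
 */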
#define	LIB_VA_MATCH_ID(arg1, arg2)					\
	((arg1)->lv_nodeid == (arg2)->va_nodeid &&			\
	(arg1)->lv_fsid == (arg2)->va_fsid)

#define	LIB_VA_MATCH_TIME(arg1, arg2)					\
	((arg1)->lv_ctime.tv_sec == (arg2)->va_ctime.tv_sec &&		\
	(arg1)->lv_mtime.tv_sec == (arg2)->va_mtime.tv_sec &&		\
	(arg1)->lv_ctime.tv_nsec == (arg2)->va_ctime.tv_nsec &&	\
	(arg1)->lv_mtime.tv_nsec == (arg2)->va_mtime.tv_nsec)

#define	LIB_VA_MATCH(arg1, arg2)					\
	(LIB_VA_MATCH_ID(arg1, arg2) && LIB_VA_MATCH_TIME(arg1, arg2))
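
/*
 * Reader's note (not from the original source): identity is the (fsid,
 * nodeid) pair, while freshness compares both ctime and mtime down to the
 * nanosecond, so either a content write or an attribute-only change is
 * enough to invalidate a cached entry.
 */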
/*
 * lib_va will be used for optimized allocation of address ranges for
 * libraries, such that subsequent mappings of the same library will attempt
 * to use the same VA as previous mappings of that library.
 * In order to map libraries at the same VA in many processes, we need to carve
 * out our own address space for them which is unique across many processes.
 * We use different arenas for 32 bit and 64 bit libraries.
 *
 * Since the 32 bit address space is relatively small, we limit the number of
 * libraries which try to use consistent virtual addresses to lib_threshold.
 * For 64 bit libraries there is no such limit since the address space is
 * large.
 */
static vmem_t *lib_va_32_arena;
static vmem_t *lib_va_64_arena;
uint_t lib_threshold = 20;	/* modifiable via /etc/system */

static kmutex_t lib_va_init_mutex;	/* no need to initialize */

/*
 * Number of 32 bit and 64 bit libraries in lib_va hash.
 */
static uint_t libs_mapped_32 = 0;
static uint_t libs_mapped_64 = 0;
/*
 * Free up the resources associated with lvp as well as lvp itself.
 * We also decrement the number of libraries mapped via a lib_va
 * cached virtual address.
 */
void
lib_va_free(struct lib_va *lvp)
{
	int is_64bit = lvp->lv_flags & LV_ELF64;
	ASSERT(lvp->lv_refcnt == 0);

	if (lvp->lv_base_va != NULL) {
		vmem_xfree(is_64bit ? lib_va_64_arena : lib_va_32_arena,
		    lvp->lv_base_va, lvp->lv_len);
		if (is_64bit) {
			atomic_dec_32(&libs_mapped_64);
		} else {
			atomic_dec_32(&libs_mapped_32);
		}
	}
	kmem_free(lvp, sizeof (struct lib_va));
}
/*
 * See if the file associated with the vap passed in is in the lib_va hash.
 * If it is and the file has not been modified since last use, then
 * return a pointer to that data.  Otherwise, return NULL if the file has
 * changed or the file was not found in the hash.
 */
static struct lib_va *
lib_va_find(vattr_t *vap)
{
	struct lib_va *lvp;
	struct lib_va *del = NULL;
	struct lib_va **tmp;
	uint_t index;
	index = LIB_VA_HASH(vap->va_nodeid);

	mutex_enter(LIB_VA_HASH_MUTEX(index));
	tmp = &lib_va_hash[index];
	while (*tmp != NULL) {
		lvp = *tmp;
		if (LIB_VA_MATCH_ID(lvp, vap)) {
			if (LIB_VA_MATCH_TIME(lvp, vap)) {
				ASSERT((lvp->lv_flags & LV_DEL) == 0);
				lvp->lv_refcnt++;
				MOBJ_STAT_ADD(lib_va_find_hit);
			} else {
				/*
				 * file was updated since last use.
				 * need to remove it from list.
				 */
				del = lvp;
				*tmp = del->lv_next;
				del->lv_next = NULL;
				/*
				 * If we can't delete it now, mark it for later
				 */
				if (del->lv_refcnt) {
					MOBJ_STAT_ADD(lib_va_find_delay_delete);
					del->lv_flags |= LV_DEL;
					del = NULL;
				}
				lvp = NULL;
			}
			mutex_exit(LIB_VA_HASH_MUTEX(index));
			if (del) {
				ASSERT(del->lv_refcnt == 0);
				MOBJ_STAT_ADD(lib_va_find_delete);
				lib_va_free(del);
			}
			return (lvp);
		}
		tmp = &lvp->lv_next;
	}
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	return (NULL);
}
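
/*
 * Hedged usage sketch (not part of the original file): a successful lookup
 * returns an entry with an extra hold, so callers pair it with
 * lib_va_release():
 *
 *	lvp = lib_va_find(&vattr);
 *	if (lvp != NULL) {
 *		... use lvp->lv_base_va, lvp->lv_mps ...
 *		lib_va_release(lvp);
 *	}
 */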
/*
 * Add a new entry to the lib_va hash.
 * Search the hash while holding the appropriate mutex to make sure that the
 * data is not already in the cache.  If we find data that is in the cache
 * already and has not been modified since last use, we return NULL.  If it
 * has been modified since last use, we will remove that entry from
 * the hash and it will be deleted once its reference count reaches zero.
 * If there is no current entry in the hash we will add the new entry and
 * return it to the caller who is responsible for calling lib_va_release to
 * drop their reference count on it.
 *
 * lv_num_segs will be set to zero since the caller needs to add that
 * information to the data structure.
 */
static struct lib_va *
lib_va_add_hash(caddr_t base_va, ssize_t len, size_t align, vattr_t *vap)
{
	struct lib_va *lvp;
	uint_t index;
	model_t model;
	struct lib_va **tmp;
	struct lib_va *del = NULL;

	model = get_udatamodel();
	index = LIB_VA_HASH(vap->va_nodeid);

	lvp = kmem_alloc(sizeof (struct lib_va), KM_SLEEP);

	mutex_enter(LIB_VA_HASH_MUTEX(index));

	/*
	 * Make sure not adding same data a second time.
	 * The hash chains should be relatively short and adding
	 * is a relatively rare event, so it's worth the check.
	 */
	tmp = &lib_va_hash[index];
	while (*tmp != NULL) {
		if (LIB_VA_MATCH_ID(*tmp, vap)) {
			if (LIB_VA_MATCH_TIME(*tmp, vap)) {
				mutex_exit(LIB_VA_HASH_MUTEX(index));
				kmem_free(lvp, sizeof (struct lib_va));
				return (NULL);
			}

			/*
			 * We have the same nodeid and fsid but the file has
			 * been modified since we last saw it.
			 * Need to remove the old node and add this new
			 * one.
			 * Could probably use a callback mechanism to make
			 * this cleaner.
			 */
			ASSERT(del == NULL);
			del = *tmp;
			*tmp = del->lv_next;
			del->lv_next = NULL;

			/*
			 * Check to see if we can free it.  If lv_refcnt
			 * is greater than zero, then some other thread
			 * has a reference to the one we want to delete
			 * and we can not delete it.  All of this is done
			 * under the lib_va_hash_mutex lock so it is atomic.
			 */
			if (del->lv_refcnt) {
				MOBJ_STAT_ADD(lib_va_add_delay_delete);
				del->lv_flags |= LV_DEL;
				del = NULL;
			}
			/* tmp is already advanced */
			continue;
		}
		tmp = &((*tmp)->lv_next);
	}

	lvp->lv_base_va = base_va;
	lvp->lv_len = len;
	lvp->lv_align = align;
	lvp->lv_nodeid = vap->va_nodeid;
	lvp->lv_fsid = vap->va_fsid;
	lvp->lv_ctime.tv_sec = vap->va_ctime.tv_sec;
	lvp->lv_ctime.tv_nsec = vap->va_ctime.tv_nsec;
	lvp->lv_mtime.tv_sec = vap->va_mtime.tv_sec;
	lvp->lv_mtime.tv_nsec = vap->va_mtime.tv_nsec;
	lvp->lv_next = NULL;
	lvp->lv_refcnt = 1;

	/* Caller responsible for filling this and lv_mps out */
	lvp->lv_num_segs = 0;

	if (model == DATAMODEL_LP64) {
		lvp->lv_flags = LV_ELF64;
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		lvp->lv_flags = LV_ELF32;
	}

	if (base_va != NULL) {
		if (model == DATAMODEL_LP64) {
			atomic_inc_32(&libs_mapped_64);
		} else {
			ASSERT(model == DATAMODEL_ILP32);
			atomic_inc_32(&libs_mapped_32);
		}
	}
	ASSERT(*tmp == NULL);
	*tmp = lvp;
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	if (del) {
		ASSERT(del->lv_refcnt == 0);
		MOBJ_STAT_ADD(lib_va_add_delete);
		lib_va_free(del);
	}
	return (lvp);
}
/*
 * Release the hold on lvp which was acquired by lib_va_find or
 * lib_va_add_hash.  In addition, if this is the last hold and lvp is marked
 * for deletion, free up its reserved address space and free the structure.
 */
static void
lib_va_release(struct lib_va *lvp)
{
	uint_t index;
	int to_del = 0;

	ASSERT(lvp->lv_refcnt > 0);

	index = LIB_VA_HASH(lvp->lv_nodeid);
	mutex_enter(LIB_VA_HASH_MUTEX(index));
	if (--lvp->lv_refcnt == 0 && (lvp->lv_flags & LV_DEL)) {
		to_del = 1;
	}
	mutex_exit(LIB_VA_HASH_MUTEX(index));
	if (to_del) {
		ASSERT(lvp->lv_next == 0);
		lib_va_free(lvp);
	}
}
/*
 * Dummy function for mapping through /dev/null
 * Normally I would have used mmmmap in common/io/mem.c
 * but that is a static function, and for /dev/null, it
 * just returns -1.
 */
/* ARGSUSED */
static int
mmapobj_dummy(dev_t dev, off_t off, int prot)
{
	return (-1);
}
/*
 * Called when an error occurred which requires mmapobj to return failure.
 * All mapped objects will be unmapped and /dev/null mappings will be
 * reclaimed if necessary.
 * num_mapped is the number of elements of mrp which have been mapped, and
 * num_segs is the total number of elements in mrp.
 * For e_type ET_EXEC, we need to unmap all of the elements in mrp since
 * we had already made reservations for them.
 * If num_mapped equals num_segs, then we know that we had fully mapped
 * the file and only need to clean up the segments described.
 * If they are not equal, then for ET_DYN we will unmap the range from the
 * end of the last mapped segment to the end of the last segment in mrp
 * since we would have made a reservation for that memory earlier.
 * If e_type is passed in as zero, num_mapped must equal num_segs.
 */
void
mmapobj_unmap(mmapobj_result_t *mrp, int num_mapped, int num_segs,
    ushort_t e_type)
{
	int i;
	struct as *as = curproc->p_as;
	caddr_t addr;
	size_t size;

	if (e_type == ET_EXEC) {
		num_mapped = num_segs;
	}
#ifdef DEBUG
	if (e_type == 0) {
		ASSERT(num_mapped == num_segs);
	}
#endif

	MOBJ_STAT_ADD(unmap_called);
	for (i = 0; i < num_mapped; i++) {

		/*
		 * If we are going to have to create a mapping we need to
		 * make sure that no one else will use the address we
		 * need to remap between the time it is unmapped and
		 * mapped below.
		 */
		if (mrp[i].mr_flags & MR_RESV) {
			as_rangelock(as);
		}
		/* Always need to unmap what we mapped */
		(void) as_unmap(as, mrp[i].mr_addr, mrp[i].mr_msize);

		/* Need to reclaim /dev/null reservation from earlier */
		if (mrp[i].mr_flags & MR_RESV) {
			struct segdev_crargs dev_a;

			ASSERT(e_type != ET_DYN);
			/*
			 * Use seg_dev segment driver for /dev/null mapping.
			 */
			dev_a.mapfunc = mmapobj_dummy;
			dev_a.dev = makedevice(mm_major, M_NULL);
			dev_a.offset = 0;
			dev_a.type = 0;		/* neither PRIVATE nor SHARED */
			dev_a.prot = dev_a.maxprot = (uchar_t)PROT_NONE;
			dev_a.hat_attr = 0;
			dev_a.hat_flags = 0;

			(void) as_map(as, mrp[i].mr_addr, mrp[i].mr_msize,
			    segdev_create, &dev_a);
			MOBJ_STAT_ADD(remap_devnull);
			as_rangeunlock(as);
		}
	}

	if (num_mapped != num_segs) {
		ASSERT(e_type == ET_DYN);
		/* Need to unmap any reservation made after last mapped seg */
		if (num_mapped == 0) {
			addr = mrp[0].mr_addr;
		} else {
			addr = mrp[num_mapped - 1].mr_addr +
			    mrp[num_mapped - 1].mr_msize;
		}
		size = (size_t)mrp[num_segs - 1].mr_addr +
		    mrp[num_segs - 1].mr_msize - (size_t)addr;
		(void) as_unmap(as, addr, size);

		/*
		 * Now we need to unmap the holes between mapped segs.
		 * Note that we have not mapped all of the segments and thus
		 * the holes between segments would not have been unmapped
		 * yet.  If num_mapped == num_segs, then all of the holes
		 * between segments would have already been unmapped.
		 */

		for (i = 1; i < num_mapped; i++) {
			addr = mrp[i - 1].mr_addr + mrp[i - 1].mr_msize;
			size = mrp[i].mr_addr - addr;
			(void) as_unmap(as, addr, size);
		}
	}
}
/*
 * We need to add the start address into mrp so that the unmap function
 * has absolute addresses to use.
 */
static void
mmapobj_unmap_exec(mmapobj_result_t *mrp, int num_mapped, caddr_t start_addr)
{
	int i;

	for (i = 0; i < num_mapped; i++) {
		mrp[i].mr_addr += (size_t)start_addr;
	}
	mmapobj_unmap(mrp, num_mapped, num_mapped, ET_EXEC);
}
static caddr_t
mmapobj_lookup_start_addr(struct lib_va *lvp)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
	int error;
	uint_t ma_flags = _MAP_LOW32;
	caddr_t base = NULL;
	size_t len;
	size_t align;

	ASSERT(lvp != NULL);
	MOBJ_STAT_ADD(lookup_start);

	as_rangelock(as);

	base = lvp->lv_base_va;
	len = lvp->lv_len;

	/*
	 * If we don't have an expected base address, or the one that we want
	 * to use is not available or acceptable, go get an acceptable
	 * address range.
	 */
	if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
	    valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
	    RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
		if (lvp->lv_flags & LV_ELF64) {
			ma_flags = 0;
		}

		align = lvp->lv_align;
		if (align > 1) {
			ma_flags |= MAP_ALIGN;
		}

		base = (caddr_t)align;
		map_addr(&base, len, 0, 1, ma_flags);
	}

	/*
	 * Need to reserve the address space we're going to use.
	 * Don't reserve swap space since we'll be mapping over this.
	 */
	if (base != NULL) {
		crargs.flags |= MAP_NORESERVE;
		error = as_map(as, base, len, segvn_create, &crargs);
		if (error) {
			base = NULL;
		}
	}

	as_rangeunlock(as);
	return (base);
}
/*
 * Get the starting address for a given file to be mapped and return it
 * to the caller.  If we're using lib_va and we need to allocate an address,
 * we will attempt to allocate it from the global reserved pool such that the
 * same address can be used in the future for this file.  If we can't use the
 * reserved address then we just get one that will fit in our address space.
 *
 * Returns the starting virtual address for the range to be mapped or NULL
 * if an error is encountered.  If we successfully insert the requested info
 * into the lib_va hash, then *lvpp will be set to point to this lib_va
 * structure.  The structure will have a hold on it and thus lib_va_release
 * needs to be called on it by the caller.  This function will not fill out
 * lv_mps or lv_num_segs since it does not have enough information to do so.
 * The caller is responsible for doing this and for making sure that any
 * modifications to lv_mps are visible before setting lv_num_segs.
 */
static caddr_t
mmapobj_alloc_start_addr(struct lib_va **lvpp, size_t len, int use_lib_va,
    int randomize, size_t align, vattr_t *vap)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);
	int error;
	model_t model;
	uint_t ma_flags = _MAP_LOW32;
	caddr_t base = NULL;
	vmem_t *model_vmem;
	size_t lib_va_start;
	size_t lib_va_end;
	size_t lib_va_len;

	ASSERT(lvpp != NULL);
	ASSERT((randomize & use_lib_va) != 1);

	MOBJ_STAT_ADD(alloc_start);
	model = get_udatamodel();

	if (model == DATAMODEL_LP64) {
		ma_flags = 0;
		model_vmem = lib_va_64_arena;
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		model_vmem = lib_va_32_arena;
	}

	if (align > 1) {
		ma_flags |= MAP_ALIGN;
	}

	if (randomize != 0)
		ma_flags |= _MAP_RANDOMIZE;

	if (use_lib_va) {
		/*
		 * The first time through, we need to setup the lib_va arenas.
		 * We call map_addr to find a suitable range of memory to map
		 * the given library, and we will set the highest address
		 * in our vmem arena to the end of this address range.
		 * We allow up to half of the address space to be used
		 * for lib_va addresses but we do not prevent any allocations
		 * in this range from other allocation paths.
		 */
		if (lib_va_64_arena == NULL && model == DATAMODEL_LP64) {
			mutex_enter(&lib_va_init_mutex);
			if (lib_va_64_arena == NULL) {
				base = (caddr_t)align;
				as_rangelock(as);
				map_addr(&base, len, 0, 1, ma_flags);
				as_rangeunlock(as);
				if (base == NULL) {
					mutex_exit(&lib_va_init_mutex);
					MOBJ_STAT_ADD(lib_va_create_failure);
					goto nolibva;
				}
				lib_va_end = (size_t)base + len;
				lib_va_len = lib_va_end >> 1;
				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
				lib_va_start = lib_va_end - lib_va_len;

				/*
				 * Need to make sure we avoid the address hole.
				 * We know lib_va_end is valid but we need to
				 * make sure lib_va_start is as well.
				 */
				if ((lib_va_end > (size_t)hole_end) &&
				    (lib_va_start < (size_t)hole_end)) {
					lib_va_start = P2ROUNDUP(
					    (size_t)hole_end, PAGESIZE);
					lib_va_len = lib_va_end - lib_va_start;
				}
				lib_va_64_arena = vmem_create("lib_va_64",
				    (void *)lib_va_start, lib_va_len, PAGESIZE,
				    NULL, NULL, NULL, 0,
				    VM_NOSLEEP | VMC_IDENTIFIER);
				if (lib_va_64_arena == NULL) {
					mutex_exit(&lib_va_init_mutex);
					goto nolibva;
				}
			}
			model_vmem = lib_va_64_arena;
			mutex_exit(&lib_va_init_mutex);
		} else if (lib_va_32_arena == NULL &&
		    model == DATAMODEL_ILP32) {
			mutex_enter(&lib_va_init_mutex);
			if (lib_va_32_arena == NULL) {
				base = (caddr_t)align;
				as_rangelock(as);
				map_addr(&base, len, 0, 1, ma_flags);
				as_rangeunlock(as);
				if (base == NULL) {
					mutex_exit(&lib_va_init_mutex);
					MOBJ_STAT_ADD(lib_va_create_failure);
					goto nolibva;
				}
				lib_va_end = (size_t)base + len;
				lib_va_len = lib_va_end >> 1;
				lib_va_len = P2ROUNDUP(lib_va_len, PAGESIZE);
				lib_va_start = lib_va_end - lib_va_len;
				lib_va_32_arena = vmem_create("lib_va_32",
				    (void *)lib_va_start, lib_va_len, PAGESIZE,
				    NULL, NULL, NULL, 0,
				    VM_NOSLEEP | VMC_IDENTIFIER);
				if (lib_va_32_arena == NULL) {
					mutex_exit(&lib_va_init_mutex);
					goto nolibva;
				}
			}
			model_vmem = lib_va_32_arena;
			mutex_exit(&lib_va_init_mutex);
		}

		if (model == DATAMODEL_LP64 || libs_mapped_32 < lib_threshold) {
			base = vmem_xalloc(model_vmem, len, align, 0, 0, NULL,
			    NULL, VM_NOSLEEP | VM_ENDALLOC);
			MOBJ_STAT_ADD(alloc_vmem);
		}

		/*
		 * Even if the address fails to fit in our address space,
		 * or we can't use a reserved address,
		 * we should still save it off in lib_va_hash.
		 */
		*lvpp = lib_va_add_hash(base, len, align, vap);

		/*
		 * Check for collision on insertion and free up our VA space.
		 * This is expected to be rare, so we'll just reset base to
		 * NULL instead of looking it up in the lib_va hash.
		 */
		if (*lvpp == NULL) {
			if (base != NULL) {
				vmem_xfree(model_vmem, base, len);
				base = NULL;
				MOBJ_STAT_ADD(add_collision);
			}
		}
	}

nolibva:
	as_rangelock(as);

	/*
	 * If we don't have an expected base address, or the one that we want
	 * to use is not available or acceptable, go get an acceptable
	 * address range.
	 *
	 * If ASLR is enabled, we should never have used the cache, and should
	 * also start our real work here, in the consequent of the next
	 * condition.
	 */
	if (randomize != 0)
		ASSERT(base == NULL);

	if (base == NULL || as_gap(as, len, &base, &len, 0, NULL) ||
	    valid_usr_range(base, len, PROT_ALL, as, as->a_userlimit) !=
	    RANGE_OKAY || OVERLAPS_STACK(base + len, p)) {
		MOBJ_STAT_ADD(get_addr);
		base = (caddr_t)align;
		map_addr(&base, len, 0, 1, ma_flags);
	}

	/*
	 * Need to reserve the address space we're going to use.
	 * Don't reserve swap space since we'll be mapping over this.
	 */
	if (base != NULL) {
		/* Don't reserve swap space since we'll be mapping over this */
		crargs.flags |= MAP_NORESERVE;
		error = as_map(as, base, len, segvn_create, &crargs);
		if (error) {
			base = NULL;
		}
	}

	as_rangeunlock(as);
	return (base);
}
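
/*
 * Reader's note (not from the original source): on the first lib_va
 * allocation, map_addr() picks a sample range and the arena is then created
 * over roughly the lower-addressed half ending at that range's end, with the
 * VA hole skipped if one exists; VM_ENDALLOC asks vmem to prefer addresses
 * toward the end of that arena.
 */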
/*
 * Map the file associated with vp into the address space as a single
 * read only private mapping.
 * Returns 0 for success, and non-zero for failure to map the file.
 */
static int
mmapobj_map_flat(vnode_t *vp, mmapobj_result_t *mrp, size_t padding,
    cred_t *fcred)
{
	int error = 0;
	struct as *as = curproc->p_as;
	caddr_t addr = NULL;
	caddr_t start_addr;
	size_t len;
	size_t pad_len;
	int prot = PROT_USER | PROT_READ;
	uint_t ma_flags = _MAP_LOW32;
	vattr_t vattr;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_USER, PROT_ALL);

	if (get_udatamodel() == DATAMODEL_LP64) {
		ma_flags = 0;
	}

	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
	if (error) {
		return (error);
	}

	len = vattr.va_size;

	ma_flags |= MAP_PRIVATE;
	if (padding == 0) {
		MOBJ_STAT_ADD(map_flat_no_padding);
		error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL,
		    ma_flags, fcred, NULL);
		if (error == 0) {
			mrp[0].mr_addr = addr;
			mrp[0].mr_msize = len;
			mrp[0].mr_fsize = len;
			mrp[0].mr_offset = 0;
			mrp[0].mr_prot = prot;
			mrp[0].mr_flags = 0;
		}
		return (error);
	}

	/* padding was requested so there's more work to be done */
	MOBJ_STAT_ADD(map_flat_padding);

	/* No need to reserve swap space now since it will be reserved later */
	crargs.flags |= MAP_NORESERVE;

	/* Need to setup padding which can only be in PAGESIZE increments. */
	ASSERT((padding & PAGEOFFSET) == 0);
	pad_len = len + (2 * padding);

	as_rangelock(as);
	map_addr(&addr, pad_len, 0, 1, ma_flags);
	error = as_map(as, addr, pad_len, segvn_create, &crargs);
	as_rangeunlock(as);
	if (error) {
		return (error);
	}
	start_addr = addr;
	addr += padding;
	ma_flags |= MAP_FIXED;
	error = VOP_MAP(vp, 0, as, &addr, len, prot, PROT_ALL, ma_flags,
	    fcred, NULL);
	if (error == 0) {
		mrp[0].mr_addr = start_addr;
		mrp[0].mr_msize = padding;
		mrp[0].mr_fsize = 0;
		mrp[0].mr_offset = 0;
		mrp[0].mr_prot = 0;
		mrp[0].mr_flags = MR_PADDING;

		mrp[1].mr_addr = addr;
		mrp[1].mr_msize = len;
		mrp[1].mr_fsize = len;
		mrp[1].mr_offset = 0;
		mrp[1].mr_prot = prot;
		mrp[1].mr_flags = 0;

		mrp[2].mr_addr = addr + P2ROUNDUP(len, PAGESIZE);
		mrp[2].mr_msize = padding;
		mrp[2].mr_fsize = 0;
		mrp[2].mr_offset = 0;
		mrp[2].mr_prot = 0;
		mrp[2].mr_flags = MR_PADDING;
	} else {
		/* Need to cleanup the as_map from earlier */
		(void) as_unmap(as, start_addr, pad_len);
	}
	return (error);
}
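
/*
 * Reader's sketch of the padded flat layout returned above (not part of the
 * original file):
 *
 *	start_addr                                      start_addr + pad_len
 *	|<- padding ->|<- file (len, rounded to pages) ->|<- padding ->|
 *	    mrp[0]                  mrp[1]                   mrp[2]
 */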
/*
 * Map a PT_LOAD or PT_SUNWBSS section of an executable file into the user's
 * address space.
 * vp - vnode to be mapped in
 * addr - start address
 * len - length of vp to be mapped
 * zfodlen - length of zero filled memory after len above
 * offset - offset into file where mapping should start
 * prot - protections for this mapping
 * fcred - credentials for the file associated with vp at open time.
 */
static int
mmapobj_map_ptload(struct vnode *vp, caddr_t addr, size_t len, size_t zfodlen,
    off_t offset, int prot, cred_t *fcred)
{
	int error = 0;
	caddr_t zfodbase, oldaddr;
	size_t oldlen;
	size_t end;
	size_t zfoddiff;
	label_t ljb;
	struct as *as = curproc->p_as;
	model_t model;
	int full_page;

	/*
	 * See if addr and offset are aligned such that we can map in
	 * full pages instead of partial pages.
	 */
	full_page = (((uintptr_t)addr & PAGEOFFSET) ==
	    ((uintptr_t)offset & PAGEOFFSET));

	model = get_udatamodel();

	oldaddr = addr;
	addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if (len) {
		spgcnt_t availm, npages;
		int preread;
		uint_t mflag = MAP_PRIVATE | MAP_FIXED;

		if (model == DATAMODEL_ILP32) {
			mflag |= _MAP_LOW32;
		}
		/* We may need to map in extra bytes */
		oldlen = len;
		len += ((size_t)oldaddr & PAGEOFFSET);

		if (full_page) {
			offset = (off_t)((uintptr_t)offset & PAGEMASK);
			if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_EXEC) {
				mflag |= MAP_TEXT;
				MOBJ_STAT_ADD(map_ptload_text);
			} else {
				mflag |= MAP_INITDATA;
				MOBJ_STAT_ADD(map_ptload_initdata);
			}

			/*
			 * maxprot is passed as PROT_ALL so that mdb can
			 * write to this segment.
			 */
			if (error = VOP_MAP(vp, (offset_t)offset, as, &addr,
			    len, prot, PROT_ALL, mflag, fcred, NULL)) {
				return (error);
			}

			/*
			 * If the segment can fit and is relatively small, then
			 * we prefault the entire segment in.  This is based
			 * on the model that says the best working set of a
			 * small program is all of its pages.
			 * We only do this if freemem will not drop below
			 * lotsfree since we don't want to induce paging.
			 */
			npages = (spgcnt_t)btopr(len);
			availm = freemem - lotsfree;
			preread = (npages < availm && len < PGTHRESH) ? 1 : 0;

			/*
			 * If we aren't prefaulting the segment,
			 * increment "deficit", if necessary to ensure
			 * that pages will become available when this
			 * process starts executing.
			 */
			if (preread == 0 && npages > availm &&
			    deficit < lotsfree) {
				deficit += MIN((pgcnt_t)(npages - availm),
				    lotsfree - deficit);
			}

			if (preread) {
				(void) as_faulta(as, addr, len);
				MOBJ_STAT_ADD(map_ptload_preread);
			}
		} else {
			/*
			 * addr and offset were not aligned such that we could
			 * use VOP_MAP, thus we need to as_map the memory we
			 * need and then read the data in from disk.
			 * This code path is a corner case which should never
			 * be taken, but hand crafted binaries could trigger
			 * this logic and it needs to work correctly.
			 */
			MOBJ_STAT_ADD(map_ptload_unaligned_text);
			as_rangelock(as);
			(void) as_unmap(as, addr, len);

			/*
			 * We use zfod_argsp because we need to be able to
			 * write to the mapping and then we'll change the
			 * protections later if they are incorrect.
			 */
			error = as_map(as, addr, len, segvn_create, zfod_argsp);
			as_rangeunlock(as);
			if (error) {
				MOBJ_STAT_ADD(map_ptload_unaligned_map_fail);
				return (error);
			}

			/* Now read in the data from disk */
			error = vn_rdwr(UIO_READ, vp, oldaddr, oldlen, offset,
			    UIO_USERSPACE, 0, (rlim64_t)0, fcred, NULL);
			if (error) {
				MOBJ_STAT_ADD(map_ptload_unaligned_read_fail);
				return (error);
			}

			/*
			 * Now set protections.
			 */
			if (prot != PROT_ZFOD) {
				(void) as_setprot(as, addr, len, prot);
			}
		}
	}

	if (zfodlen) {
		end = (size_t)addr + len;
		zfodbase = (caddr_t)P2ROUNDUP(end, PAGESIZE);
		zfoddiff = (uintptr_t)zfodbase - end;
		if (zfoddiff) {
			/*
			 * Before we go to zero the remaining space on the last
			 * page, make sure we have write permission.
			 *
			 * We need to be careful how we zero-fill the last page
			 * if the protection does not include PROT_WRITE.
			 * Using as_setprot() can cause the VM segment code to
			 * call segvn_vpage(), which must allocate a page
			 * struct for each page in the segment.  If we have a
			 * very large segment, this may fail, so we check for
			 * that, even though we ignore other return values
			 * from as_setprot.
			 */
			MOBJ_STAT_ADD(zfoddiff);
			if ((prot & PROT_WRITE) == 0) {
				if (as_setprot(as, (caddr_t)end, zfoddiff,
				    prot | PROT_WRITE) == ENOMEM)
					return (ENOMEM);
				MOBJ_STAT_ADD(zfoddiff_nowrite);
			}
			if (on_fault(&ljb)) {
				no_fault();
				if ((prot & PROT_WRITE) == 0) {
					(void) as_setprot(as, (caddr_t)end,
					    zfoddiff, prot);
				}
				return (EFAULT);
			}
			uzero((void *)end, zfoddiff);
			no_fault();

			/*
			 * Remove write protection to return to original state
			 */
			if ((prot & PROT_WRITE) == 0) {
				(void) as_setprot(as, (caddr_t)end,
				    zfoddiff, prot);
			}
		}
		if (zfodlen > zfoddiff) {
			struct segvn_crargs crargs =
			    SEGVN_ZFOD_ARGS(prot, PROT_ALL);

			MOBJ_STAT_ADD(zfodextra);
			zfodlen -= zfoddiff;
			crargs.szc = AS_MAP_NO_LPOOB;

			as_rangelock(as);
			(void) as_unmap(as, (caddr_t)zfodbase, zfodlen);
			error = as_map(as, (caddr_t)zfodbase,
			    zfodlen, segvn_create, &crargs);
			as_rangeunlock(as);
			if (error) {
				return (error);
			}
		}
	}
	return (0);
}
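
/*
 * Illustrative zfod arithmetic (a hypothetical example, not from the
 * original source): with 4K pages, if a segment's file data ends at
 * end = addr + len with (end & PAGEOFFSET) == 0x234, then
 * zfoddiff = 0x1000 - 0x234 = 0xdcc bytes are zeroed in place on the last
 * file page, and any zfodlen beyond that is mapped as fresh anonymous
 * (zero-fill-on-demand) pages starting at zfodbase.
 */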
/*
 * Map the ELF file represented by vp into the users address space.  The
 * first mapping will start at start_addr and there will be num_elements
 * mappings.  The mappings are described by the data in mrp which may be
 * modified upon returning from this function.
 * Returns 0 for success or errno for failure.
 */
static int
mmapobj_map_elf(struct vnode *vp, caddr_t start_addr, mmapobj_result_t *mrp,
    int num_elements, cred_t *fcred, ushort_t e_type)
{
	int i;
	int ret;
	caddr_t lo;
	caddr_t hi;
	struct as *as = curproc->p_as;

	for (i = 0; i < num_elements; i++) {
		caddr_t addr;
		size_t p_memsz;
		size_t p_filesz;
		size_t zfodlen;
		offset_t p_offset;
		size_t dif;
		int prot;

		/* Always need to adjust mr_addr */
		addr = start_addr + (size_t)(mrp[i].mr_addr);
		mrp[i].mr_addr =
		    (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);

		/* Padding has already been mapped */
		if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
			continue;
		}

		/* Can't execute code from "noexec" mounted filesystem. */
		if (((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) &&
		    ((mrp[i].mr_prot & PROT_EXEC) != 0)) {
			MOBJ_STAT_ADD(noexec_fs);
			return (EACCES);
		}

		p_memsz = mrp[i].mr_msize;
		p_filesz = mrp[i].mr_fsize;
		zfodlen = p_memsz - p_filesz;
		p_offset = mrp[i].mr_offset;
		dif = (uintptr_t)(addr) & PAGEOFFSET;
		prot = mrp[i].mr_prot | PROT_USER;
		ret = mmapobj_map_ptload(vp, addr, p_filesz, zfodlen,
		    p_offset, prot, fcred);
		if (ret != 0) {
			MOBJ_STAT_ADD(ptload_failed);
			mmapobj_unmap(mrp, i, num_elements, e_type);
			return (ret);
		}

		/* Need to cleanup mrp to reflect the actual values used */
		mrp[i].mr_msize += dif;
		mrp[i].mr_offset = (size_t)addr & PAGEOFFSET;
	}

	/* Also need to unmap any holes created above */
	if (num_elements == 1) {
		MOBJ_STAT_ADD(map_elf_no_holes);
		return (0);
	}
	if (e_type == ET_EXEC) {
		return (0);
	}

	as_rangelock(as);
	lo = start_addr;
	hi = mrp[0].mr_addr;

	/* Remove holes made by the rest of the segments */
	for (i = 0; i < num_elements - 1; i++) {
		lo = (caddr_t)P2ROUNDUP((size_t)(mrp[i].mr_addr) +
		    mrp[i].mr_msize, PAGESIZE);
		hi = mrp[i + 1].mr_addr;
		if (lo < hi) {
			/*
			 * If as_unmap fails we just use up a bit of extra
			 * space
			 */
			(void) as_unmap(as, (caddr_t)lo,
			    (size_t)hi - (size_t)lo);
			MOBJ_STAT_ADD(unmap_hole);
		}
	}
	as_rangeunlock(as);

	return (0);
}
/* Ugly hack to get STRUCT_* macros to work below */
struct myphdr {
	Phdr		x;	/* native version */
};

struct myphdr32 {
	Elf32_Phdr	x;
};
/*
 * Calculate and return the number of loadable segments in the ELF Phdr
 * represented by phdrbase as well as the len of the total mapping and
 * the max alignment that is needed for a given segment.  On success,
 * 0 is returned, and *len, *loadable and *align have been filled out.
 * On failure, errno will be returned, which in this case is ENOTSUP
 * if we were passed an ELF file with overlapping segments.
 */
static int
calc_loadable(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, size_t *len,
    int *loadable, size_t *align)
{
	int i;
	int hsize;
	model_t model;
	ushort_t e_type = ehdrp->e_type;	/* same offset 32 and 64 bit */
	uint_t p_type;
	offset_t p_offset;
	size_t p_memsz;
	size_t p_align;
	caddr_t vaddr;
	int num_segs = 0;
	caddr_t start_addr = NULL;
	caddr_t p_end = NULL;
	size_t max_align = 0;
	size_t min_align = PAGESIZE;	/* needed for vmem_xalloc */
	STRUCT_HANDLE(myphdr, mph);
#if defined(__sparc)
	extern int vac_size;

	/*
	 * Want to prevent aliasing by making the start address at least be
	 * aligned to vac_size.
	 */
	min_align = MAX(PAGESIZE, vac_size);
#endif

	model = get_udatamodel();
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

	/* hsize alignment should have been checked before calling this func */
	if (model == DATAMODEL_LP64) {
		hsize = ehdrp->e_phentsize;
		if (hsize & 7) {
			return (ENOTSUP);
		}
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
		if (hsize & 3) {
			return (ENOTSUP);
		}
	}

	/*
	 * Determine the span of all loadable segments and calculate the
	 * number of loadable segments.
	 */
	for (i = 0; i < nphdrs; i++) {
		p_type = STRUCT_FGET(mph, x.p_type);
		if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
			vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
			p_memsz = STRUCT_FGET(mph, x.p_memsz);

			/*
			 * Skip this header if it requests no memory to be
			 * mapped.
			 */
			if (p_memsz == 0) {
				STRUCT_SET_HANDLE(mph, model,
				    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
				    hsize));
				MOBJ_STAT_ADD(nomem_header);
				continue;
			}
			if (num_segs++ == 0) {
				/*
				 * The p_vaddr of the first PT_LOAD segment
				 * must either be NULL or within the first
				 * page in order to be interpreted.
				 * Otherwise, it's an invalid file.
				 */
				if (e_type == ET_DYN &&
				    ((caddr_t)((uintptr_t)vaddr &
				    (uintptr_t)PAGEMASK) != NULL)) {
					MOBJ_STAT_ADD(inval_header);
					return (ENOTSUP);
				}
				start_addr = vaddr;
				/*
				 * For the first segment, we need to map from
				 * the beginning of the file, so we will
				 * adjust the size of the mapping to include
				 * this memory.
				 */
				p_offset = STRUCT_FGET(mph, x.p_offset);
			} else {
				p_offset = 0;
			}
			/*
			 * Check to make sure that this mapping wouldn't
			 * overlap a previous mapping.
			 */
			if (vaddr < p_end) {
				MOBJ_STAT_ADD(overlap_header);
				return (ENOTSUP);
			}

			p_end = vaddr + p_memsz + p_offset;
			p_end = (caddr_t)P2ROUNDUP((size_t)p_end, PAGESIZE);

			p_align = STRUCT_FGET(mph, x.p_align);
			if (p_align > 1 && p_align > max_align) {
				max_align = p_align;
				if (max_align < min_align) {
					max_align = min_align;
					MOBJ_STAT_ADD(min_align);
				}
			}
		}
		STRUCT_SET_HANDLE(mph, model,
		    (struct myphdr *)((size_t)STRUCT_BUF(mph) + hsize));
	}

	/*
	 * The alignment should be a power of 2; if it isn't, we forgive it
	 * and round up.  On overflow, we'll set the alignment to max_align
	 * rounded down to the nearest power of 2.
	 */
	if (max_align > 0 && !ISP2(max_align)) {
		MOBJ_STAT_ADD(np2_align);
		*align = 2 * (1L << (highbit(max_align) - 1));
		if (*align < max_align ||
		    (*align > UINT_MAX && model == DATAMODEL_ILP32)) {
			MOBJ_STAT_ADD(np2_align_overflow);
			*align = 1L << (highbit(max_align) - 1);
		}
	} else {
		*align = max_align;
	}

	ASSERT(*align >= PAGESIZE || *align == 0);

	*loadable = num_segs;
	*len = p_end - start_addr;
	return (0);
}
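
/*
 * Worked example (hypothetical values, not from the original source): for an
 * ET_DYN object with PT_LOAD #1 at p_vaddr 0 (p_offset 0, p_memsz 0x5000)
 * and PT_LOAD #2 at p_vaddr 0x10000 (p_memsz 0x3000), calc_loadable()
 * returns *loadable = 2 and *len = P2ROUNDUP(0x10000 + 0x3000, PAGESIZE)
 * - 0 = 0x13000; the gap between the two segments is part of the span and
 * is unmapped later by mmapobj_map_elf().
 */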
/*
 * Check the address space to see if the virtual addresses to be used are
 * available.  If they are not, return errno for failure.  On success, 0
 * will be returned, and the virtual addresses for each mmapobj_result_t
 * will be reserved.  Note that a reservation could have earlier been made
 * for a given segment via a /dev/null mapping.  If that is the case, then
 * we can use that VA space for our mappings.
 * Note: this function will only be used for ET_EXEC binaries.
 */
static int
check_exec_addrs(int loadable, mmapobj_result_t *mrp, caddr_t start_addr)
{
	int i;
	struct as *as = curproc->p_as;
	struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
	int ret;
	caddr_t myaddr;
	size_t mylen;
	struct seg *seg;

	/* No need to reserve swap space now since it will be reserved later */
	crargs.flags |= MAP_NORESERVE;
	as_rangelock(as);
	for (i = 0; i < loadable; i++) {

		myaddr = start_addr + (size_t)mrp[i].mr_addr;
		mylen = mrp[i].mr_msize;

		/* See if there is a hole in the as for this range */
		if (as_gap(as, mylen, &myaddr, &mylen, 0, NULL) == 0) {
			ASSERT(myaddr == start_addr + (size_t)mrp[i].mr_addr);
			ASSERT(mylen == mrp[i].mr_msize);

#ifdef DEBUG
			if (MR_GET_TYPE(mrp[i].mr_flags) == MR_PADDING) {
				MOBJ_STAT_ADD(exec_padding);
			}
#endif
			ret = as_map(as, myaddr, mylen, segvn_create, &crargs);
			if (ret) {
				as_rangeunlock(as);
				mmapobj_unmap_exec(mrp, i, start_addr);
				return (ret);
			}
		} else {
			/*
			 * There is a mapping that exists in the range
			 * so check to see if it was a "reservation"
			 * from /dev/null.  The mapping is from
			 * /dev/null if the mapping comes from
			 * segdev and the type is neither MAP_SHARED
			 * nor MAP_PRIVATE.
			 */
			AS_LOCK_ENTER(as, RW_READER);
			seg = as_findseg(as, myaddr, 0);
			MOBJ_STAT_ADD(exec_addr_mapped);
			if (seg && seg->s_ops == &segdev_ops &&
			    ((SEGOP_GETTYPE(seg, myaddr) &
			    (MAP_SHARED | MAP_PRIVATE)) == 0) &&
			    myaddr >= seg->s_base &&
			    myaddr + mylen <=
			    seg->s_base + seg->s_size) {
				MOBJ_STAT_ADD(exec_addr_devnull);
				AS_LOCK_EXIT(as);
				(void) as_unmap(as, myaddr, mylen);
				ret = as_map(as, myaddr, mylen, segvn_create,
				    &crargs);
				mrp[i].mr_flags |= MR_RESV;
				if (ret) {
					as_rangeunlock(as);
					/* Need to remap what we unmapped */
					mmapobj_unmap_exec(mrp, i + 1,
					    start_addr);
					return (ret);
				}
			} else {
				AS_LOCK_EXIT(as);
				as_rangeunlock(as);
				mmapobj_unmap_exec(mrp, i, start_addr);
				MOBJ_STAT_ADD(exec_addr_in_use);
				return (EADDRINUSE);
			}
		}
	}
	as_rangeunlock(as);
	return (0);
}
/*
 * Walk through the ELF program headers and extract all useful information
 * for PT_LOAD and PT_SUNWBSS segments into mrp.
 * Return 0 on success or error on failure.
 */
static int
process_phdrs(Ehdr *ehdrp, caddr_t phdrbase, int nphdrs, mmapobj_result_t *mrp,
    vnode_t *vp, uint_t *num_mapped, size_t padding, cred_t *fcred)
{
	int i;
	caddr_t start_addr = NULL;
	caddr_t vaddr;
	size_t len = 0;
	size_t lib_len = 0;
	int ret;
	int prot;
	struct lib_va *lvp = NULL;
	vattr_t vattr;
	struct as *as = curproc->p_as;
	int error;
	int loadable = 0;
	int current = 0;
	int use_lib_va = 1;
	size_t align = 0;
	size_t add_pad = 0;
	int hdr_seen = 0;
	ushort_t e_type = ehdrp->e_type;	/* same offset 32 and 64 bit */
	uint_t p_type;
	offset_t p_offset;
	size_t p_memsz;
	size_t p_filesz;
	uint_t p_flags;
	int hsize;
	model_t model;
	STRUCT_HANDLE(myphdr, mph);

	model = get_udatamodel();
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);

	/*
	 * Need to make sure that hsize is aligned properly.
	 * For 32bit processes, 4 byte alignment is required.
	 * For 64bit processes, 8 byte alignment is required.
	 * If the alignment isn't correct, we need to return failure
	 * since it could cause an alignment error panic while walking
	 * the phdr array.
	 */
	if (model == DATAMODEL_LP64) {
		hsize = ehdrp->e_phentsize;
		if (hsize & 7) {
			MOBJ_STAT_ADD(phent_align64);
			return (ENOTSUP);
		}
	} else {
		ASSERT(model == DATAMODEL_ILP32);
		hsize = ((Elf32_Ehdr *)ehdrp)->e_phentsize;
		if (hsize & 3) {
			MOBJ_STAT_ADD(phent_align32);
			return (ENOTSUP);
		}
	}
	if ((padding != 0) || secflag_enabled(curproc, PROC_SEC_ASLR)) {
		use_lib_va = 0;
	}
	if (e_type == ET_DYN) {
		vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME;
		error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
		if (error) {
			return (error);
		}
		/* Check to see if we already have a description for this lib */
		if (!secflag_enabled(curproc, PROC_SEC_ASLR))
			lvp = lib_va_find(&vattr);

		if (lvp != NULL) {
			MOBJ_STAT_ADD(lvp_found);
			if (use_lib_va) {
				start_addr = mmapobj_lookup_start_addr(lvp);
				if (start_addr == NULL) {
					lib_va_release(lvp);
					return (ENOMEM);
				}
			}

			/*
			 * loadable may be zero if the original allocator
			 * of lvp hasn't finished setting it up but the rest
			 * of the fields will be accurate.
			 */
			loadable = lvp->lv_num_segs;
			len = lvp->lv_len;
			align = lvp->lv_align;
		}
	}

	/*
	 * Determine the span of all loadable segments and calculate the
	 * number of loadable segments, the total len spanned by the mappings
	 * and the max alignment, if we didn't get them above.
	 */
	if (loadable == 0) {
		MOBJ_STAT_ADD(no_loadable_yet);
		ret = calc_loadable(ehdrp, phdrbase, nphdrs, &len,
		    &loadable, &align);
		if (ret != 0) {
			/*
			 * Since it'd be an invalid file, we shouldn't have
			 * cached it previously.
			 */
			ASSERT(lvp == NULL);
			return (ret);
		}
#ifdef DEBUG
		if (lvp) {
			ASSERT(len == lvp->lv_len);
			ASSERT(align == lvp->lv_align);
		}
#endif
	}

	/* Make sure there's something to map. */
	if (len == 0 || loadable == 0) {
		/*
		 * Since it'd be an invalid file, we shouldn't have
		 * cached it previously.
		 */
		ASSERT(lvp == NULL);
		MOBJ_STAT_ADD(nothing_to_map);
		return (ENOTSUP);
	}

	lib_len = len;
	if (padding != 0) {
		loadable += 2;
	}
	if (loadable > *num_mapped) {
		*num_mapped = loadable;
		/* cleanup previous reservation */
		if (start_addr) {
			(void) as_unmap(as, start_addr, lib_len);
		}
		MOBJ_STAT_ADD(e2big);
		if (lvp) {
			lib_va_release(lvp);
		}
		return (E2BIG);
	}
	/*
	 * We now know the size of the object to map and now we need to
	 * get the start address to map it at.  It's possible we already
	 * have it if we found all the info we need in the lib_va cache.
	 */
	if (e_type == ET_DYN && start_addr == NULL) {
		/*
		 * Need to make sure padding does not throw off
		 * required alignment.  We can only specify an
		 * alignment for the starting address to be mapped,
		 * so we round padding up to the alignment and map
		 * from there and then throw out the extra later.
		 */
		if (padding != 0) {
			if (align > 1) {
				add_pad = P2ROUNDUP(padding, align);
				len += add_pad;
				MOBJ_STAT_ADD(dyn_pad_align);
			} else {
				MOBJ_STAT_ADD(dyn_pad_noalign);
				len += padding;	/* at beginning */
			}
			len += padding;	/* at end of mapping */
		}

		/*
		 * At this point, if lvp is non-NULL, then above we
		 * already found it in the cache but did not get
		 * the start address since we were not going to use lib_va.
		 * Since we know that lib_va will not be used, it's safe
		 * to call mmapobj_alloc_start_addr and know that lvp
		 * will not be modified.
		 */
		ASSERT(lvp ? use_lib_va == 0 : 1);
		start_addr = mmapobj_alloc_start_addr(&lvp, len,
		    use_lib_va,
		    secflag_enabled(curproc, PROC_SEC_ASLR),
		    align, &vattr);
		if (start_addr == NULL) {
			if (lvp) {
				lib_va_release(lvp);
			}
			MOBJ_STAT_ADD(alloc_start_fail);
			return (ENOMEM);
		}
		/*
		 * If we can't cache it, no need to hang on to it.
		 * Setting lv_num_segs to non-zero will make that
		 * field active and since there are too many segments
		 * to cache, all future users will not try to use lv_mps.
		 */
		if (lvp != NULL && loadable > LIBVA_CACHED_SEGS && use_lib_va) {
			lvp->lv_num_segs = loadable;
			lib_va_release(lvp);
			lvp = NULL;
			MOBJ_STAT_ADD(lvp_nocache);
		}
		/*
		 * Free the beginning of the mapping if the padding
		 * was not aligned correctly.
		 */
		if (padding != 0 && add_pad != padding) {
			(void) as_unmap(as, start_addr,
			    add_pad - padding);
			start_addr += (add_pad - padding);
			MOBJ_STAT_ADD(extra_padding);
		}
	}

	/*
	 * At this point, we have reserved the virtual address space
	 * for our mappings.  Now we need to start filling out the mrp
	 * array to describe all of the individual mappings we are going
	 * to return.
	 * For ET_EXEC there has been no memory reservation since we are
	 * using fixed addresses.  While filling in the mrp array below,
	 * we will have the first segment biased to start at addr 0
	 * and the rest will be biased by this same amount.  Thus if there
	 * is padding, the first padding will start at addr 0, and the next
	 * segment will start at the value of padding.
	 */

	/* We'll fill out padding later, so start filling in mrp at index 1 */
	if (padding != 0) {
		current = 1;
	}

	/* If we have no more need for lvp let it go now */
	if (lvp != NULL && use_lib_va == 0) {
		lib_va_release(lvp);
		MOBJ_STAT_ADD(lvp_not_needed);
		lvp = NULL;
	}
	/* Now fill out the mrp structs from the program headers */
	STRUCT_SET_HANDLE(mph, model, (struct myphdr *)phdrbase);
	for (i = 0; i < nphdrs; i++) {
		p_type = STRUCT_FGET(mph, x.p_type);
		if (p_type == PT_LOAD || p_type == PT_SUNWBSS) {
			vaddr = (caddr_t)(uintptr_t)STRUCT_FGET(mph, x.p_vaddr);
			p_memsz = STRUCT_FGET(mph, x.p_memsz);
			p_filesz = STRUCT_FGET(mph, x.p_filesz);
			p_offset = STRUCT_FGET(mph, x.p_offset);
			p_flags = STRUCT_FGET(mph, x.p_flags);

			/*
			 * Skip this header if it requests no memory to be
			 * mapped.
			 */
			if (p_memsz == 0) {
				STRUCT_SET_HANDLE(mph, model,
				    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
				    hsize));
				MOBJ_STAT_ADD(no_mem_map_sz);
				continue;
			}

			prot = 0;
			if (p_flags & PF_R)
				prot |= PROT_READ;
			if (p_flags & PF_W)
				prot |= PROT_WRITE;
			if (p_flags & PF_X)
				prot |= PROT_EXEC;

			ASSERT(current < loadable);
			mrp[current].mr_msize = p_memsz;
			mrp[current].mr_fsize = p_filesz;
			mrp[current].mr_offset = p_offset;
			mrp[current].mr_prot = prot;

			if (hdr_seen == 0 && p_filesz != 0) {
				mrp[current].mr_flags = MR_HDR_ELF;
				/*
				 * We modify mr_offset because we
				 * need to map the ELF header as well, and if
				 * we didn't then the header could be left out
				 * of the mapping that we will create later.
				 * Since we're removing the offset, we need to
				 * account for that in the other fields as well
				 * since we will be mapping the memory from 0
				 * to p_offset.
				 */
				if (e_type == ET_DYN) {
					mrp[current].mr_offset = 0;
					mrp[current].mr_msize += p_offset;
					mrp[current].mr_fsize += p_offset;
				} else {
					ASSERT(e_type == ET_EXEC);
					/*
					 * Save off the start addr which will
					 * be our bias for the rest of the
					 * ET_EXEC mappings.
					 */
					start_addr = vaddr - padding;
				}
				mrp[current].mr_addr = (caddr_t)padding;
				hdr_seen = 1;
			} else {
				if (e_type == ET_EXEC) {
					/* bias mr_addr */
					mrp[current].mr_addr =
					    vaddr - (size_t)start_addr;
				} else {
					mrp[current].mr_addr = vaddr + padding;
				}
				mrp[current].mr_flags = 0;
			}
			current++;
		}

		/* Move to next phdr */
		STRUCT_SET_HANDLE(mph, model,
		    (struct myphdr *)((size_t)STRUCT_BUF(mph) +
		    hsize));
	}

	/* Now fill out the padding segments */
	if (padding != 0) {
		mrp[0].mr_addr = NULL;
		mrp[0].mr_msize = padding;
		mrp[0].mr_fsize = 0;
		mrp[0].mr_offset = 0;
		mrp[0].mr_prot = 0;
		mrp[0].mr_flags = MR_PADDING;

		/* Setup padding for the last segment */
		ASSERT(current == loadable - 1);
		mrp[current].mr_addr = (caddr_t)lib_len + padding;
		mrp[current].mr_msize = padding;
		mrp[current].mr_fsize = 0;
		mrp[current].mr_offset = 0;
		mrp[current].mr_prot = 0;
		mrp[current].mr_flags = MR_PADDING;
	}
	/*
	 * Need to make sure address ranges desired are not in use or
	 * are previously allocated reservations from /dev/null.  For
	 * ET_DYN, we already made sure our address range was free.
	 */
	if (e_type == ET_EXEC) {
		ret = check_exec_addrs(loadable, mrp, start_addr);
		if (ret != 0) {
			ASSERT(lvp == NULL);
			MOBJ_STAT_ADD(check_exec_failed);
			return (ret);
		}
	}

	/* Finish up our business with lvp. */
	if (lvp) {
		ASSERT(e_type == ET_DYN);
		if (lvp->lv_num_segs == 0 && loadable <= LIBVA_CACHED_SEGS) {
			bcopy(mrp, lvp->lv_mps,
			    loadable * sizeof (mmapobj_result_t));
			membar_producer();
		}

		/*
		 * Setting lv_num_segs to a non-zero value indicates that
		 * lv_mps is now valid and can be used by other threads.
		 * So, the above stores need to finish before lv_num_segs
		 * is updated.  lv_mps is only valid if lv_num_segs is
		 * less than or equal to LIBVA_CACHED_SEGS.
		 */
		lvp->lv_num_segs = loadable;
		lib_va_release(lvp);
		MOBJ_STAT_ADD(lvp_used);
	}

	/* Now that we have mrp completely filled out go map it */
	ret = mmapobj_map_elf(vp, start_addr, mrp, loadable, fcred, e_type);
	if (ret == 0) {
		*num_mapped = loadable;
	}

	return (ret);
}
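
/*
 * Reader's summary of the mrp layout produced above (not from the original
 * source): with padding requested, mrp[0] and mrp[loadable - 1] are
 * MR_PADDING entries and the loadable segments sit in between; the entry
 * covering the ELF header carries MR_HDR_ELF.  The mr_addr values are
 * offsets that mmapobj_map_elf() later biases by start_addr to produce
 * absolute addresses.
 */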
1929
1930 /*
1931 * Take the ELF file passed in, and do the work of mapping it.
1932 * num_mapped in - # elements in user buffer
1933 * num_mapped out - # sections mapped and length of mrp array if
1934 * no errors.
1935 */
1936 static int
1937 doelfwork(Ehdr *ehdrp, vnode_t *vp, mmapobj_result_t *mrp,
1938 uint_t *num_mapped, size_t padding, cred_t *fcred)
1939 {
1940 int error;
1941 offset_t phoff;
1942 int nphdrs;
1943 unsigned char ei_class;
1944 unsigned short phentsize;
1945 ssize_t phsizep;
1946 caddr_t phbasep;
1947 int to_map;
1948 model_t model;
1950 ei_class = ehdrp->e_ident[EI_CLASS];
1951 model = get_udatamodel();
1952 if ((model == DATAMODEL_ILP32 && ei_class == ELFCLASS64) ||
1953 (model == DATAMODEL_LP64 && ei_class == ELFCLASS32)) {
1954 MOBJ_STAT_ADD(wrong_model);
1955 return (ENOTSUP);
1956 }
1957
1958 /* Can't execute code from "noexec" mounted filesystem. */
1959 if (ehdrp->e_type == ET_EXEC &&
1960 (vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) {
1961 MOBJ_STAT_ADD(noexec_fs);
1962 return (EACCES);
1963 }
1964
1965 /*
1966 * Relocatable and core files are mapped as a single flat file
1967 * since no interpretation is done on them by mmapobj.
1968 */
1969 if (ehdrp->e_type == ET_REL || ehdrp->e_type == ET_CORE) {
1970 to_map = padding ? 3 : 1;
1971 if (*num_mapped < to_map) {
1972 *num_mapped = to_map;
1973 MOBJ_STAT_ADD(e2big_et_rel);
1974 return (E2BIG);
1975 }
1976 error = mmapobj_map_flat(vp, mrp, padding, fcred);
1977 if (error == 0) {
1978 *num_mapped = to_map;
1979 mrp[padding ? 1 : 0].mr_flags = MR_HDR_ELF;
1980 MOBJ_STAT_ADD(et_rel_mapped);
1981 }
1982 return (error);
1983 }
1984
1985 /* Check for an unknown ELF type */
1986 if (ehdrp->e_type != ET_EXEC && ehdrp->e_type != ET_DYN) {
1987 MOBJ_STAT_ADD(unknown_elf_type);
1988 return (ENOTSUP);
1989 }
1990
1991 if (ei_class == ELFCLASS32) {
1992 Elf32_Ehdr *e32hdr = (Elf32_Ehdr *)ehdrp;
1993 ASSERT(model == DATAMODEL_ILP32);
1994 nphdrs = e32hdr->e_phnum;
1995 phentsize = e32hdr->e_phentsize;
1996 if (phentsize < sizeof (Elf32_Phdr)) {
1997 MOBJ_STAT_ADD(phent32_too_small);
1998 return (ENOTSUP);
1999 }
2000 phoff = e32hdr->e_phoff;
2001 } else if (ei_class == ELFCLASS64) {
2002 Elf64_Ehdr *e64hdr = (Elf64_Ehdr *)ehdrp;
2003 ASSERT(model == DATAMODEL_LP64);
2004 nphdrs = e64hdr->e_phnum;
2005 phentsize = e64hdr->e_phentsize;
2006 if (phentsize < sizeof (Elf64_Phdr)) {
2007 MOBJ_STAT_ADD(phent64_too_small);
2008 return (ENOTSUP);
2009 }
2010 phoff = e64hdr->e_phoff;
2011 } else {
2012 /* fallthrough case for an invalid ELF class */
2013 MOBJ_STAT_ADD(inval_elf_class);
2014 return (ENOTSUP);
2015 }
2016
2017 /*
2018 * nphdrs should only have this value for core files, which are handled
2019 * above as a single mapping. If other file types ever use this
2020 * sentinel, then we'll add the support needed to handle this here.
2021 */
2022 if (nphdrs == PN_XNUM) {
2023 MOBJ_STAT_ADD(too_many_phdrs);
2024 return (ENOTSUP);
2025 }
2026
2027 phsizep = nphdrs * phentsize;
2028
2029 if (phsizep == 0) {
2030 MOBJ_STAT_ADD(no_phsize);
2031 return (ENOTSUP);
2032 }
2033
2034 /* Make sure we only wait for memory if it's a reasonable request */
2035 if (phsizep > mmapobj_alloc_threshold) {
2036 MOBJ_STAT_ADD(phsize_large);
2037 if ((phbasep = kmem_alloc(phsizep, KM_NOSLEEP)) == NULL) {
2038 MOBJ_STAT_ADD(phsize_xtralarge);
2039 return (ENOMEM);
2040 }
2041 } else {
2042 phbasep = kmem_alloc(phsizep, KM_SLEEP);
2043 }
2044
2045 if ((error = vn_rdwr(UIO_READ, vp, phbasep, phsizep,
2046 (offset_t)phoff, UIO_SYSSPACE, 0, (rlim64_t)0,
2047 fcred, NULL)) != 0) {
2048 kmem_free(phbasep, phsizep);
2049 return (error);
2050 }
2051
2052 /* Now process the phdr's */
2053 error = process_phdrs(ehdrp, phbasep, nphdrs, mrp, vp, num_mapped,
2054 padding, fcred);
2055 kmem_free(phbasep, phsizep);
2056 return (error);
2057 }
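/*
 * Editor's sketch (not part of the original file): the program header
 * table sizing and validation that doelfwork() performs, restated as a
 * stand-alone user-space helper over <elf.h> types. phdr_table_size()
 * is a hypothetical name.
 */
#include <elf.h>
#include <errno.h>
#include <stddef.h>

static int
phdr_table_size(const Elf64_Ehdr *eh, size_t *sizep)
{
	if (eh->e_phentsize < sizeof (Elf64_Phdr))
		return (ENOTSUP);	/* phentsize too small to be real */
	if (eh->e_phnum == PN_XNUM)
		return (ENOTSUP);	/* extended-numbering sentinel */
	if (eh->e_phnum == 0)
		return (ENOTSUP);	/* nothing to map */
	*sizep = (size_t)eh->e_phnum * eh->e_phentsize;
	return (0);
}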
2058
2059 #if defined(__sparc)
2060 /*
2061 * Hack to support 64 bit kernels running AOUT 4.x programs.
2062 * This is the sizeof (struct nlist) for a 32 bit kernel.
2063 * Since AOUT programs are 32 bit only, they will never use the 64 bit
2064 * sizeof (struct nlist) and thus creating a #define is the simplest
2065 * way around this since this is a format which is not being updated.
2066 * This will be used in the place of sizeof (struct nlist) below.
2067 */
2068 #define NLIST_SIZE (0xC)
2069
2070 static int
2071 doaoutwork(vnode_t *vp, mmapobj_result_t *mrp,
2072 uint_t *num_mapped, struct exec *hdr, cred_t *fcred)
2073 {
2074 int error;
2075 size_t size;
2076 size_t osize;
2077 size_t nsize; /* nlist size */
2078 size_t msize;
2079 size_t zfoddiff;
2080 caddr_t addr;
2081 caddr_t start_addr;
2082 struct as *as = curproc->p_as;
2083 int prot = PROT_USER | PROT_READ | PROT_EXEC;
2084 uint_t mflag = MAP_PRIVATE | _MAP_LOW32;
2085 offset_t off = 0;
2086 int segnum = 0;
2087 uint_t to_map;
2088 int is_library = 0;
2089 struct segvn_crargs crargs = SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
2091 /* Only 32bit apps supported by this file format */
2092 if (get_udatamodel() != DATAMODEL_ILP32) {
2093 MOBJ_STAT_ADD(aout_64bit_try);
2094 return (ENOTSUP);
2095 }
2096
2097 /* Check to see if this is a library */
2098 if (hdr->a_magic == ZMAGIC && hdr->a_entry < PAGESIZE) {
2099 is_library = 1;
2100 }
2101
2102 /*
2103 * Can't execute code from "noexec" mounted filesystem. Unlike ELF,
2104 * aout libraries are always mapped with something PROT_EXEC, so this
2105 * doesn't need to be checked for specific parts.
2106 */
2107 if ((vp->v_vfsp->vfs_flag & VFS_NOEXEC) != 0) {
2108 MOBJ_STAT_ADD(aout_noexec);
2109 return (EACCES);
2110 }
2111
2112 /*
2113 * There are two ways to calculate the mapped size of an executable:
2114 * 1) rounded text size + data size + bss size.
2115 * 2) starting offset for text + text size + data size + text relocation
2116 * size + data relocation size + room for the nlist data structure.
2117 *
2118 * The larger of the two sizes will be used to map this binary.
2119 */
2120 osize = P2ROUNDUP(hdr->a_text, PAGESIZE) + hdr->a_data + hdr->a_bss;
2121
2122 off = hdr->a_magic == ZMAGIC ? 0 : sizeof (struct exec);
2123
2124 nsize = off + hdr->a_text + hdr->a_data + hdr->a_trsize +
2125 hdr->a_drsize + NLIST_SIZE;
2127 size = MAX(osize, nsize);
2128 if (size != nsize) {
2129 nsize = 0;
2130 }
2131
2132 /*
2133 * 1 seg for text and 1 seg for initialized data.
2134 * 1 seg for bss (if it can't fit in the leftover space of init data).
2135 * 1 seg for nlist if needed.
2136 */
2137 to_map = 2 + (nsize ? 1 : 0) +
2138 (hdr->a_bss > PAGESIZE - P2PHASE(hdr->a_data, PAGESIZE) ? 1 : 0);
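/*
 * Editor's worked example (illustrative numbers, 8K pages): for a
 * ZMAGIC binary with a_text = 0x3000, a_data = 0x1800, a_bss = 0x400
 * and no relocations:
 *	osize = P2ROUNDUP(0x3000, 0x2000) + 0x1800 + 0x400 = 0x5c00
 *	nsize = 0 + 0x3000 + 0x1800 + 0 + 0 + NLIST_SIZE = 0x480c
 * osize is larger, so nsize is cleared; the bss fits in the tail of
 * the last data page (0x400 <= 0x2000 - 0x1800), so to_map = 2.
 */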
2139 if (*num_mapped < to_map) {
2140 *num_mapped = to_map;
2141 MOBJ_STAT_ADD(aout_e2big);
2142 return (E2BIG);
2143 }
2144
2145 /* Reserve address space for the whole mapping */
2146 if (is_library) {
2147 /* We'll let VOP_MAP below pick our address for us */
2148 addr = NULL;
2149 MOBJ_STAT_ADD(aout_lib);
2150 } else {
2151 /*
2152 * Default start address for fixed binaries from the AOUT 4.x
2153 * standard.
2154 */
2155 MOBJ_STAT_ADD(aout_fixed);
2156 mflag |= MAP_FIXED;
2157 addr = (caddr_t)0x2000;
2158 as_rangelock(as);
2159 if (as_gap(as, size, &addr, &size, 0, NULL) != 0) {
2160 as_rangeunlock(as);
2161 MOBJ_STAT_ADD(aout_addr_in_use);
2162 return (EADDRINUSE);
2163 }
2164 crargs.flags |= MAP_NORESERVE;
2165 error = as_map(as, addr, size, segvn_create, &crargs);
2166 ASSERT(addr == (caddr_t)0x2000);
2167 as_rangeunlock(as);
2168 }
2169
2170 start_addr = addr;
2171 osize = size;
2172
2173 /*
2174 * Map as large as we need, backed by the file; this will be text, and
2175 * possibly the nlist segment. We map over this mapping for bss and
2176 * initialized data segments.
2177 */
2178 error = VOP_MAP(vp, off, as, &addr, size, prot, PROT_ALL,
2179 mflag, fcred, NULL);
2180 if (error) {
2181 if (!is_library) {
2182 (void) as_unmap(as, start_addr, osize);
2183 }
2184 return (error);
2185 }
2186
2187 /* Pick up the value of start_addr and osize for libraries. */
2188 start_addr = addr;
2189 osize = size;
2190
2191 /*
2192 * We have our initial reservation/allocation so we need to use fixed
2193 * addresses from now on.
2194 */
2195 mflag |= MAP_FIXED;
2196
2197 mrp[0].mr_addr = addr;
2198 mrp[0].mr_msize = hdr->a_text;
2199 mrp[0].mr_fsize = hdr->a_text;
2200 mrp[0].mr_offset = 0;
2201 mrp[0].mr_prot = PROT_READ | PROT_EXEC;
2202 mrp[0].mr_flags = MR_HDR_AOUT;
2203
2204
2205 /*
2206 * Map initialized data. We are mapping over a portion of the
2207 * previous mapping which will be unmapped in VOP_MAP below.
2208 */
2209 off = P2ROUNDUP((offset_t)(hdr->a_text), PAGESIZE);
2210 msize = off;
2211 addr += off;
2212 size = hdr->a_data;
2213 error = VOP_MAP(vp, off, as, &addr, size, PROT_ALL, PROT_ALL,
2214 mflag, fcred, NULL);
2215 if (error) {
2216 (void) as_unmap(as, start_addr, osize);
2217 return (error);
2218 }
2219 msize += size;
2220 mrp[1].mr_addr = addr;
2221 mrp[1].mr_msize = size;
2222 mrp[1].mr_fsize = size;
2223 mrp[1].mr_offset = 0;
2224 mrp[1].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
2225 mrp[1].mr_flags = 0;
2227 /* Need to zero out remainder of page */
2228 addr += hdr->a_data;
2229 zfoddiff = P2PHASE((size_t)addr, PAGESIZE);
2230 if (zfoddiff) {
2231 label_t ljb;
2233 MOBJ_STAT_ADD(aout_zfoddiff);
2234 zfoddiff = PAGESIZE - zfoddiff;
2235 if (on_fault(&ljb)) {
2236 no_fault();
2237 MOBJ_STAT_ADD(aout_uzero_fault);
2238 (void) as_unmap(as, start_addr, osize);
2239 return (EFAULT);
2240 }
2241 uzero(addr, zfoddiff);
2242 no_fault();
2243 }
2244 msize += zfoddiff;
2245 segnum = 2;
2246
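/*
 * Editor's worked example (illustrative numbers, 8K pages): if the
 * initialized data ends at addr = 0x25800, P2PHASE(addr, PAGESIZE) is
 * 0x1800, so zfoddiff becomes 0x2000 - 0x1800 = 0x800 and that many
 * trailing bytes of the final data page are zeroed via uzero() before
 * any bss segment is mapped over the remainder.
 */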
2247 /* Map bss */
2248 if (hdr->a_bss > zfoddiff) {
2249 struct segvn_crargs crargs =
2250 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
2251 MOBJ_STAT_ADD(aout_map_bss);
2252 addr += zfoddiff;
2253 size = hdr->a_bss - zfoddiff;
2254 as_rangelock(as);
2255 (void) as_unmap(as, addr, size);
2256 error = as_map(as, addr, size, segvn_create, &crargs);
2257 as_rangeunlock(as);
2258 msize += size;
2260 if (error) {
2261 MOBJ_STAT_ADD(aout_bss_fail);
2262 (void) as_unmap(as, start_addr, osize);
2263 return (error);
2264 }
2265 mrp[2].mr_addr = addr;
2266 mrp[2].mr_msize = size;
2267 mrp[2].mr_fsize = 0;
2268 mrp[2].mr_offset = 0;
2269 mrp[2].mr_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
2270 mrp[2].mr_flags = 0;
2272 addr += size;
2273 segnum = 3;
2274 }
2275
2276 /*
2277 * If we have extra bits left over, we need to include that in how
2278 * much we mapped to make sure the nlist logic is correct.
2279 */
2280 msize = P2ROUNDUP(msize, PAGESIZE);
2281
2282 if (nsize && msize < nsize) {
2283 MOBJ_STAT_ADD(aout_nlist);
2284 mrp[segnum].mr_addr = addr;
2285 mrp[segnum].mr_msize = nsize - msize;
2286 mrp[segnum].mr_fsize = 0;
2287 mrp[segnum].mr_offset = 0;
2288 mrp[segnum].mr_prot = PROT_READ | PROT_EXEC;
2289 mrp[segnum].mr_flags = 0;
2290 }
2291
2292 *num_mapped = to_map;
2293 return (0);
2294 }
2295 #endif
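/*
 * Editor's sketch (not part of the original file): the power-of-two
 * helpers the AOUT path leans on, re-stated in user space. The macro
 * bodies are assumed to match illumos's <sys/sysmacros.h>; the spot
 * checks use the numbers from the worked example above.
 */
#include <assert.h>

#define	XP2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up */
#define	XP2PHASE(x, align)	((x) & ((align) - 1))	/* phase within unit */

int
main(void)
{
	assert(XP2ROUNDUP(0x3000UL, 0x2000UL) == 0x4000UL);
	assert(XP2ROUNDUP(0x4000UL, 0x2000UL) == 0x4000UL);	/* aligned */
	assert(XP2PHASE(0x1800UL, 0x2000UL) == 0x1800UL);
	return (0);
}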
2296
2297 /*
2298 * These are the two types of files that we can interpret and we want to read
2299 * in enough info to cover both types when looking at the initial header.
2300 */
2301 #define MAX_HEADER_SIZE (MAX(sizeof (Ehdr), sizeof (struct exec)))
2302
2303 /*
2304 * Map the vp passed in, in an interpreted manner. ELF and AOUT files will be
2305 * interpreted and mapped appropriately for execution.
2306 * num_mapped in - # elements in mrp
2307 * num_mapped out - # sections mapped and length of mrp array if
2308 * no errors or E2BIG returned.
2309 *
2310 * Returns 0 on success, errno value on failure.
2311 */
2312 static int
2313 mmapobj_map_interpret(vnode_t *vp, mmapobj_result_t *mrp,
2314 uint_t *num_mapped, size_t padding, cred_t *fcred)
2315 {
2316 int error = 0;
2317 vattr_t vattr;
2318 struct lib_va *lvp;
2319 caddr_t start_addr;
2320 model_t model;
2321
2322 /*
2323 * The header has to be aligned to the native size of ulong_t in order
2324 * to avoid an unaligned access when dereferencing the header as
2325 * a ulong_t. Thus we allocate our array on the stack of type
2326 * ulong_t and then have header, which we dereference later as a char
2327 * array, point at lheader.
2328 */
2329 ulong_t lheader[(MAX_HEADER_SIZE / (sizeof (ulong_t))) + 1];
2330 caddr_t header = (caddr_t)&lheader;
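/*
 * Editor's note on the sizing arithmetic above: with 8-byte ulong_t
 * and M = MAX_HEADER_SIZE, the array provides (M / 8 + 1) * 8 bytes,
 * which is always >= M even when M is not a multiple of sizeof
 * (ulong_t), and the buffer inherits ulong_t alignment from its type.
 */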
2332 vattr.va_mask = AT_FSID | AT_NODEID | AT_CTIME | AT_MTIME | AT_SIZE;
2333 error = VOP_GETATTR(vp, &vattr, 0, fcred, NULL);
2334 if (error) {
2335 return (error);
2336 }
2337
2338 /*
2339 * Check lib_va to see if we already have a full description
2340 * for this library. This is the fast path, and it is only used for
2341 * ET_DYN ELF files (dynamic libraries).
2342 */
2343 if (padding == 0 && !secflag_enabled(curproc, PROC_SEC_ASLR) &&
2344 ((lvp = lib_va_find(&vattr)) != NULL)) {
2345 int num_segs;
2347 model = get_udatamodel();
2348 if ((model == DATAMODEL_ILP32 &&
2349 lvp->lv_flags & LV_ELF64) ||
2350 (model == DATAMODEL_LP64 &&
2351 lvp->lv_flags & LV_ELF32)) {
2352 lib_va_release(lvp);
2353 MOBJ_STAT_ADD(fast_wrong_model);
2354 return (ENOTSUP);
2355 }
2356 num_segs = lvp->lv_num_segs;
2357 if (*num_mapped < num_segs) {
2358 *num_mapped = num_segs;
2359 lib_va_release(lvp);
2360 MOBJ_STAT_ADD(fast_e2big);
2361 return (E2BIG);
2362 }
2363
2364 /*
2365 * Check to see if we have all the mappable program headers
2366 * cached.
2367 */
2368 if (num_segs <= LIBVA_CACHED_SEGS && num_segs != 0) {
2369 MOBJ_STAT_ADD(fast);
2370 start_addr = mmapobj_lookup_start_addr(lvp);
2371 if (start_addr == NULL) {
2372 lib_va_release(lvp);
2373 return (ENOMEM);
2374 }
2375
2376 bcopy(lvp->lv_mps, mrp,
2377 num_segs * sizeof (mmapobj_result_t));
2379 error = mmapobj_map_elf(vp, start_addr, mrp,
2380 num_segs, fcred, ET_DYN);
2382 lib_va_release(lvp);
2383 if (error == 0) {
2384 *num_mapped = num_segs;
2385 MOBJ_STAT_ADD(fast_success);
2386 }
2387 return (error);
2388 }
2389 MOBJ_STAT_ADD(fast_not_now);
2390
2391 /* Release it for now since we'll look it up below */
2392 lib_va_release(lvp);
2393 }
2394
2395 /*
2396 * Time to see if this is a file we can interpret. If it's smaller
2397 * than MAX_HEADER_SIZE, then we can't interpret it.
2398 */
2399 if (vattr.va_size < MAX_HEADER_SIZE) {
2400 MOBJ_STAT_ADD(small_file);
2401 return (ENOTSUP);
2402 }
2403
2404 if ((error = vn_rdwr(UIO_READ, vp, header, MAX_HEADER_SIZE, 0,
2405 UIO_SYSSPACE, 0, (rlim64_t)0, fcred, NULL)) != 0) {
2406 MOBJ_STAT_ADD(read_error);
2407 return (error);
2408 }
2409
2410 /* Verify file type */
2411 if (header[EI_MAG0] == ELFMAG0 && header[EI_MAG1] == ELFMAG1 &&
2412 header[EI_MAG2] == ELFMAG2 && header[EI_MAG3] == ELFMAG3) {
2413 return (doelfwork((Ehdr *)lheader, vp, mrp, num_mapped,
2414 padding, fcred));
2415 }
2416
2417 #if defined(__sparc)
2418 /* On sparc, check for 4.X AOUT format */
2419 switch (((struct exec *)header)->a_magic) {
2420 case OMAGIC:
2421 case ZMAGIC:
2422 case NMAGIC:
2423 return (doaoutwork(vp, mrp, num_mapped,
2424 (struct exec *)lheader, fcred));
2425 }
2426 #endif
2427
2428 /* Unsupported type */
2429 MOBJ_STAT_ADD(unsupported);
2430 return (ENOTSUP);
2431 }
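/*
 * Editor's sketch (not part of the original file): the header sniff
 * above as a stand-alone routine. The ELF test matches e_ident[0..3];
 * the AOUT magics (octal, per the classic 4.x format) live in the low
 * 16 bits of the first 32-bit word. sniff_header() is a hypothetical
 * name.
 */
#include <elf.h>
#include <stdint.h>
#include <string.h>

enum ftype { FT_ELF, FT_AOUT, FT_UNKNOWN };

static enum ftype
sniff_header(const unsigned char *hdr, size_t len)
{
	uint32_t word;

	if (len >= SELFMAG && memcmp(hdr, ELFMAG, SELFMAG) == 0)
		return (FT_ELF);
	if (len >= sizeof (word)) {
		memcpy(&word, hdr, sizeof (word));
		switch (word & 0xffff) {
		case 0407:	/* OMAGIC */
		case 0410:	/* NMAGIC */
		case 0413:	/* ZMAGIC */
			return (FT_AOUT);
		}
	}
	return (FT_UNKNOWN);
}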
2432
2433 /*
2434 * Given a vnode, map it as either a flat file or interpret it and map
2435 * it according to the rules of the file type.
2436 * *num_mapped will contain the size of the mmapobj_result_t array passed in.
2437 * If padding is non-zero, the mappings will be padded by that amount
2438 * rounded up to the nearest pagesize.
2439 * If the mapping is successful, *num_mapped will contain the number of
2440 * distinct mappings created, and mrp will point to the array of
2441 * mmapobj_result_t's which describe these mappings.
2442 *
2443 * On error, an errno value is returned (the mmapobj(2) wrapper returns
2444 * -1 and sets errno). A special error case returns E2BIG when there are
2445 * more than *num_mapped mappings to be created; *num_mapped will then be
2446 * set to the number of mappings needed.
2447 */
2448 int
2449 mmapobj(vnode_t *vp, uint_t flags, mmapobj_result_t *mrp,
2450 uint_t *num_mapped, size_t padding, cred_t *fcred)
2451 {
2452 int to_map;
2453 int error = 0;
2455 ASSERT((padding & PAGEOFFSET) == 0);
2456 ASSERT((flags & ~MMOBJ_ALL_FLAGS) == 0);
2457 ASSERT(num_mapped != NULL);
2458 ASSERT((flags & MMOBJ_PADDING) ? padding != 0 : padding == 0);
2460 if ((flags & MMOBJ_INTERPRET) == 0) {
2461 to_map = padding ? 3 : 1;
2462 if (*num_mapped < to_map) {
2463 *num_mapped = to_map;
2464 MOBJ_STAT_ADD(flat_e2big);
2465 return (E2BIG);
2466 }
2467 error = mmapobj_map_flat(vp, mrp, padding, fcred);
2468
2469 if (error) {
2470 return (error);
2471 }
2472 *num_mapped = to_map;
2473 return (0);
2474 }
2475
2476 error = mmapobj_map_interpret(vp, mrp, num_mapped, padding, fcred);
2477 return (error);
2478 }
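/*
 * Editor's sketch (not part of the original file): how a user-level
 * caller is expected to drive the E2BIG protocol described above,
 * using the mmapobj(2) library interface as documented on
 * illumos/Solaris (signature assumed from the man page).
 */
#include <sys/mman.h>
#include <errno.h>
#include <stdlib.h>

static mmapobj_result_t *
map_object(int fd, uint_t *nmappedp)
{
	uint_t n = 16;	/* enough for most objects in one call */
	mmapobj_result_t *mrp = malloc(n * sizeof (*mrp));

	if (mrp == NULL)
		return (NULL);
	while (mmapobj(fd, MMOBJ_INTERPRET, mrp, &n, NULL) == -1) {
		if (errno != E2BIG) {
			free(mrp);
			return (NULL);
		}
		/* n now holds the required count; grow and retry */
		free(mrp);
		if ((mrp = malloc(n * sizeof (*mrp))) == NULL)
			return (NULL);
	}
	*nmappedp = n;
	return (mrp);
}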