/*
 * Copyright 2008, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NaCl Simple/secure ELF loader (NaCl SEL) memory map.
 */
#include "native_client/include/portability.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include "native_client/service_runtime/sel_mem.h"
#include "native_client/service_runtime/sel_util.h"
#include "native_client/service_runtime/nacl_check.h"
#include "native_client/service_runtime/nacl_config.h"
#include "native_client/service_runtime/nacl_log.h"
#include "native_client/service_runtime/nacl_memory_object.h"

#define START_ENTRIES       5   /* tramp+text, rodata, data, bss, stack */
#define REMOVE_MARKED_DEBUG 0
/*
 * The memory map structure is a simple array of memory regions which
 * may have different access protections.  We do not yet merge regions
 * with the same access protections together to reduce the region
 * number, but may do so in the future.
 *
 * Regions are described by (relative) starting page number, the
 * number of pages, and the protection that the pages should have.
 *
 * Takes ownership of NaClMemObj.
 */
struct NaClVmmapEntry *NaClVmmapEntryMake(uintptr_t         page_num,
                                          size_t            npages,
                                          int               prot,
                                          struct NaClMemObj *nmop) {
  struct NaClVmmapEntry *entry;

  NaClLog(4,
          "NaClVmmapEntryMake(0x%"PRIxPTR",0x%"PRIxS",0x%x,0x%"PRIxPTR")\n",
          page_num, npages, prot, (uintptr_t) nmop);
  entry = (struct NaClVmmapEntry *) malloc(sizeof *entry);
  if (NULL == entry) {
    return NULL;
  }
  NaClLog(4, "entry: 0x%"PRIxPTR"\n", (uintptr_t) entry);
  entry->page_num = page_num;
  entry->npages = npages;
  entry->prot = prot;
  entry->nmop = nmop;
  entry->removed = 0;
  return entry;
}
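
/*
 * For orientation, a sketch of the types this file manipulates.  The
 * authoritative definitions live in sel_mem.h; the field lists below are
 * inferred from the accesses in this file and are illustrative only.
 */
#if 0
struct NaClVmmapEntry {
  uintptr_t         page_num;  /* start of region, in pages */
  size_t            npages;    /* size of region, in pages */
  int               prot;      /* NACL_ABI_PROT_* protection bits */
  struct NaClMemObj *nmop;     /* backing object; NULL => paging file */
  int               removed;   /* marked for lazy removal */
};

struct NaClVmmap {
  struct NaClVmmapEntry **vmentry;  /* array of pointers to entries */
  size_t                nvalid;     /* number of entries in use */
  size_t                size;       /* allocated capacity of vmentry */
};

struct NaClVmmapIter {
  struct NaClVmmap  *vmmap;
  size_t            entry_ix;
};
#endif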
void NaClVmmapEntryFree(struct NaClVmmapEntry *entry) {
  NaClLog(4,
          ("NaClVmmapEntryFree(0x%08"PRIxPTR"): (0x%"PRIxPTR",0x%"PRIxS","
           "0x%x,0x%"PRIxPTR")\n"),
          (uintptr_t) entry,
          entry->page_num, entry->npages, entry->prot,
          (uintptr_t) entry->nmop);
  if (NULL != entry->nmop) {
    NaClMemObjDtor(entry->nmop);
    free(entry->nmop);
  }
  free(entry);
}
void NaClVmentryPrint(void                  *state,
                      struct NaClVmmapEntry *vmep) {
  UNREFERENCED_PARAMETER(state);

  printf("page num 0x%06x\n", (uint32_t) vmep->page_num);
  printf("num pages %d\n", (uint32_t) vmep->npages);
  printf("prot bits %x\n", vmep->prot);
}
void NaClVmmapDebug(struct NaClVmmap *self,
                    char             *msg) {
  puts(msg);
  NaClVmmapVisit(self, NaClVmentryPrint, (void *) 0);
}
int NaClVmmapCtor(struct NaClVmmap *self) {
  self->size = START_ENTRIES;
  self->vmentry = calloc(self->size, sizeof *self->vmentry);
  if (!self->vmentry) {
    return 0;
  }
  self->nvalid = 0;
  return 1;
}
void NaClVmmapDtor(struct NaClVmmap *self) {
  size_t i;

  for (i = 0; i < self->nvalid; ++i) {
    NaClVmmapEntryFree(self->vmentry[i]);
  }
  free(self->vmentry);
  self->vmentry = NULL;
}
/*
 * Comparison function for qsort.  Should never encounter a
 * removed/invalid entry.
 */
static int NaClVmmapCmpEntries(void const *vleft,
                               void const *vright) {
  struct NaClVmmapEntry const *const *left =
      (struct NaClVmmapEntry const *const *) vleft;
  struct NaClVmmapEntry const *const *right =
      (struct NaClVmmapEntry const *const *) vright;

  return (int) ((*left)->page_num - (*right)->page_num);
}
static void NaClVmmapRemoveMarked(struct NaClVmmap *self) {
  size_t  i;
  size_t  last;

  if (0 == self->nvalid)
    return;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "Before RemoveMarked");
#endif
  /*
   * Linearly scan with a move-end-to-front strategy to get rid of
   * marked-to-be-removed entries.
   */

  /* forall j in [0, self->nvalid): NULL != self->vmentry[j] */
  for (last = self->nvalid; last > 0 && self->vmentry[--last]->removed; ) {
    NaClVmmapEntryFree(self->vmentry[last]);
    self->vmentry[last] = NULL;
  }
  if (last == 0 && self->vmentry[0]->removed) {
    NaClLog(LOG_FATAL, "No valid entries in VM map\n");
    return;
  }

  /* forall j in [0, last]: NULL != self->vmentry[j] */
  /* 0 <= last < self->nvalid && !self->vmentry[last]->removed */
  CHECK(0 <= last && last < self->nvalid);
  CHECK(!self->vmentry[last]->removed);
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */

  /* loop invar: forall j in [0, i): !self->vmentry[j]->removed */
  for (i = 0; i < last; ++i) {
    if (!self->vmentry[i]->removed) {
      continue;
    }
    /* self->vmentry[i]->removed */

    NaClVmmapEntryFree(self->vmentry[i]);
    self->vmentry[i] = self->vmentry[last];
    self->vmentry[last] = NULL;

    /* forall j in [last, self->nvalid): NULL == self->vmentry[j] */
    /* forall j in [0, i]: !self->vmentry[j]->removed */

    while (--last > i && self->vmentry[last]->removed) {
      NaClVmmapEntryFree(self->vmentry[last]);
      self->vmentry[last] = NULL;
    }
    /*
     * since !self->vmentry[i]->removed, we are guaranteed that
     * !self->vmentry[last]->removed when the while loop terminates.
     *
     * forall j in (last, self->nvalid):
     *   NULL == self->vmentry[j]
     */
  }
  /* forall j in [0, last]: !self->vmentry[j]->removed */
  /* forall j in (last, self->nvalid): NULL == self->vmentry[j] */
  self->nvalid = last + 1;

#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After RemoveMarked");
#endif
}
void NaClVmmapMakeSorted(struct NaClVmmap *self) {
  NaClVmmapRemoveMarked(self);

  qsort(self->vmentry,
        self->nvalid,
        sizeof *self->vmentry,
        NaClVmmapCmpEntries);
#if REMOVE_MARKED_DEBUG
  NaClVmmapDebug(self, "After Sort");
#endif
}
/*
 * Adds an entry.  Does not sort.
 */
int NaClVmmapAdd(struct NaClVmmap  *self,
                 uintptr_t         page_num,
                 size_t            npages,
                 int               prot,
                 struct NaClMemObj *nmop) {
  struct NaClVmmapEntry *entry;

  NaClLog(2,
          ("NaClVmmapAdd(0x%08"PRIxPTR", 0x%"PRIxPTR", 0x%"PRIxS", 0x%x,"
           " 0x%08"PRIxPTR")\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop);
  if (self->nvalid == self->size) {
    int                   new_size = 2 * self->size;
    struct NaClVmmapEntry **new_map;

    new_map = realloc(self->vmentry, new_size * sizeof *new_map);
    if (NULL == new_map) {
      return 0;
    }
    self->vmentry = new_map;
    self->size = new_size;
  }
  /* self->nvalid < self->size */
  entry = NaClVmmapEntryMake(page_num, npages, prot, nmop);
  if (NULL == entry) {
    return 0;
  }
  self->vmentry[self->nvalid] = entry;
  ++self->nvalid;

  return 1;
}
/*
 * Update the virtual memory map.  Deletion is handled by a remove
 * flag, since a NULL nmop just means that the memory is backed by the
 * system paging file.
 */
void NaClVmmapUpdate(struct NaClVmmap  *self,
                     uintptr_t         page_num,
                     size_t            npages,
                     int               prot,
                     struct NaClMemObj *nmop,
                     int               remove) {
  /* update existing entries or create new entry as needed */
  size_t    i;
  uintptr_t new_region_end_page = page_num + npages;

  NaClLog(2,
          ("NaClVmmapUpdate(0x%08"PRIxPTR", 0x%"PRIxPTR", 0x%"PRIxS","
           " 0x%x, 0x%08"PRIxPTR")\n"),
          (uintptr_t) self, page_num, npages, prot, (uintptr_t) nmop);
  NaClVmmapMakeSorted(self);

  CHECK(npages > 0);

  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    uintptr_t             ent_end_page = ent->page_num + ent->npages;
    off_t                 additional_offset =
        (new_region_end_page - ent->page_num) << NACL_PAGESHIFT;

    if (ent->page_num < page_num && new_region_end_page < ent_end_page) {
      /*
       * Split existing mapping into two parts, with new mapping in
       * the middle.
       */
      if (!NaClVmmapAdd(self,
                        new_region_end_page,
                        ent_end_page - new_region_end_page,
                        ent->prot,
                        NaClMemObjSplit(ent->nmop, additional_offset))) {
        NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not split entry\n");
      }
      ent->npages = page_num - ent->page_num;
      break;
    } else if (ent->page_num < page_num && page_num < ent_end_page) {
      /* New mapping overlaps end of existing mapping. */
      ent->npages = page_num - ent->page_num;
    } else if (ent->page_num < new_region_end_page &&
               new_region_end_page < ent_end_page) {
      /* New mapping overlaps start of existing mapping. */
      NaClMemObjIncOffset(ent->nmop, additional_offset);
      ent->page_num = new_region_end_page;
      ent->npages = ent_end_page - new_region_end_page;
      break;
    } else if (page_num <= ent->page_num &&
               ent_end_page <= new_region_end_page) {
      /* New mapping covers all of the existing mapping. */
      ent->removed = 1;
    } else {
      /* No overlap between the new and the existing mapping. */
      assert(new_region_end_page <= ent->page_num ||
             ent_end_page <= page_num);
    }
  }

  if (!remove) {
    if (!NaClVmmapAdd(self, page_num, npages, prot, nmop))
      NaClLog(LOG_FATAL, "NaClVmmapUpdate: could not add entry\n");
  }

  NaClVmmapRemoveMarked(self);
}
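
/*
 * Worked example of the update cases above (illustrative only, not part of
 * the original file): starting from a single entry covering pages
 * [0x10, 0x20), the call below hits the "split" case.  The existing entry
 * is trimmed to [0x10, 0x14), a tail entry [0x18, 0x20) is added via
 * NaClMemObjSplit, and the new entry [0x14, 0x18) is appended afterwards.
 * Passing a nonzero remove flag would instead punch a hole without adding
 * the new region.
 */
#if 0
  NaClVmmapUpdate(&map, 0x14, 0x4, NACL_ABI_PROT_READ, NULL, 0);
#endif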
static int NaClVmmapContainCmpEntries(void const *vkey,
                                      void const *vent) {
  struct NaClVmmapEntry const *const *key =
      (struct NaClVmmapEntry const *const *) vkey;
  struct NaClVmmapEntry const *const *ent =
      (struct NaClVmmapEntry const *const *) vent;

  NaClLog(5, "key->page_num   = 0x%05"PRIxPTR"\n", (*key)->page_num);

  NaClLog(5, "entry->page_num = 0x%05"PRIxPTR"\n", (*ent)->page_num);
  NaClLog(5, "entry->npages   = 0x%"PRIxS"\n", (*ent)->npages);

  if ((*key)->page_num < (*ent)->page_num) return -1;
  if ((*key)->page_num < (*ent)->page_num + (*ent)->npages) return 0;
  return 1;
}
struct NaClVmmapEntry const *NaClVmmapFindPage(struct NaClVmmap *self,
                                               uintptr_t        pnum) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry *const *result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry *const *)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  return result_ptr ? *result_ptr : NULL;
}
struct NaClVmmapIter *NaClVmmapFindPageIter(struct NaClVmmap     *self,
                                            uintptr_t            pnum,
                                            struct NaClVmmapIter *space) {
  struct NaClVmmapEntry key;
  struct NaClVmmapEntry *kptr;
  struct NaClVmmapEntry **result_ptr;

  NaClVmmapMakeSorted(self);
  key.page_num = pnum;
  kptr = &key;
  result_ptr = ((struct NaClVmmapEntry **)
                bsearch(&kptr,
                        self->vmentry,
                        self->nvalid,
                        sizeof self->vmentry[0],
                        NaClVmmapContainCmpEntries));
  space->vmmap = self;
  if (NULL == result_ptr) {
    space->entry_ix = self->nvalid;
  } else {
    space->entry_ix = result_ptr - self->vmentry;
  }
  return space;
}
int NaClVmmapIterAtEnd(struct NaClVmmapIter *nvip) {
  return nvip->entry_ix >= nvip->vmmap->nvalid;
}
/*
 * IterStar only permissible if not AtEnd.
 */
struct NaClVmmapEntry *NaClVmmapIterStar(struct NaClVmmapIter *nvip) {
  return nvip->vmmap->vmentry[nvip->entry_ix];
}
void NaClVmmapIterIncr(struct NaClVmmapIter *nvip) {
  ++nvip->entry_ix;
}
/*
 * Iterator becomes invalid after Erase.  We could have a version that
 * keeps the iterator valid by copying forward, but it is unclear
 * whether that is needed.
 */
void NaClVmmapIterErase(struct NaClVmmapIter *nvip) {
  struct NaClVmmap *nvp;

  nvp = nvip->vmmap;
  free(nvp->vmentry[nvip->entry_ix]);
  nvp->vmentry[nvip->entry_ix] = nvp->vmentry[--nvp->nvalid];
}
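
/*
 * A minimal usage sketch of the iterator API above (illustrative only, not
 * part of the original file; "map" and "page" stand for a valid
 * struct NaClVmmap and a page number to start from):
 */
#if 0
static void ExampleWalkFromPage(struct NaClVmmap *map, uintptr_t page) {
  struct NaClVmmapIter space;
  struct NaClVmmapIter *it;

  /* position the iterator at the entry containing page, or at end */
  for (it = NaClVmmapFindPageIter(map, page, &space);
       !NaClVmmapIterAtEnd(it);
       NaClVmmapIterIncr(it)) {
    struct NaClVmmapEntry *entry = NaClVmmapIterStar(it);  /* not AtEnd */
    NaClLog(4, "entry at 0x%"PRIxPTR", 0x%"PRIxS" pages\n",
            entry->page_num, entry->npages);
  }
}
#endif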
void NaClVmmapVisit(struct NaClVmmap *self,
                    void             (*fn)(void                  *state,
                                           struct NaClVmmapEntry *entry),
                    void             *state) {
  size_t i;
  size_t nentries;

  NaClVmmapMakeSorted(self);
  for (i = 0, nentries = self->nvalid; i < nentries; ++i) {
    (*fn)(state, self->vmentry[i]);
  }
}
/*
 * Linear search, from high addresses down.
 */
uintptr_t NaClVmmapFindSpace(struct NaClVmmap *self,
                             size_t           num_pages) {
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;   /* end page from previous */
    start_page = self->vmentry[i]->page_num;    /* start page from current */
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
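
/*
 * Worked example (illustrative, not part of the original file): with
 * sorted entries occupying pages [0x10, 0x20) and [0x40, 0x50), the gap
 * between them is [0x20, 0x40), i.e. 0x20 pages.  NaClVmmapFindSpace(self,
 * 0x10) returns 0x40 - 0x10 = 0x30: the highest-addressed placement
 * [0x30, 0x40) that fits in the gap.
 */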
/*
 * Linear search, from high addresses down.  For mmap, so the starting
 * address of the region found must be NACL_MAP_PAGESIZE aligned.
 *
 * For general mmap it is better to use as high an address as
 * possible, since the stack size for the main thread is currently
 * fixed, and the heap is the only thing that grows.
 */
uintptr_t NaClVmmapFindMapSpace(struct NaClVmmap *self,
                                size_t           num_pages) {
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;

  if (0 == self->nvalid)
    return 0;
  NaClVmmapMakeSorted(self);
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  for (i = self->nvalid; --i > 0; ) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;   /* end page from previous */
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;    /* start page from current */
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {
      start_page = NaClTruncPageNumDownToMapMultiple(start_page);
      if (start_page <= end_page) {
        continue;
      }
    }
    if (start_page - end_page >= num_pages) {
      return start_page - num_pages;
    }
  }
  return 0;
  /*
   * in user addresses, page 0 is always trampoline, and user
   * addresses are contained in system addresses, so returning a
   * system page number of 0 can serve as error indicator: it is at
   * worst the trampoline page, and likely to be below it.
   */
}
/*
 * Linear search, from uaddr up.
 */
uintptr_t NaClVmmapFindMapSpaceAboveHint(struct NaClVmmap *self,
                                         uintptr_t        uaddr,
                                         size_t           num_pages) {
  size_t                nvalid;
  size_t                i;
  struct NaClVmmapEntry *vmep;
  uintptr_t             end_page;
  uintptr_t             start_page;
  uintptr_t             usr_page;

  NaClVmmapMakeSorted(self);

  usr_page = uaddr >> NACL_PAGESHIFT;
  num_pages = NaClRoundPageNumUpToMapMultiple(num_pages);

  nvalid = self->nvalid;

  for (i = 1; i < nvalid; ++i) {
    vmep = self->vmentry[i-1];
    end_page = vmep->page_num + vmep->npages;
    end_page = NaClRoundPageNumUpToMapMultiple(end_page);

    start_page = self->vmentry[i]->page_num;
    if (NACL_MAP_PAGESHIFT > NACL_PAGESHIFT) {
      start_page = NaClTruncPageNumDownToMapMultiple(start_page);
      if (start_page <= end_page) {
        continue;
      }
    }
    if (end_page <= usr_page && usr_page < start_page) {
      end_page = usr_page;
    }
    if (usr_page <= end_page && (start_page - end_page) >= num_pages) {
      /* found a gap at or after uaddr that's big enough */
      return end_page;
    }
  }
  return 0;
}
/* Returns whether the given range contains any executable mappings. */
int NaClVmmapContainsExecutablePages(struct NaClVmmap *self,
                                     uintptr_t        page_num,
                                     size_t           npages) {
  size_t i;

  NaClVmmapRemoveMarked(self);
  for (i = 0; i < self->nvalid; i++) {
    struct NaClVmmapEntry *ent = self->vmentry[i];
    if ((ent->prot & NACL_ABI_PROT_EXEC) != 0 &&
        !(page_num + npages <= ent->page_num ||
          ent->page_num + ent->npages <= page_num)) {
      return 1;
    }
  }
  return 0;
}