/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"
RCSID("$OpenBSD: monitor_mm.c,v 1.9 2004/05/11 19:01:43 deraadt Exp $");

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "ssh.h"
#include "xmalloc.h"
#include "log.h"
#include "monitor_mm.h"

static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
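
/*
 * RB_GENERATE() expands into the red-black tree routines (RB_INSERT,
 * RB_REMOVE, RB_FIND, RB_NEXT, ...) for "mmtree", linking mm_share
 * nodes through their "next" field and ordering them by start address
 * with mm_compare() above.
 */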

static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
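
/*
 * Illustrative lifecycle sketch (hypothetical sizes, not taken from
 * this file).  Passing NULL keeps the allocator's bookkeeping in
 * ordinary heap memory; passing another mm_master places the
 * bookkeeping in shared memory as well:
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	char *buf = mm_xmalloc(mm, 128);
 *	mm_free(mm, buf);
 *	mm_destroy(mm);
 */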

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}

void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}

/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	/* Round the request up to a multiple of MM_MINSIZE */
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug: poison the region being handed out */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
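
/*
 * Worked example of the rounding above: assuming MM_MINSIZE were 4
 * (an illustrative value only), a 5-byte request becomes
 * ((5 + 3) / 4) * 4 = 8 bytes.  The RB_FOREACH scan is first-fit by
 * address: it takes the lowest-addressed free chunk that is large
 * enough and carves the allocation off its front.
 */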

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug: poison the freed region */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry (in-order predecessor in the free tree) */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
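
/*
 * Coalescing example with hypothetical addresses: freeing the chunk at
 * 0x1000 (size 64) while the free tree already holds its neighbours
 * 0x0fc0 (size 64) and 0x1040 (size 64) first merges backwards into
 * 0x0fc0 (size 128), then forwards into a single entry 0x0fc0 (size 192).
 */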

static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}

void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
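
/*
 * After mm_share_sync() returns, the mm_master and all of its mm_share
 * bookkeeping nodes live inside the freshly created shared area
 * *pmmalloc, so the allocator state itself sits in shared memory; the
 * old bookkeeping master is destroyed.
 */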

void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}
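
/*
 * mm_memvalid() is the sanity check used above (see mm_sync_list()) to
 * verify that a pointer/length pair lies entirely within the mapping
 * before it is dereferenced.
 */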