/* $NetBSD: monitor_mm.c,v 1.1.1.2 2009/12/27 01:06:58 christos Exp $ */
/* $OpenBSD: monitor_mm.c,v 1.16 2009/06/22 05:39:28 dtucker Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "includes.h"
__RCSID("$NetBSD: monitor_mm.c,v 1.2 2009/06/07 22:38:46 christos Exp $");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/tree.h>
#include <sys/param.h>

#include <errno.h>
#include <stdarg.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"
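
/*
 * Simple first-fit allocator that carves allocations out of a single
 * shared, anonymous mmap(2) region.  The privilege separation monitor
 * uses it to share state with its unprivileged child across fork(2).
 * Free and allocated ranges are tracked in two red-black trees of
 * struct mm_share, ordered by start address via mm_compare() below.
 */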
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}
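
/*
 * RB_GENERATE() from <sys/tree.h> emits the red-black tree routines
 * (insert, remove, find, iterate) for struct mmtree, linked through
 * the "next" field and ordered by mm_compare().
 */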
RB_GENERATE(mmtree, mm_share, next, mm_compare)
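
/*
 * Allocates a bookkeeping node -- from the mm_master's own allocator
 * if it has one, otherwise from the heap -- and inserts it into the
 * given tree.  A duplicate start address is fatal.
 */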
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */
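/*
 * The backing region is mapped MAP_ANON|MAP_SHARED, so a child forked
 * after mm_create() sees the same pages as the parent.  If mmalloc is
 * non-NULL, the bookkeeping structures themselves also live in shared
 * memory.
 */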
struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = mmap(NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED,
	    -1, (off_t)0);
	if (address == MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}
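
/* Like mm_malloc(), but a failed allocation is fatal. */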
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}

/* Allocates data from a memory mapped area */
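/*
 * First fit: requests are rounded up to a multiple of MM_MINSIZE and
 * satisfied from the lowest-addressed free range that is large enough.
 * The allocation is recorded in rb_allocated and its bytes poisoned
 * with 0xd0 to help catch use of uninitialized memory.
 */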
void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");
	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}

/* Frees memory in a memory mapped area */
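/*
 * The block is looked up in rb_allocated, moved to rb_free, and then
 * coalesced with any address-adjacent free neighbours.  The in-order
 * predecessor in the free tree is located with an explicit tree walk.
 */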
void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
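
/*
 * Copies every mm_share node in oldtree into newtree, allocating the
 * new nodes from mm's current allocator.  Each node and the range it
 * describes are validated against the old masters first.
 */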
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync free list */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
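
/*
 * Rebuilds the bookkeeping for *pmm inside a freshly created shared
 * area: a new mm_master is allocated there, both trees are copied
 * into it, and the old bookkeeping allocator is destroyed.  On return,
 * *pmm and *pmmalloc point at the new structures.
 */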
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
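
/*
 * Sanity check: fatal() unless [address, address + size) lies entirely
 * within mm's mapped region.
 */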
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}