[minix.git] / libexec / ld.elf_so / xmalloc.c
/*	$NetBSD: xmalloc.c,v 1.10 2010/12/03 23:07:49 joerg Exp $	*/

/*
 * Copyright 1996 John D. Polstra.
 * Copyright 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by John Polstra.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __minix
#define munmap minix_munmap
#endif

#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
#endif /* LIBC_SCCS and not lint */

/*
 * malloc.c (Caltech) 2/21/82
 * Chris Kingsley, kingsley@cit-20.
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks that
 * don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
 * This is designed for use in a virtual memory environment.
 */

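/*
 * Worked example (illustrative; assumes 4 KB pages, no RCHECK, and an
 * 8-byte overhead union): a request for 100 bytes is rounded up through
 * the buckets 8, 16, 32, 64, 128 and is served from the 128-byte bucket
 * (index 4), leaving 128 - 8 = 120 bytes usable by the caller.
 */
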
#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: xmalloc.c,v 1.10 2010/12/03 23:07:49 joerg Exp $");
#endif /* not lint */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include "rtld.h"

/*
 * Pre-allocate mmap'ed pages
 */
#define NPOOLPAGES	(32*1024/pagesz)
static char	*pagepool_start, *pagepool_end;
static int	morepages(int);
#define PAGEPOOL_SIZE	(size_t)(pagepool_end - pagepool_start)

/*
 * The overhead on a block is at least 4 bytes.  When free, this space
 * contains a pointer to the next free block, and the bottom two bits must
 * be zero.  When in use, the first byte is set to MAGIC, and the second
 * byte is the size index.  The remaining bytes are for alignment.
 * If range checking is enabled then a second word holds the size of the
 * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
 * The order of elements is critical: ov_magic must overlay the low order
 * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
 */
union	overhead {
	union	overhead *ov_next;	/* when free */
	struct {
		u_char	ovu_magic;	/* magic number */
		u_char	ovu_index;	/* bucket # */
#ifdef RCHECK
		u_short	ovu_rmagic;	/* range magic number */
		u_int	ovu_size;	/* actual block size */
#endif
	} ovu;
#define	ov_magic	ovu.ovu_magic
#define	ov_index	ovu.ovu_index
#define	ov_rmagic	ovu.ovu_rmagic
#define	ov_size		ovu.ovu_size
};

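/*
 * Example of the overlay: a free block's first word is ov_next, whose
 * bottom two bits are zero as required above, while an in-use block's
 * first byte is MAGIC (0xef), which has both of those bits set.  A valid
 * free-list pointer therefore can never look like an in-use header, which
 * is what lets xfree() reject blocks that are not in use.
 */
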
static void morecore(size_t);
static void *imalloc(size_t);

#define	MAGIC		0xef		/* magic # on accounting info */
#define	RMAGIC		0x5555		/* magic # on range info */

#ifdef RCHECK
#define	RSLOP		(sizeof (u_short))
#else
#define	RSLOP		0
#endif

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information
 * precedes the data area returned to the user.
 */
#define	NBUCKETS 30
static	union overhead *nextf[NBUCKETS];

static	size_t pagesz;			/* page size */
static	size_t pagebucket;		/* page size bucket */

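/*
 * Bucket/size mapping in brief: bucket 0 holds 8-byte blocks, bucket 1
 * holds 16-byte blocks, and in general bucket i holds blocks of
 * 1 << (i + 3) bytes; pagebucket is the bucket whose block size first
 * reaches pagesz, and requests too large for a single page start their
 * search there.
 */
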
#ifdef MSTATS
/*
 * nmalloc[i] is the difference between the number of mallocs and frees
 * for a given block size.
 */
static	u_int nmalloc[NBUCKETS];
#endif

#if defined(MALLOC_DEBUG) || defined(RCHECK)
#define	ASSERT(p)   if (!(p)) botch("p")
static void
botch(const char *s)
{
	xwarnx("\r\nassertion botched: %s\r\n", s);
	abort();
}
#else
#define	ASSERT(p)
#endif

#define TRACE()	xprintf("TRACE %s:%d\n", __FILE__, __LINE__)

static void *
imalloc(size_t nbytes)
{
	union overhead *op;
	size_t bucket;
	size_t n, m;
	unsigned amt;

	/*
	 * First time malloc is called, setup page size and
	 * align break pointer so all data will be page aligned.
	 */
	if (pagesz == 0) {
		pagesz = n = _rtld_pagesz;
		if (morepages(NPOOLPAGES) == 0)
			return NULL;
		op = (union overhead *)(pagepool_start);
		m = sizeof (*op) - (((char *)op - (char *)NULL) & (n - 1));
		if (n < m)
			n += pagesz - m;
		else
			n -= m;
		if (n) {
			pagepool_start += n;
		}
		bucket = 0;
		amt = sizeof(union overhead);
		while (pagesz > amt) {
			amt <<= 1;
			bucket++;
		}
		pagebucket = bucket;
	}
	/*
	 * Convert amount of memory requested into closest block size
	 * stored in hash buckets which satisfies request.
	 * Account for space used per block for accounting.
	 */
	if (nbytes <= (n = pagesz - sizeof (*op) - RSLOP)) {
		if (sizeof(union overhead) & (sizeof(union overhead) - 1)) {
			amt = sizeof(union overhead) * 2;
			bucket = 1;
		} else {
			amt = sizeof(union overhead); /* size of first bucket */
			bucket = 0;
		}
		n = -(sizeof (*op) + RSLOP);	/* so amt + n is the usable size */
	} else {
		amt = pagesz;
		bucket = pagebucket;
	}
	while (nbytes > amt + n) {
		amt <<= 1;
		if (amt == 0)
			return (NULL);
		bucket++;
	}
	/*
	 * If nothing in hash bucket right now,
	 * request more memory from the system.
	 */
	if ((op = nextf[bucket]) == NULL) {
		morecore(bucket);
		if ((op = nextf[bucket]) == NULL)
			return (NULL);
	}
	/* remove from linked list */
	nextf[bucket] = op->ov_next;
	op->ov_magic = MAGIC;
	op->ov_index = bucket;
#ifdef MSTATS
	nmalloc[bucket]++;
#endif
#ifdef RCHECK
	/*
	 * Record allocated size of block and
	 * bound space with magic numbers.
	 */
	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
	op->ov_rmagic = RMAGIC;
	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
	return ((char *)(op + 1));
}

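/*
 * imalloc() returns the address just past the overhead header, or NULL
 * when the request cannot be satisfied (e.g. when the page pool cannot
 * be grown); the xmalloc()/xrealloc() wrappers below turn NULL into a
 * fatal error via xerr().
 */
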
/*
 * Allocate more memory to the indicated bucket.
 */
static void
morecore(size_t bucket)
{
	union overhead *op;
	size_t sz;			/* size of desired block */
	size_t amt;			/* amount to allocate */
	size_t nblks;			/* how many blocks we get */

	/*
	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
	 * 2^30 bytes on a VAX, I think) or for a negative arg.
	 */
	sz = 1 << (bucket + 3);
#ifdef MALLOC_DEBUG
	ASSERT(sz > 0);
#endif
	if (sz < pagesz) {
		amt = pagesz;
		nblks = amt / sz;
	} else {
		amt = sz + pagesz;
		nblks = 1;
	}
	if (amt > PAGEPOOL_SIZE)
		if (morepages(amt/pagesz + NPOOLPAGES) == 0)
			return;
	op = (union overhead *)pagepool_start;
	pagepool_start += amt;

	/*
	 * Add new memory allocated to that on
	 * free list for this hash bucket.
	 */
	nextf[bucket] = op;
	while (--nblks > 0) {
		op->ov_next = (union overhead *)((caddr_t)op + sz);
		op = (union overhead *)((caddr_t)op + sz);
	}
}

void
xfree(void *cp)
{
	int size;
	union overhead *op;

	if (cp == NULL)
		return;
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
#ifdef MALLOC_DEBUG
	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
#else
	if (op->ov_magic != MAGIC)
		return;				/* sanity */
#endif
#ifdef RCHECK
	ASSERT(op->ov_rmagic == RMAGIC);
	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
#endif
	size = op->ov_index;
	ASSERT(size < NBUCKETS);
	op->ov_next = nextf[size];	/* also clobbers ov_magic */
	nextf[size] = op;
#ifdef MSTATS
	nmalloc[size]--;
#endif
}

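/*
 * Note that xfree() only pushes the block back onto its bucket's free
 * list; no memory is ever unmapped here, so freed blocks stay cached in
 * the process and are reused by later allocations from the same bucket.
 */
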
static void *
irealloc(void *cp, size_t nbytes)
{
	size_t onb;
	size_t i;
	union overhead *op;
	char *res;

	if (cp == NULL)
		return (imalloc(nbytes));
	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
	if (op->ov_magic != MAGIC) {
		static const char *err_str =
		    "memory corruption or double free in realloc\n";
		extern char *__progname;
		write(STDERR_FILENO, __progname, strlen(__progname));
		write(STDERR_FILENO, err_str, strlen(err_str));
		abort();
	}

	i = op->ov_index;
	onb = 1 << (i + 3);
	if (onb < pagesz)
		onb -= sizeof (*op) + RSLOP;
	else
		onb += pagesz - sizeof (*op) - RSLOP;
	/* avoid the copy if same size block */
	if (i) {
		i = 1 << (i + 2);
		if (i < pagesz)
			i -= sizeof (*op) + RSLOP;
		else
			i += pagesz - sizeof (*op) - RSLOP;
	}
	if (nbytes <= onb && nbytes > i) {
#ifdef RCHECK
		op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
		*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
#endif
		return (cp);
	} else
		xfree(cp);
	if ((res = imalloc(nbytes)) == NULL)
		return (NULL);
	if (cp != res)		/* common optimization if "compacting" */
		memcpy(res, cp, (nbytes < onb) ? nbytes : onb);
	return (res);
}

#ifdef MSTATS
/*
 * mstats - print out statistics about malloc
 *
 * Prints two lines of numbers, one showing the length of the free list
 * for each size category, the second showing the number of mallocs -
 * frees for each size category.
 */
void
mstats(char *s)
{
	int i, j;
	union overhead *p;
	int totfree = 0,
	    totused = 0;

	xprintf("Memory allocation statistics %s\nfree:\t", s);
	for (i = 0; i < NBUCKETS; i++) {
		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
			;
		xprintf(" %d", j);
		totfree += j * (1 << (i + 3));
	}
	xprintf("\nused:\t");
	for (i = 0; i < NBUCKETS; i++) {
		xprintf(" %d", nmalloc[i]);
		totused += nmalloc[i] * (1 << (i + 3));
	}
	xprintf("\n\tTotal in use: %d, total free: %d\n",
	    totused, totfree);
}
#endif

/* Minix mmap can do this. */
#ifdef __minix
#define mmap minix_mmap
#endif

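/*
 * morepages(n) replaces the page pool with a fresh anonymous mapping of
 * n pages.  The page-aligned, unused tail of the old pool is unmapped
 * first, and the sub-page offset of pagepool_start is preserved across
 * the remap.  Returns n on success, 0 if the mapping fails.
 */
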
static int
morepages(int n)
{
	int	fd = -1;
	int	offset;

#ifdef NEED_DEV_ZERO
	fd = open("/dev/zero", O_RDWR, 0);
	if (fd == -1)
		xerr(1, "/dev/zero");
#endif

	if (PAGEPOOL_SIZE > pagesz) {
		caddr_t	addr = (caddr_t)
			(((long)pagepool_start + pagesz - 1) & ~(pagesz - 1));
		if (munmap(addr, pagepool_end - addr) != 0)
			xwarn("morepages: munmap %p", addr);
	}

	offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1));

	if ((pagepool_start = mmap(0, n * pagesz,
			PROT_READ|PROT_WRITE,
			MAP_ANON|MAP_PRIVATE, fd, 0)) == (caddr_t)-1) {
		xprintf("Cannot map anonymous memory");
		return 0;
	}
	pagepool_end = pagepool_start + n * pagesz;
	pagepool_start += offset;

#ifdef NEED_DEV_ZERO
	close(fd);
#endif
	return n;
}

void *
xcalloc(size_t size)
{

	return memset(xmalloc(size), 0, size);
}

void *
xmalloc(size_t size)
{
	void *p = imalloc(size);

	if (p == NULL)
		xerr(1, "%s", xstrerror(errno));
	return p;
}

void *
xrealloc(void *p, size_t size)
{
	p = irealloc(p, size);

	if (p == NULL)
		xerr(1, "%s", xstrerror(errno));
	return p;
}

char *
xstrdup(const char *str)
{
	size_t len;
	char *copy;

	len = strlen(str) + 1;
	copy = xmalloc(len);
	memcpy(copy, str, len);
	return (copy);
}