release.sh: restore -jJAILDIR option
[minix.git] / lib / libc / stdlib / malloc.c
bloba07a572169b0d6293f2b9f3d951bbc9aba6373e9
1 /* $NetBSD: malloc.c,v 1.52 2008/02/03 22:56:53 christos Exp $ */
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
11 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
15 #ifdef __minix
16 #define mmap minix_mmap
17 #define munmap minix_munmap
18 #ifdef _LIBSYS
19 #include <minix/sysutil.h>
20 #define MALLOC_NO_SYSCALLS
21 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
22 #define wrterror(w) panic("libminc malloc error: %s\n", w)
23 #endif
24 #endif
27 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
28 * to internal conditions and consistency in malloc.c. This has a
29 * noticeable runtime performance hit, and generally will not do you
30 * any good unless you fiddle with the internals of malloc or want
31 * to catch random pointer corruption as early as possible.
33 #ifndef MALLOC_EXTRA_SANITY
34 #undef MALLOC_EXTRA_SANITY
35 #endif
38 * What to use for Junk. This is the byte value we use to fill with
39 * when the 'J' option is enabled.
41 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
44 * The basic parameters you can tweak.
46 * malloc_minsize minimum size of an allocation in bytes.
47 * If this is too small it's too much work
48 * to manage them. This is also the smallest
49 * unit of alignment used for the storage
50 * returned by malloc/realloc.
54 #include "namespace.h"
55 #if defined(__FreeBSD__)
56 # if defined(__i386__)
57 # define malloc_minsize 16U
58 # endif
59 # if defined(__ia64__)
60 # define malloc_pageshift 13U
61 # define malloc_minsize 16U
62 # endif
63 # if defined(__alpha__)
64 # define malloc_pageshift 13U
65 # define malloc_minsize 16U
66 # endif
67 # if defined(__sparc64__)
68 # define malloc_pageshift 13U
69 # define malloc_minsize 16U
70 # endif
71 # if defined(__amd64__)
72 # define malloc_pageshift 12U
73 # define malloc_minsize 16U
74 # endif
75 # if defined(__arm__)
76 # define malloc_pageshift 12U
77 # define malloc_minsize 16U
78 # endif
79 #ifndef __minix
80 # define HAS_UTRACE
81 # define UTRACE_LABEL
82 #endif /* __minix */
84 #include <sys/cdefs.h>
85 void utrace(struct ut *, int);
88 * Make malloc/free/realloc thread-safe in libc for use with
89 * kernel threads.
91 # include "libc_private.h"
92 # include "spinlock.h"
93 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
94 # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
95 # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
96 #endif /* __FreeBSD__ */
98 /* #undef these things so that malloc uses the non-internal symbols.
99 * This is necessary for VM to be able to define its own versions, and
100 * use this malloc.
102 #undef minix_mmap
103 #undef minix_munmap
105 #include <assert.h>
107 #include <sys/types.h>
108 #if defined(__NetBSD__)
109 # define malloc_minsize 16U
110 # define HAS_UTRACE
111 # define UTRACE_LABEL "malloc",
112 #include <sys/cdefs.h>
113 #include "extern.h"
114 #if defined(LIBC_SCCS) && !defined(lint)
115 __RCSID("$NetBSD: malloc.c,v 1.52 2008/02/03 22:56:53 christos Exp $");
116 #endif /* LIBC_SCCS and not lint */
117 int utrace(const char *, void *, size_t);
119 #include <reentrant.h>
120 extern int __isthreaded;
121 static mutex_t thread_lock = MUTEX_INITIALIZER;
122 #define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock);
123 #define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
124 #endif /* __NetBSD__ */
126 #if defined(__sparc__) && defined(sun)
127 # define malloc_minsize 16U
128 # define MAP_ANON (0)
129 static int fdzero;
130 # define MMAP_FD fdzero
131 # define INIT_MMAP() \
132 { if ((fdzero = open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
133 wrterror("open of /dev/zero"); }
134 #endif /* __sparc__ */
136 /* Insert your combination here... */
137 #if defined(__FOOCPU__) && defined(__BAROS__)
138 # define malloc_minsize 16U
139 #endif /* __FOOCPU__ && __BAROS__ */
141 #ifndef ZEROSIZEPTR
142 #define ZEROSIZEPTR ((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
143 #endif
146 * No user serviceable parts behind this point.
148 #include <sys/types.h>
149 #include <sys/mman.h>
150 #include <errno.h>
151 #include <fcntl.h>
152 #include <paths.h>
153 #include <stddef.h>
154 #include <stdio.h>
155 #include <stdlib.h>
156 #include <string.h>
157 #include <unistd.h>
160 * This structure describes a page worth of chunks.
163 struct pginfo {
164 struct pginfo *next; /* next on the free list */
165 void *page; /* Pointer to the page */
166 u_short size; /* size of this page's chunks */
167 u_short shift; /* How far to shift for this size chunks */
168 u_short free; /* How many free chunks */
169 u_short total; /* How many chunk */
170 u_int bits[1]; /* Which chunks are free */
174 * This structure describes a number of free pages.
177 struct pgfree {
178 struct pgfree *next; /* next run of free pages */
179 struct pgfree *prev; /* prev run of free pages */
180 void *page; /* pointer to free pages */
181 void *end; /* pointer to end of free pages */
182 size_t size; /* number of bytes free */
186 * How many bits per u_int in the bitmap.
187 * Change only if not 8 bits/byte
189 #define MALLOC_BITS ((int)(8*sizeof(u_int)))
192 * Magic values to put in the page_directory
194 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
195 #define MALLOC_FREE ((struct pginfo*) 1)
196 #define MALLOC_FIRST ((struct pginfo*) 2)
197 #define MALLOC_FOLLOW ((struct pginfo*) 3)
198 #define MALLOC_MAGIC ((struct pginfo*) 4)
201 * Page size related parameters, computed at run-time.
203 static size_t malloc_pagesize;
204 static size_t malloc_pageshift;
205 static size_t malloc_pagemask;
207 #ifndef malloc_minsize
208 #define malloc_minsize 16U
209 #endif
211 #ifndef malloc_maxsize
212 #define malloc_maxsize ((malloc_pagesize)>>1)
213 #endif
215 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
216 #define ptr2idx(foo) \
217 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
219 #ifndef _MALLOC_LOCK
220 #define _MALLOC_LOCK()
221 #endif
223 #ifndef _MALLOC_UNLOCK
224 #define _MALLOC_UNLOCK()
225 #endif
227 #ifndef MMAP_FD
228 #define MMAP_FD (-1)
229 #endif
231 #ifndef INIT_MMAP
232 #define INIT_MMAP()
233 #endif
235 #ifndef __minix
236 #ifndef MADV_FREE
237 #define MADV_FREE MADV_DONTNEED
238 #endif
239 #endif /* !__minix */
241 /* Number of free pages we cache */
242 static size_t malloc_cache = 16;
244 /* The offset from pagenumber to index into the page directory */
245 static size_t malloc_origo;
247 /* The last index in the page directory we care about */
248 static size_t last_idx;
250 /* Pointer to page directory. Allocated "as if with" malloc */
251 static struct pginfo **page_dir;
253 /* How many slots in the page directory */
254 static size_t malloc_ninfo;
256 /* Free pages line up here */
257 static struct pgfree free_list;
259 /* Abort(), user doesn't handle problems. */
260 static int malloc_abort;
262 /* Are we trying to die ? */
263 static int suicide;
265 /* always realloc ? */
266 static int malloc_realloc;
268 /* pass the kernel a hint on free pages ? */
269 #if defined(MADV_FREE)
270 static int malloc_hint = 0;
271 #endif
273 /* xmalloc behaviour ? */
274 static int malloc_xmalloc;
276 /* sysv behaviour for malloc(0) ? */
277 static int malloc_sysv;
279 /* zero fill ? */
280 static int malloc_zero;
282 /* junk fill ? */
283 static int malloc_junk;
285 #ifdef HAS_UTRACE
287 /* utrace ? */
288 static int malloc_utrace;
290 struct ut { void *p; size_t s; void *r; };
292 #define UTRACE(a, b, c) \
293 if (malloc_utrace) { \
294 struct ut u; \
295 u.p=a; u.s = b; u.r=c; \
296 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
298 #else /* !HAS_UTRACE */
299 #define UTRACE(a,b,c)
300 #endif /* HAS_UTRACE */
302 /* my last break. */
303 static void *malloc_brk;
305 /* one location cache for free-list holders */
306 static struct pgfree *px;
308 /* compile-time options */
309 const char *_malloc_options;
311 /* Name of the current public function */
312 static const char *malloc_func;
314 /* Macro for mmap */
315 #define MMAP(size) \
316 mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
317 MMAP_FD, (off_t)0);
320 * Necessary function declarations
322 static int extend_pgdir(size_t idx);
323 static void *imalloc(size_t size);
324 static void ifree(void *ptr);
325 static void *irealloc(void *ptr, size_t size);
327 #ifndef MALLOC_NO_SYSCALLS
328 static void
329 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
332 write(STDERR_FILENO, p1, strlen(p1));
333 write(STDERR_FILENO, p2, strlen(p2));
334 write(STDERR_FILENO, p3, strlen(p3));
335 write(STDERR_FILENO, p4, strlen(p4));
338 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
339 const char *p4) = wrtmessage;
340 static void
341 wrterror(const char *p)
344 suicide = 1;
345 _malloc_message(getprogname(), malloc_func, " error: ", p);
346 abort();
349 static void
350 wrtwarning(const char *p)
354 * Sensitive processes, somewhat arbitrarily defined here as setuid,
355 * setgid, root and wheel cannot afford to have malloc mistakes.
357 if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
358 wrterror(p);
360 #endif
363 * Allocate a number of pages from the OS
365 static void *
366 map_pages(size_t pages)
368 caddr_t result, rresult, tail;
369 intptr_t bytes = pages << malloc_pageshift;
371 if (bytes < 0 || (size_t)bytes < pages) {
372 errno = ENOMEM;
373 return NULL;
376 if ((result = sbrk(bytes)) == (void *)-1)
377 return NULL;
380 * Round to a page, in case sbrk(2) did not do this for us
382 rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
383 if (result < rresult) {
384 /* make sure we have enough space to fit bytes */
385 if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
386 /* we failed, put everything back */
387 if (brk(result)) {
388 wrterror("brk(2) failed [internal error]\n");
392 tail = rresult + (size_t)bytes;
394 last_idx = ptr2idx(tail) - 1;
395 malloc_brk = tail;
397 if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
398 malloc_brk = result;
399 last_idx = ptr2idx(malloc_brk) - 1;
400 /* Put back break point since we failed. */
401 if (brk(malloc_brk))
402 wrterror("brk(2) failed [internal error]\n");
403 return 0;
406 return rresult;
410 * Extend page directory
412 static int
413 extend_pgdir(size_t idx)
415 struct pginfo **new, **old;
416 size_t newlen, oldlen;
418 /* check for overflow */
419 if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
420 + (malloc_pagesize / sizeof *page_dir)) < idx) {
421 errno = ENOMEM;
422 return 0;
425 /* Make it this many pages */
426 newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
428 /* remember the old mapping size */
429 oldlen = malloc_ninfo * sizeof *page_dir;
432 * NOTE: we allocate new pages and copy the directory rather than tempt
433 * fate by trying to "grow" the region.. There is nothing to prevent
434 * us from accidentally re-mapping space that's been allocated by our caller
435 * via dlopen() or other mmap().
437 * The copy problem is not too bad, as there is 4K of page index per
438 * 4MB of malloc arena.
440 * We can totally avoid the copy if we open a file descriptor to associate
441 * the anon mappings with. Then, when we remap the pages at the new
442 * address, the old pages will be "magically" remapped.. But this means
443 * keeping open a "secret" file descriptor.....
446 /* Get new pages */
447 new = MMAP(newlen);
448 if (new == MAP_FAILED)
449 return 0;
451 /* Copy the old stuff */
452 memcpy(new, page_dir, oldlen);
454 /* register the new size */
455 malloc_ninfo = newlen / sizeof *page_dir;
457 /* swap the pointers */
458 old = page_dir;
459 page_dir = new;
461 /* Now free the old stuff */
462 munmap(old, oldlen);
463 return 1;
467 * Initialize the world
469 static void
470 malloc_init(void)
472 int save_errno = errno;
473 #ifndef MALLOC_NO_SYSCALLS
474 const char *p;
475 char b[64];
476 size_t i;
477 ssize_t j;
480 * Compute page-size related variables.
482 malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
483 #else
484 malloc_pagesize = PAGE_SIZE;
485 #endif
486 malloc_pagemask = malloc_pagesize - 1;
487 for (malloc_pageshift = 0;
488 (1UL << malloc_pageshift) != malloc_pagesize;
489 malloc_pageshift++)
490 /* nothing */ ;
492 INIT_MMAP();
494 #ifdef MALLOC_EXTRA_SANITY
495 malloc_junk = 1;
496 #endif /* MALLOC_EXTRA_SANITY */
498 #ifndef MALLOC_NO_SYSCALLS
499 for (i = 0; i < 3; i++) {
500 if (i == 0) {
501 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
502 if (j <= 0)
503 continue;
504 b[j] = '\0';
505 p = b;
506 } else if (i == 1 && issetugid() == 0) {
507 p = getenv("MALLOC_OPTIONS");
508 } else if (i == 1) {
509 continue;
510 } else {
511 p = _malloc_options;
513 for (; p != NULL && *p != '\0'; p++) {
514 switch (*p) {
515 case '>': malloc_cache <<= 1; break;
516 case '<': malloc_cache >>= 1; break;
517 case 'a': malloc_abort = 0; break;
518 case 'A': malloc_abort = 1; break;
519 #ifndef __minix
520 case 'h': malloc_hint = 0; break;
521 case 'H': malloc_hint = 1; break;
522 #endif /* !__minix */
523 case 'r': malloc_realloc = 0; break;
524 case 'R': malloc_realloc = 1; break;
525 case 'j': malloc_junk = 0; break;
526 case 'J': malloc_junk = 1; break;
527 #ifdef HAS_UTRACE
528 case 'u': malloc_utrace = 0; break;
529 case 'U': malloc_utrace = 1; break;
530 #endif
531 case 'v': malloc_sysv = 0; break;
532 case 'V': malloc_sysv = 1; break;
533 case 'x': malloc_xmalloc = 0; break;
534 case 'X': malloc_xmalloc = 1; break;
535 case 'z': malloc_zero = 0; break;
536 case 'Z': malloc_zero = 1; break;
537 default:
538 _malloc_message(getprogname(), malloc_func,
539 " warning: ", "unknown char in MALLOC_OPTIONS\n");
540 break;
544 #endif
546 UTRACE(0, 0, 0);
549 * We want junk in the entire allocation, and zero only in the part
550 * the user asked for.
552 if (malloc_zero)
553 malloc_junk = 1;
555 /* Allocate one page for the page directory */
556 page_dir = MMAP(malloc_pagesize);
558 if (page_dir == MAP_FAILED)
559 wrterror("mmap(2) failed, check limits.\n");
562 * We need a maximum of malloc_pageshift buckets, steal these from the
563 * front of the page_directory;
565 malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
566 >> malloc_pageshift;
567 malloc_origo -= malloc_pageshift;
569 malloc_ninfo = malloc_pagesize / sizeof *page_dir;
571 /* Recalculate the cache size in bytes, and make sure it's nonzero */
573 if (!malloc_cache)
574 malloc_cache++;
576 malloc_cache <<= malloc_pageshift;
579 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
580 * We can sbrk(2) further back when we keep this on a low address.
582 px = imalloc(sizeof *px);
584 errno = save_errno;
588 * Allocate a number of complete pages
590 static void *
591 malloc_pages(size_t size)
593 void *p, *delay_free = NULL;
594 size_t i;
595 struct pgfree *pf;
596 size_t idx;
598 idx = pageround(size);
599 if (idx < size) {
600 errno = ENOMEM;
601 return NULL;
602 } else
603 size = idx;
605 p = NULL;
607 /* Look for free pages before asking for more */
608 for(pf = free_list.next; pf; pf = pf->next) {
610 #ifdef MALLOC_EXTRA_SANITY
611 if (pf->size & malloc_pagemask)
612 wrterror("(ES): junk length entry on free_list.\n");
613 if (!pf->size)
614 wrterror("(ES): zero length entry on free_list.\n");
615 if (pf->page == pf->end)
616 wrterror("(ES): zero entry on free_list.\n");
617 if (pf->page > pf->end)
618 wrterror("(ES): sick entry on free_list.\n");
619 if ((void*)pf->page >= (void*)sbrk(0))
620 wrterror("(ES): entry on free_list past brk.\n");
621 if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
622 wrterror("(ES): non-free first page on free-list.\n");
623 if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
624 wrterror("(ES): non-free last page on free-list.\n");
625 #endif /* MALLOC_EXTRA_SANITY */
627 if (pf->size < size)
628 continue;
630 if (pf->size == size) {
631 p = pf->page;
632 if (pf->next != NULL)
633 pf->next->prev = pf->prev;
634 pf->prev->next = pf->next;
635 delay_free = pf;
636 break;
639 p = pf->page;
640 pf->page = (char *)pf->page + size;
641 pf->size -= size;
642 break;
645 #ifdef MALLOC_EXTRA_SANITY
646 if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
647 wrterror("(ES): allocated non-free page on free-list.\n");
648 #endif /* MALLOC_EXTRA_SANITY */
650 size >>= malloc_pageshift;
652 /* Map new pages */
653 if (p == NULL)
654 p = map_pages(size);
656 if (p != NULL) {
658 idx = ptr2idx(p);
659 page_dir[idx] = MALLOC_FIRST;
660 for (i=1;i<size;i++)
661 page_dir[idx+i] = MALLOC_FOLLOW;
663 if (malloc_junk)
664 memset(p, SOME_JUNK, size << malloc_pageshift);
667 if (delay_free) {
668 if (px == NULL)
669 px = delay_free;
670 else
671 ifree(delay_free);
674 return p;
678 * Allocate a page of fragments
681 static inline int
682 malloc_make_chunks(int bits)
684 struct pginfo *bp;
685 void *pp;
686 int i, k;
687 long l;
689 /* Allocate a new bucket */
690 pp = malloc_pages(malloc_pagesize);
691 if (pp == NULL)
692 return 0;
694 /* Find length of admin structure */
695 l = (long)offsetof(struct pginfo, bits[0]);
696 l += (long)sizeof bp->bits[0] *
697 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
699 /* Don't waste more than two chunks on this */
700 if ((1<<(bits)) <= l+l) {
701 bp = (struct pginfo *)pp;
702 } else {
703 bp = imalloc((size_t)l);
704 if (bp == NULL) {
705 ifree(pp);
706 return 0;
710 bp->size = (1<<bits);
711 bp->shift = bits;
712 bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
713 bp->page = pp;
715 /* set all valid bits in the bitmap */
716 k = bp->total;
717 i = 0;
719 /* Do a bunch at a time */
720 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
721 bp->bits[i / MALLOC_BITS] = ~0U;
723 for(; i < k; i++)
724 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
726 if (bp == bp->page) {
727 /* Mark the ones we stole for ourselves */
728 for(i = 0; l > 0; i++) {
729 bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
730 bp->free--;
731 bp->total--;
732 l -= (long)(1 << bits);
736 /* MALLOC_LOCK */
738 page_dir[ptr2idx(pp)] = bp;
740 bp->next = page_dir[bits];
741 page_dir[bits] = bp;
743 /* MALLOC_UNLOCK */
745 return 1;
749 * Allocate a fragment
751 static void *
752 malloc_bytes(size_t size)
754 size_t i;
755 int j;
756 u_int u;
757 struct pginfo *bp;
758 size_t k;
759 u_int *lp;
761 /* Don't bother with anything less than this */
762 if (size < malloc_minsize)
763 size = malloc_minsize;
766 /* Find the right bucket */
767 j = 1;
768 i = size-1;
769 while (i >>= 1)
770 j++;
772 /* If it's empty, make a page more of that size chunks */
773 if (page_dir[j] == NULL && !malloc_make_chunks(j))
774 return NULL;
776 bp = page_dir[j];
778 /* Find first word of bitmap which isn't empty */
779 for (lp = bp->bits; !*lp; lp++)
782 /* Find that bit, and tweak it */
783 u = 1;
784 k = 0;
785 while (!(*lp & u)) {
786 u += u;
787 k++;
789 *lp ^= u;
791 /* If there are no more free, remove from free-list */
792 if (!--bp->free) {
793 page_dir[j] = bp->next;
794 bp->next = NULL;
797 /* Adjust to the real offset of that chunk */
798 k += (lp-bp->bits)*MALLOC_BITS;
799 k <<= bp->shift;
801 if (malloc_junk)
802 memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
804 return (u_char *)bp->page + k;
808 * Allocate a piece of memory
810 static void *
811 imalloc(size_t size)
813 void *result;
815 if (suicide)
816 abort();
818 if ((size + malloc_pagesize) < size) /* Check for overflow */
819 result = NULL;
820 else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
821 result = NULL;
822 else if (size <= malloc_maxsize)
823 result = malloc_bytes(size);
824 else
825 result = malloc_pages(size);
827 if (malloc_abort && result == NULL)
828 wrterror("allocation failed.\n");
830 if (malloc_zero && result != NULL)
831 memset(result, 0, size);
833 return result;
837 * Change the size of an allocation.
839 static void *
840 irealloc(void *ptr, size_t size)
842 void *p;
843 size_t osize, idx;
844 struct pginfo **mp;
845 size_t i;
847 if (suicide)
848 abort();
850 idx = ptr2idx(ptr);
852 if (idx < malloc_pageshift) {
853 wrtwarning("junk pointer, too low to make sense.\n");
854 return 0;
857 if (idx > last_idx) {
858 wrtwarning("junk pointer, too high to make sense.\n");
859 return 0;
862 mp = &page_dir[idx];
864 if (*mp == MALLOC_FIRST) { /* Page allocation */
866 /* Check the pointer */
867 if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
868 wrtwarning("modified (page-) pointer.\n");
869 return NULL;
872 /* Find the size in bytes */
873 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
874 osize += malloc_pagesize;
876 if (!malloc_realloc && /* unless we have to, */
877 size <= osize && /* .. or are too small, */
878 size > (osize - malloc_pagesize)) { /* .. or can free a page, */
879 if (malloc_junk)
880 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
881 return ptr; /* don't do anything. */
884 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
886 /* Check the pointer for sane values */
887 if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
888 wrtwarning("modified (chunk-) pointer.\n");
889 return NULL;
892 /* Find the chunk index in the page */
893 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;
895 /* Verify that it isn't a free chunk already */
896 if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
897 wrtwarning("chunk is already free.\n");
898 return NULL;
901 osize = (*mp)->size;
903 if (!malloc_realloc && /* Unless we have to, */
904 size <= osize && /* ..or are too small, */
905 (size > osize / 2 || /* ..or could use a smaller size, */
906 osize == malloc_minsize)) { /* ..(if there is one) */
907 if (malloc_junk)
908 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
909 return ptr; /* ..Don't do anything */
912 } else {
913 wrtwarning("pointer to wrong page.\n");
914 return NULL;
917 p = imalloc(size);
919 if (p != NULL) {
920 /* copy the lesser of the two sizes, and free the old one */
921 if (!size || !osize)
923 else if (osize < size)
924 memcpy(p, ptr, osize);
925 else
926 memcpy(p, ptr, size);
927 ifree(ptr);
929 return p;
933 * Free a sequence of pages
936 static inline void
937 free_pages(void *ptr, size_t idx, struct pginfo *info)
939 size_t i;
940 struct pgfree *pf, *pt=NULL;
941 size_t l;
942 void *tail;
944 if (info == MALLOC_FREE) {
945 wrtwarning("page is already free.\n");
946 return;
949 if (info != MALLOC_FIRST) {
950 wrtwarning("pointer to wrong page.\n");
951 return;
954 if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
955 wrtwarning("modified (page-) pointer.\n");
956 return;
959 /* Count how many pages and mark them free at the same time */
960 page_dir[idx] = MALLOC_FREE;
961 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
962 page_dir[idx + i] = MALLOC_FREE;
964 l = i << malloc_pageshift;
966 if (malloc_junk)
967 memset(ptr, SOME_JUNK, l);
969 #ifndef __minix
970 if (malloc_hint)
971 madvise(ptr, l, MADV_FREE);
972 #endif /* !__minix */
974 tail = (char *)ptr+l;
976 /* add to free-list */
977 if (px == NULL)
978 px = imalloc(sizeof *px); /* This cannot fail... */
979 px->page = ptr;
980 px->end = tail;
981 px->size = l;
982 if (free_list.next == NULL) {
984 /* Nothing on free list, put this at head */
985 px->next = free_list.next;
986 px->prev = &free_list;
987 free_list.next = px;
988 pf = px;
989 px = NULL;
991 } else {
993 /* Find the right spot, leave pf pointing to the modified entry. */
994 tail = (char *)ptr+l;
996 for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
997 pf = pf->next)
998 ; /* Race ahead here */
1000 if (pf->page > tail) {
1001 /* Insert before entry */
1002 px->next = pf;
1003 px->prev = pf->prev;
1004 pf->prev = px;
1005 px->prev->next = px;
1006 pf = px;
1007 px = NULL;
1008 } else if (pf->end == ptr ) {
1009 /* Append to the previous entry */
1010 pf->end = (char *)pf->end + l;
1011 pf->size += l;
1012 if (pf->next != NULL && pf->end == pf->next->page ) {
1013 /* And collapse the next too. */
1014 pt = pf->next;
1015 pf->end = pt->end;
1016 pf->size += pt->size;
1017 pf->next = pt->next;
1018 if (pf->next != NULL)
1019 pf->next->prev = pf;
1021 } else if (pf->page == tail) {
1022 /* Prepend to entry */
1023 pf->size += l;
1024 pf->page = ptr;
1025 } else if (pf->next == NULL) {
1026 /* Append at tail of chain */
1027 px->next = NULL;
1028 px->prev = pf;
1029 pf->next = px;
1030 pf = px;
1031 px = NULL;
1032 } else {
1033 wrterror("freelist is destroyed.\n");
1037 /* Return something to OS ? */
1038 if (pf->next == NULL && /* If we're the last one, */
1039 pf->size > malloc_cache && /* ..and the cache is full, */
1040 pf->end == malloc_brk && /* ..and none behind us, */
1041 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */
1042 int r;
1044 * Keep the cache intact. Notice that the '>' above guarantees that
1045 * the pf will always have at least one page afterwards.
1047 pf->end = (char *)pf->page + malloc_cache;
1048 pf->size = malloc_cache;
1050 r = brk(pf->end);
1051 assert(r >= 0);
1052 malloc_brk = pf->end;
1054 idx = ptr2idx(pf->end);
1056 for(i=idx;i <= last_idx;)
1057 page_dir[i++] = MALLOC_NOT_MINE;
1059 last_idx = idx - 1;
1061 /* XXX: We could realloc/shrink the pagedir here I guess. */
1063 if (pt != NULL)
1064 ifree(pt);
1068 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1071 static inline void
1072 free_bytes(void *ptr, size_t idx, struct pginfo *info)
1074 size_t i;
1075 struct pginfo **mp;
1076 void *vp;
1078 /* Find the chunk number on the page */
1079 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1081 if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1082 wrtwarning("modified (chunk-) pointer.\n");
1083 return;
1086 if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1087 wrtwarning("chunk is already free.\n");
1088 return;
1091 if (malloc_junk)
1092 memset(ptr, SOME_JUNK, (size_t)info->size);
1094 info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1095 info->free++;
1097 mp = page_dir + info->shift;
1099 if (info->free == 1) {
1101 /* Page became non-full */
1103 mp = page_dir + info->shift;
1104 /* Insert in address order */
1105 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1106 mp = &(*mp)->next;
1107 info->next = *mp;
1108 *mp = info;
1109 return;
1112 if (info->free != info->total)
1113 return;
1115 /* Find & remove this page in the queue */
1116 while (*mp != info) {
1117 mp = &((*mp)->next);
1118 #ifdef MALLOC_EXTRA_SANITY
1119 if (!*mp)
1120 wrterror("(ES): Not on queue.\n");
1121 #endif /* MALLOC_EXTRA_SANITY */
1123 *mp = info->next;
1125 /* Free the page & the info structure if need be */
1126 page_dir[idx] = MALLOC_FIRST;
1127 vp = info->page; /* Order is important ! */
1128 if(vp != (void*)info)
1129 ifree(info);
1130 ifree(vp);
1133 static void
1134 ifree(void *ptr)
1136 struct pginfo *info;
1137 size_t idx;
1139 /* This is legal */
1140 if (ptr == NULL)
1141 return;
1143 /* If we're already sinking, don't make matters any worse. */
1144 if (suicide)
1145 return;
1147 idx = ptr2idx(ptr);
1149 if (idx < malloc_pageshift) {
1150 wrtwarning("junk pointer, too low to make sense.\n");
1151 return;
1154 if (idx > last_idx) {
1155 wrtwarning("junk pointer, too high to make sense.\n");
1156 return;
1159 info = page_dir[idx];
1161 if (info < MALLOC_MAGIC)
1162 free_pages(ptr, idx, info);
1163 else
1164 free_bytes(ptr, idx, info);
1165 return;
1168 static int malloc_active; /* Recusion flag for public interface. */
1169 static unsigned malloc_started; /* Set when initialization has been done */
1171 static void *
1172 pubrealloc(void *ptr, size_t size, const char *func)
1174 void *r;
1175 int err = 0;
1178 * If a thread is inside our code with a functional lock held, and then
1179 * catches a signal which calls us again, we would get a deadlock if the
1180 * lock is not of a recursive type.
1182 _MALLOC_LOCK();
1183 malloc_func = func;
1184 if (malloc_active > 0) {
1185 if (malloc_active == 1) {
1186 wrtwarning("recursive call\n");
1187 malloc_active = 2;
1189 _MALLOC_UNLOCK();
1190 errno = EINVAL;
1191 return (NULL);
1193 malloc_active = 1;
1195 if (!malloc_started) {
1196 if (ptr != NULL) {
1197 wrtwarning("malloc() has never been called\n");
1198 malloc_active = 0;
1199 _MALLOC_UNLOCK();
1200 errno = EINVAL;
1201 return (NULL);
1203 malloc_init();
1204 malloc_started = 1;
1207 if (ptr == ZEROSIZEPTR)
1208 ptr = NULL;
1209 if (malloc_sysv && !size) {
1210 if (ptr != NULL)
1211 ifree(ptr);
1212 r = NULL;
1213 } else if (!size) {
1214 if (ptr != NULL)
1215 ifree(ptr);
1216 r = ZEROSIZEPTR;
1217 } else if (ptr == NULL) {
1218 r = imalloc(size);
1219 err = (r == NULL);
1220 } else {
1221 r = irealloc(ptr, size);
1222 err = (r == NULL);
1224 UTRACE(ptr, size, r);
1225 malloc_active = 0;
1226 _MALLOC_UNLOCK();
1227 if (malloc_xmalloc && err)
1228 wrterror("out of memory\n");
1229 if (err)
1230 errno = ENOMEM;
1231 return (r);
/*
 * These are the public exported interface routines.
 */

void *
malloc(size_t size)
{

	return pubrealloc(NULL, size, " in malloc():");
}
1246 posix_memalign(void **memptr, size_t alignment, size_t size)
1248 int err;
1249 void *result;
1251 if (!malloc_started) {
1252 malloc_init();
1253 malloc_started = 1;
1255 /* Make sure that alignment is a large enough power of 2. */
1256 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1257 alignment > malloc_pagesize)
1258 return EINVAL;
1261 * (size | alignment) is enough to assure the requested alignment, since
1262 * the allocator always allocates power-of-two blocks.
1264 err = errno; /* Protect errno against changes in pubrealloc(). */
1265 result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1266 errno = err;
1268 if (result == NULL)
1269 return ENOMEM;
1271 *memptr = result;
1272 return 0;
/*
 * calloc(3): allocate num*size bytes, zero-filled, guarding the
 * multiplication against size_t overflow.
 */
void *
calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		errno = ENOMEM;
		return (NULL);
	}

	ret = pubrealloc(NULL, num * size, " in calloc():");

	if (ret != NULL)
		memset(ret, 0, num * size);

	return ret;
}
/* free(3): release an allocation; NULL is a no-op inside pubrealloc(). */
void
free(void *ptr)
{

	pubrealloc(ptr, 0, " in free():");
}
/* realloc(3): resize an allocation via the common entry point. */
void *
realloc(void *ptr, size_t size)
{

	return pubrealloc(ptr, size, " in realloc():");
}
/*
 * Begin library-private functions, used by threading libraries for protection
 * of malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */

void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}

void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}