/*
 * Origin: minix3.git, lib/libc/stdlib/malloc.c
 * (extracted from commit "Remove building with NOCRYPTO option",
 *  blob 92da787b6856eda1e9ca5e6e6b54dd6898156480)
 */
1 /* $NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $ */
3 /*
4 * ----------------------------------------------------------------------------
5 * "THE BEER-WARE LICENSE" (Revision 42):
6 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
7 * can do whatever you want with this stuff. If we meet some day, and you think
8 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
9 * ----------------------------------------------------------------------------
11 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
15 #if defined(__minix)
16 #include "extern.h"
17 #ifdef _LIBSYS
18 #include <minix/sysutil.h>
19 #include <machine/param.h>
20 #include <machine/vmparam.h>
21 #define MALLOC_NO_SYSCALLS
22 #define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
23 #define wrterror(w) panic("libminc malloc error: %s\n", w)
24 #endif
25 #endif /* defined(__minix) */
28 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
29 * to internal conditions and consistency in malloc.c. This has a
30 * noticeable runtime performance hit, and generally will not do you
31 * any good unless you fiddle with the internals of malloc or want
32 * to catch random pointer corruption as early as possible.
34 #ifndef MALLOC_EXTRA_SANITY
35 #undef MALLOC_EXTRA_SANITY
36 #endif
39 * What to use for Junk. This is the byte value we use to fill with
40 * when the 'J' option is enabled.
42 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
45 * The basic parameters you can tweak.
47 * malloc_minsize minimum size of an allocation in bytes.
48 * If this is too small it's too much work
49 * to manage them. This is also the smallest
50 * unit of alignment used for the storage
51 * returned by malloc/realloc.
55 #include "namespace.h"
56 #if defined(__FreeBSD__)
57 # if defined(__i386__)
58 # define malloc_minsize 16U
59 # endif
60 # if defined(__ia64__)
61 # define malloc_pageshift 13U
62 # define malloc_minsize 16U
63 # endif
64 # if defined(__alpha__)
65 # define malloc_pageshift 13U
66 # define malloc_minsize 16U
67 # endif
68 # if defined(__sparc64__)
69 # define malloc_pageshift 13U
70 # define malloc_minsize 16U
71 # endif
72 # if defined(__amd64__)
73 # define malloc_pageshift 12U
74 # define malloc_minsize 16U
75 # endif
76 # if defined(__arm__)
77 # define malloc_pageshift 12U
78 # define malloc_minsize 16U
79 # endif
80 # define HAS_UTRACE
81 # define UTRACE_LABEL
83 #include <sys/cdefs.h>
84 void utrace(struct ut *, int);
87 * Make malloc/free/realloc thread-safe in libc for use with
88 * kernel threads.
90 # include "libc_private.h"
91 # include "spinlock.h"
92 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
93 # define _MALLOC_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
94 # define _MALLOC_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
95 #endif /* __FreeBSD__ */
97 #if defined(__minix)
98 /* #undef these things so that malloc uses the non-internal symbols.
99 * This is necessary for VM to be able to define its own versions, and
100 * use this malloc.
102 #undef mmap
103 #undef munmap
104 #endif /* defined(__minix) */
106 #include <sys/types.h>
107 #if defined(__NetBSD__)
108 # define malloc_minsize 16U
109 # define HAS_UTRACE
110 # define UTRACE_LABEL "malloc",
111 #include <sys/cdefs.h>
112 #include "extern.h"
113 #if defined(LIBC_SCCS) && !defined(lint)
114 __RCSID("$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $");
115 #endif /* LIBC_SCCS and not lint */
116 int utrace(const char *, void *, size_t);
118 #include <reentrant.h>
119 extern int __isthreaded;
120 static mutex_t thread_lock = MUTEX_INITIALIZER;
121 #define _MALLOC_LOCK() if (__isthreaded) mutex_lock(&thread_lock);
122 #define _MALLOC_UNLOCK() if (__isthreaded) mutex_unlock(&thread_lock);
123 #endif /* __NetBSD__ */
125 #if defined(__sparc__) && defined(sun)
126 # define malloc_minsize 16U
127 # define MAP_ANON (0)
128 static int fdzero;
129 # define MMAP_FD fdzero
130 # define INIT_MMAP() \
131 { if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
132 wrterror("open of /dev/zero"); }
133 #endif /* __sparc__ */
135 /* Insert your combination here... */
136 #if defined(__FOOCPU__) && defined(__BAROS__)
137 # define malloc_minsize 16U
138 #endif /* __FOOCPU__ && __BAROS__ */
140 #ifndef ZEROSIZEPTR
141 #define ZEROSIZEPTR ((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
142 #endif
145 * No user serviceable parts behind this point.
147 #include <sys/types.h>
148 #include <sys/mman.h>
149 #include <errno.h>
150 #include <fcntl.h>
151 #include <paths.h>
152 #include <stddef.h>
153 #include <stdio.h>
154 #include <stdlib.h>
155 #include <string.h>
156 #include <unistd.h>
159 * This structure describes a page worth of chunks.
162 struct pginfo {
163 struct pginfo *next; /* next on the free list */
164 void *page; /* Pointer to the page */
165 u_short size; /* size of this page's chunks */
166 u_short shift; /* How far to shift for this size chunks */
167 u_short free; /* How many free chunks */
168 u_short total; /* How many chunk */
169 u_int bits[1]; /* Which chunks are free */
173 * This structure describes a number of free pages.
176 struct pgfree {
177 struct pgfree *next; /* next run of free pages */
178 struct pgfree *prev; /* prev run of free pages */
179 void *page; /* pointer to free pages */
180 void *end; /* pointer to end of free pages */
181 size_t size; /* number of bytes free */
185 * How many bits per u_int in the bitmap.
186 * Change only if not 8 bits/byte
188 #define MALLOC_BITS ((int)(8*sizeof(u_int)))
191 * Magic values to put in the page_directory
193 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
194 #define MALLOC_FREE ((struct pginfo*) 1)
195 #define MALLOC_FIRST ((struct pginfo*) 2)
196 #define MALLOC_FOLLOW ((struct pginfo*) 3)
197 #define MALLOC_MAGIC ((struct pginfo*) 4)
200 * Page size related parameters, computed at run-time.
202 static size_t malloc_pagesize;
203 static size_t malloc_pageshift;
204 static size_t malloc_pagemask;
206 #ifndef malloc_minsize
207 #define malloc_minsize 16U
208 #endif
210 #ifndef malloc_maxsize
211 #define malloc_maxsize ((malloc_pagesize)>>1)
212 #endif
214 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
215 #define ptr2idx(foo) \
216 (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
218 #ifndef _MALLOC_LOCK
219 #define _MALLOC_LOCK()
220 #endif
222 #ifndef _MALLOC_UNLOCK
223 #define _MALLOC_UNLOCK()
224 #endif
226 #ifndef MMAP_FD
227 #define MMAP_FD (-1)
228 #endif
230 #ifndef INIT_MMAP
231 #define INIT_MMAP()
232 #endif
234 #if !defined(__minix)
235 #ifndef MADV_FREE
236 #define MADV_FREE MADV_DONTNEED
237 #endif
238 #else
239 #undef MADV_FREE
240 #endif /* !defined(__minix) */
242 /* Number of free pages we cache */
243 static size_t malloc_cache = 16;
245 /* The offset from pagenumber to index into the page directory */
246 static size_t malloc_origo;
248 /* The last index in the page directory we care about */
249 static size_t last_idx;
251 /* Pointer to page directory. Allocated "as if with" malloc */
252 static struct pginfo **page_dir;
254 /* How many slots in the page directory */
255 static size_t malloc_ninfo;
257 /* Free pages line up here */
258 static struct pgfree free_list;
260 /* Abort(), user doesn't handle problems. */
261 static int malloc_abort;
263 /* Are we trying to die ? */
264 static int suicide;
266 /* always realloc ? */
267 static int malloc_realloc;
269 /* pass the kernel a hint on free pages ? */
270 #if defined(MADV_FREE)
271 static int malloc_hint = 0;
272 #endif
274 /* xmalloc behaviour ? */
275 static int malloc_xmalloc;
277 /* sysv behaviour for malloc(0) ? */
278 static int malloc_sysv;
280 /* zero fill ? */
281 static int malloc_zero;
283 /* junk fill ? */
284 static int malloc_junk;
286 #ifdef HAS_UTRACE
288 /* utrace ? */
289 static int malloc_utrace;
291 struct ut { void *p; size_t s; void *r; };
293 #define UTRACE(a, b, c) \
294 if (malloc_utrace) { \
295 struct ut u; \
296 u.p=a; u.s = b; u.r=c; \
297 utrace(UTRACE_LABEL (void *) &u, sizeof u); \
299 #else /* !HAS_UTRACE */
300 #define UTRACE(a,b,c)
301 #endif /* HAS_UTRACE */
303 /* my last break. */
304 static void *malloc_brk;
306 /* one location cache for free-list holders */
307 static struct pgfree *px;
309 /* compile-time options */
310 const char *_malloc_options;
312 /* Name of the current public function */
313 static const char *malloc_func;
315 /* Macro for mmap */
316 #define MMAP(size) \
317 mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
318 MMAP_FD, (off_t)0);
321 * Necessary function declarations
323 static int extend_pgdir(size_t idx);
324 static void *imalloc(size_t size);
325 static void ifree(void *ptr);
326 static void *irealloc(void *ptr, size_t size);
328 #ifndef MALLOC_NO_SYSCALLS
329 static void
330 wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
333 write(STDERR_FILENO, p1, strlen(p1));
334 write(STDERR_FILENO, p2, strlen(p2));
335 write(STDERR_FILENO, p3, strlen(p3));
336 write(STDERR_FILENO, p4, strlen(p4));
339 void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
340 const char *p4) = wrtmessage;
341 static void
342 wrterror(const char *p)
345 suicide = 1;
346 _malloc_message(getprogname(), malloc_func, " error: ", p);
347 abort();
350 static void
351 wrtwarning(const char *p)
355 * Sensitive processes, somewhat arbitrarily defined here as setuid,
356 * setgid, root and wheel cannot afford to have malloc mistakes.
358 if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
359 wrterror(p);
361 #endif
364 * Allocate a number of pages from the OS
366 static void *
367 map_pages(size_t pages)
369 caddr_t result, rresult, tail;
370 intptr_t bytes = pages << malloc_pageshift;
372 if (bytes < 0 || (size_t)bytes < pages) {
373 errno = ENOMEM;
374 return NULL;
377 if ((result = sbrk(bytes)) == (void *)-1)
378 return NULL;
381 * Round to a page, in case sbrk(2) did not do this for us
383 rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
384 if (result < rresult) {
385 /* make sure we have enough space to fit bytes */
386 if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
387 /* we failed, put everything back */
388 if (brk(result)) {
389 wrterror("brk(2) failed [internal error]\n");
393 tail = rresult + (size_t)bytes;
395 last_idx = ptr2idx(tail) - 1;
396 malloc_brk = tail;
398 if ((last_idx+1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
399 malloc_brk = result;
400 last_idx = ptr2idx(malloc_brk) - 1;
401 /* Put back break point since we failed. */
402 if (brk(malloc_brk))
403 wrterror("brk(2) failed [internal error]\n");
404 return 0;
407 return rresult;
411 * Extend page directory
413 static int
414 extend_pgdir(size_t idx)
416 struct pginfo **new, **old;
417 size_t newlen, oldlen;
419 /* check for overflow */
420 if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
421 + (malloc_pagesize / sizeof *page_dir)) < idx) {
422 errno = ENOMEM;
423 return 0;
426 /* Make it this many pages */
427 newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;
429 /* remember the old mapping size */
430 oldlen = malloc_ninfo * sizeof *page_dir;
433 * NOTE: we allocate new pages and copy the directory rather than tempt
434 * fate by trying to "grow" the region.. There is nothing to prevent
435 * us from accidentally re-mapping space that's been allocated by our caller
436 * via dlopen() or other mmap().
438 * The copy problem is not too bad, as there is 4K of page index per
439 * 4MB of malloc arena.
441 * We can totally avoid the copy if we open a file descriptor to associate
442 * the anon mappings with. Then, when we remap the pages at the new
443 * address, the old pages will be "magically" remapped.. But this means
444 * keeping open a "secret" file descriptor.....
447 /* Get new pages */
448 new = MMAP(newlen);
449 if (new == MAP_FAILED)
450 return 0;
452 /* Copy the old stuff */
453 memcpy(new, page_dir, oldlen);
455 /* register the new size */
456 malloc_ninfo = newlen / sizeof *page_dir;
458 /* swap the pointers */
459 old = page_dir;
460 page_dir = new;
462 /* Now free the old stuff */
463 munmap(old, oldlen);
464 return 1;
468 * Initialize the world
470 static void
471 malloc_init(void)
473 #ifndef MALLOC_NO_SYSCALLS
474 const char *p;
475 char b[64];
476 size_t i;
477 ssize_t j;
478 #endif
479 int serrno = errno;
480 #ifndef MALLOC_NO_SYSCALLS
483 * Compute page-size related variables.
485 malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
486 #else
487 malloc_pagesize = PAGE_SIZE;
488 #endif
489 malloc_pagemask = malloc_pagesize - 1;
490 for (malloc_pageshift = 0;
491 (1UL << malloc_pageshift) != malloc_pagesize;
492 malloc_pageshift++)
493 /* nothing */ ;
495 INIT_MMAP();
497 #ifdef MALLOC_EXTRA_SANITY
498 malloc_junk = 1;
499 #endif /* MALLOC_EXTRA_SANITY */
501 #ifndef MALLOC_NO_SYSCALLS
502 for (i = 0; i < 3; i++) {
503 if (i == 0) {
504 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
505 if (j == -1)
506 continue;
507 b[j] = '\0';
508 p = b;
509 } else if (i == 1 && issetugid() == 0) {
510 p = getenv("MALLOC_OPTIONS");
511 } else if (i == 1) {
512 continue;
513 } else {
514 p = _malloc_options;
516 for (; p != NULL && *p != '\0'; p++) {
517 switch (*p) {
518 case '>': malloc_cache <<= 1; break;
519 case '<': malloc_cache >>= 1; break;
520 case 'a': malloc_abort = 0; break;
521 case 'A': malloc_abort = 1; break;
522 #if !defined(__minix)
523 case 'h': malloc_hint = 0; break;
524 case 'H': malloc_hint = 1; break;
525 #endif /* !defined(__minix) */
526 case 'r': malloc_realloc = 0; break;
527 case 'R': malloc_realloc = 1; break;
528 case 'j': malloc_junk = 0; break;
529 case 'J': malloc_junk = 1; break;
530 #ifdef HAS_UTRACE
531 case 'u': malloc_utrace = 0; break;
532 case 'U': malloc_utrace = 1; break;
533 #endif
534 case 'v': malloc_sysv = 0; break;
535 case 'V': malloc_sysv = 1; break;
536 case 'x': malloc_xmalloc = 0; break;
537 case 'X': malloc_xmalloc = 1; break;
538 case 'z': malloc_zero = 0; break;
539 case 'Z': malloc_zero = 1; break;
540 default:
541 _malloc_message(getprogname(), malloc_func,
542 " warning: ", "unknown char in MALLOC_OPTIONS\n");
543 break;
547 #endif
549 UTRACE(0, 0, 0);
552 * We want junk in the entire allocation, and zero only in the part
553 * the user asked for.
555 if (malloc_zero)
556 malloc_junk = 1;
558 /* Allocate one page for the page directory */
559 page_dir = MMAP(malloc_pagesize);
561 if (page_dir == MAP_FAILED)
562 wrterror("mmap(2) failed, check limits.\n");
565 * We need a maximum of malloc_pageshift buckets, steal these from the
566 * front of the page_directory;
568 malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
569 >> malloc_pageshift;
570 malloc_origo -= malloc_pageshift;
572 malloc_ninfo = malloc_pagesize / sizeof *page_dir;
574 /* Recalculate the cache size in bytes, and make sure it's nonzero */
576 if (!malloc_cache)
577 malloc_cache++;
579 malloc_cache <<= malloc_pageshift;
582 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
583 * We can sbrk(2) further back when we keep this on a low address.
585 px = imalloc(sizeof *px);
587 errno = serrno;
591 * Allocate a number of complete pages
593 static void *
594 malloc_pages(size_t size)
596 void *p, *delay_free = NULL;
597 size_t i;
598 struct pgfree *pf;
599 size_t idx;
601 idx = pageround(size);
602 if (idx < size) {
603 errno = ENOMEM;
604 return NULL;
605 } else
606 size = idx;
608 p = NULL;
610 /* Look for free pages before asking for more */
611 for(pf = free_list.next; pf; pf = pf->next) {
613 #ifdef MALLOC_EXTRA_SANITY
614 if (pf->size & malloc_pagemask)
615 wrterror("(ES): junk length entry on free_list.\n");
616 if (!pf->size)
617 wrterror("(ES): zero length entry on free_list.\n");
618 if (pf->page == pf->end)
619 wrterror("(ES): zero entry on free_list.\n");
620 if (pf->page > pf->end)
621 wrterror("(ES): sick entry on free_list.\n");
622 if ((void*)pf->page >= (void*)sbrk(0))
623 wrterror("(ES): entry on free_list past brk.\n");
624 if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
625 wrterror("(ES): non-free first page on free-list.\n");
626 if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
627 wrterror("(ES): non-free last page on free-list.\n");
628 #endif /* MALLOC_EXTRA_SANITY */
630 if (pf->size < size)
631 continue;
633 if (pf->size == size) {
634 p = pf->page;
635 if (pf->next != NULL)
636 pf->next->prev = pf->prev;
637 pf->prev->next = pf->next;
638 delay_free = pf;
639 break;
642 p = pf->page;
643 pf->page = (char *)pf->page + size;
644 pf->size -= size;
645 break;
648 #ifdef MALLOC_EXTRA_SANITY
649 if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
650 wrterror("(ES): allocated non-free page on free-list.\n");
651 #endif /* MALLOC_EXTRA_SANITY */
653 size >>= malloc_pageshift;
655 /* Map new pages */
656 if (p == NULL)
657 p = map_pages(size);
659 if (p != NULL) {
661 idx = ptr2idx(p);
662 page_dir[idx] = MALLOC_FIRST;
663 for (i=1;i<size;i++)
664 page_dir[idx+i] = MALLOC_FOLLOW;
666 if (malloc_junk)
667 memset(p, SOME_JUNK, size << malloc_pageshift);
670 if (delay_free) {
671 if (px == NULL)
672 px = delay_free;
673 else
674 ifree(delay_free);
677 return p;
681 * Allocate a page of fragments
684 static inline int
685 malloc_make_chunks(int bits)
687 struct pginfo *bp;
688 void *pp;
689 int i, k;
690 long l;
692 /* Allocate a new bucket */
693 pp = malloc_pages(malloc_pagesize);
694 if (pp == NULL)
695 return 0;
697 /* Find length of admin structure */
698 l = (long)offsetof(struct pginfo, bits[0]);
699 l += (long)sizeof bp->bits[0] *
700 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
702 /* Don't waste more than two chunks on this */
703 if ((1<<(bits)) <= l+l) {
704 bp = (struct pginfo *)pp;
705 } else {
706 bp = imalloc((size_t)l);
707 if (bp == NULL) {
708 ifree(pp);
709 return 0;
713 bp->size = (1<<bits);
714 bp->shift = bits;
715 bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
716 bp->page = pp;
718 /* set all valid bits in the bitmap */
719 k = bp->total;
720 i = 0;
722 /* Do a bunch at a time */
723 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
724 bp->bits[i / MALLOC_BITS] = ~0U;
726 for(; i < k; i++)
727 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
729 if (bp == bp->page) {
730 /* Mark the ones we stole for ourselves */
731 for(i = 0; l > 0; i++) {
732 bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
733 bp->free--;
734 bp->total--;
735 l -= (long)(1 << bits);
739 /* MALLOC_LOCK */
741 page_dir[ptr2idx(pp)] = bp;
743 bp->next = page_dir[bits];
744 page_dir[bits] = bp;
746 /* MALLOC_UNLOCK */
748 return 1;
752 * Allocate a fragment
754 static void *
755 malloc_bytes(size_t size)
757 size_t i;
758 int j;
759 u_int u;
760 struct pginfo *bp;
761 size_t k;
762 u_int *lp;
764 /* Don't bother with anything less than this */
765 if (size < malloc_minsize)
766 size = malloc_minsize;
769 /* Find the right bucket */
770 j = 1;
771 i = size-1;
772 while (i >>= 1)
773 j++;
775 /* If it's empty, make a page more of that size chunks */
776 if (page_dir[j] == NULL && !malloc_make_chunks(j))
777 return NULL;
779 bp = page_dir[j];
781 /* Find first word of bitmap which isn't empty */
782 for (lp = bp->bits; !*lp; lp++)
785 /* Find that bit, and tweak it */
786 u = 1;
787 k = 0;
788 while (!(*lp & u)) {
789 u += u;
790 k++;
792 *lp ^= u;
794 /* If there are no more free, remove from free-list */
795 if (!--bp->free) {
796 page_dir[j] = bp->next;
797 bp->next = NULL;
800 /* Adjust to the real offset of that chunk */
801 k += (lp-bp->bits)*MALLOC_BITS;
802 k <<= bp->shift;
804 if (malloc_junk)
805 memset((u_char*)bp->page + k, SOME_JUNK, (size_t)bp->size);
807 return (u_char *)bp->page + k;
811 * Allocate a piece of memory
813 static void *
814 imalloc(size_t size)
816 void *result;
818 if (suicide)
819 abort();
821 if ((size + malloc_pagesize) < size) /* Check for overflow */
822 result = NULL;
823 #ifndef __minix
824 else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
825 result = NULL;
826 #endif /* !__minix */
827 else if (size <= malloc_maxsize)
828 result = malloc_bytes(size);
829 else
830 result = malloc_pages(size);
832 if (malloc_abort && result == NULL)
833 wrterror("allocation failed.\n");
835 if (malloc_zero && result != NULL)
836 memset(result, 0, size);
838 return result;
842 * Change the size of an allocation.
844 static void *
845 irealloc(void *ptr, size_t size)
847 void *p;
848 size_t osize, idx;
849 struct pginfo **mp;
850 size_t i;
852 if (suicide)
853 abort();
855 idx = ptr2idx(ptr);
857 if (idx < malloc_pageshift) {
858 wrtwarning("junk pointer, too low to make sense.\n");
859 return 0;
862 if (idx > last_idx) {
863 wrtwarning("junk pointer, too high to make sense.\n");
864 return 0;
867 mp = &page_dir[idx];
869 if (*mp == MALLOC_FIRST) { /* Page allocation */
871 /* Check the pointer */
872 if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
873 wrtwarning("modified (page-) pointer.\n");
874 return NULL;
877 /* Find the size in bytes */
878 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
879 osize += malloc_pagesize;
881 if (!malloc_realloc && /* unless we have to, */
882 size <= osize && /* .. or are too small, */
883 size > (osize - malloc_pagesize)) { /* .. or can free a page, */
884 if (malloc_junk)
885 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
886 return ptr; /* don't do anything. */
889 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
891 /* Check the pointer for sane values */
892 if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
893 wrtwarning("modified (chunk-) pointer.\n");
894 return NULL;
897 /* Find the chunk index in the page */
898 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;
900 /* Verify that it isn't a free chunk already */
901 if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
902 wrtwarning("chunk is already free.\n");
903 return NULL;
906 osize = (*mp)->size;
908 if (!malloc_realloc && /* Unless we have to, */
909 size <= osize && /* ..or are too small, */
910 (size > osize / 2 || /* ..or could use a smaller size, */
911 osize == malloc_minsize)) { /* ..(if there is one) */
912 if (malloc_junk)
913 memset((u_char *)ptr + size, SOME_JUNK, osize-size);
914 return ptr; /* ..Don't do anything */
917 } else {
918 wrtwarning("pointer to wrong page.\n");
919 return NULL;
922 p = imalloc(size);
924 if (p != NULL) {
925 /* copy the lesser of the two sizes, and free the old one */
926 if (!size || !osize)
928 else if (osize < size)
929 memcpy(p, ptr, osize);
930 else
931 memcpy(p, ptr, size);
932 ifree(ptr);
934 return p;
938 * Free a sequence of pages
941 static inline void
942 free_pages(void *ptr, size_t idx, struct pginfo *info)
944 size_t i;
945 struct pgfree *pf, *pt=NULL;
946 size_t l;
947 void *tail;
949 if (info == MALLOC_FREE) {
950 wrtwarning("page is already free.\n");
951 return;
954 if (info != MALLOC_FIRST) {
955 wrtwarning("pointer to wrong page.\n");
956 return;
959 if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
960 wrtwarning("modified (page-) pointer.\n");
961 return;
964 /* Count how many pages and mark them free at the same time */
965 page_dir[idx] = MALLOC_FREE;
966 for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
967 page_dir[idx + i] = MALLOC_FREE;
969 l = i << malloc_pageshift;
971 if (malloc_junk)
972 memset(ptr, SOME_JUNK, l);
974 #if !defined(__minix)
975 if (malloc_hint)
976 madvise(ptr, l, MADV_FREE);
977 #endif /* !defined(__minix) */
979 tail = (char *)ptr+l;
981 /* add to free-list */
982 if (px == NULL)
983 px = imalloc(sizeof *px); /* This cannot fail... */
984 px->page = ptr;
985 px->end = tail;
986 px->size = l;
987 if (free_list.next == NULL) {
989 /* Nothing on free list, put this at head */
990 px->next = free_list.next;
991 px->prev = &free_list;
992 free_list.next = px;
993 pf = px;
994 px = NULL;
996 } else {
998 /* Find the right spot, leave pf pointing to the modified entry. */
999 tail = (char *)ptr+l;
1001 for(pf = free_list.next; pf->end < ptr && pf->next != NULL;
1002 pf = pf->next)
1003 ; /* Race ahead here */
1005 if (pf->page > tail) {
1006 /* Insert before entry */
1007 px->next = pf;
1008 px->prev = pf->prev;
1009 pf->prev = px;
1010 px->prev->next = px;
1011 pf = px;
1012 px = NULL;
1013 } else if (pf->end == ptr ) {
1014 /* Append to the previous entry */
1015 pf->end = (char *)pf->end + l;
1016 pf->size += l;
1017 if (pf->next != NULL && pf->end == pf->next->page ) {
1018 /* And collapse the next too. */
1019 pt = pf->next;
1020 pf->end = pt->end;
1021 pf->size += pt->size;
1022 pf->next = pt->next;
1023 if (pf->next != NULL)
1024 pf->next->prev = pf;
1026 } else if (pf->page == tail) {
1027 /* Prepend to entry */
1028 pf->size += l;
1029 pf->page = ptr;
1030 } else if (pf->next == NULL) {
1031 /* Append at tail of chain */
1032 px->next = NULL;
1033 px->prev = pf;
1034 pf->next = px;
1035 pf = px;
1036 px = NULL;
1037 } else {
1038 wrterror("freelist is destroyed.\n");
1042 /* Return something to OS ? */
1043 if (pf->next == NULL && /* If we're the last one, */
1044 pf->size > malloc_cache && /* ..and the cache is full, */
1045 pf->end == malloc_brk && /* ..and none behind us, */
1046 malloc_brk == sbrk((intptr_t)0)) { /* ..and it's OK to do... */
1049 * Keep the cache intact. Notice that the '>' above guarantees that
1050 * the pf will always have at least one page afterwards.
1052 pf->end = (char *)pf->page + malloc_cache;
1053 pf->size = malloc_cache;
1055 brk(pf->end);
1056 malloc_brk = pf->end;
1058 idx = ptr2idx(pf->end);
1060 for(i=idx;i <= last_idx;)
1061 page_dir[i++] = MALLOC_NOT_MINE;
1063 last_idx = idx - 1;
1065 /* XXX: We could realloc/shrink the pagedir here I guess. */
1067 if (pt != NULL)
1068 ifree(pt);
1072 * Free a chunk, and possibly the page it's on, if the page becomes empty.
1075 static inline void
1076 free_bytes(void *ptr, size_t idx, struct pginfo *info)
1078 size_t i;
1079 struct pginfo **mp;
1080 void *vp;
1082 /* Find the chunk number on the page */
1083 i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;
1085 if (((size_t)(uintptr_t)ptr & (info->size-1))) {
1086 wrtwarning("modified (chunk-) pointer.\n");
1087 return;
1090 if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
1091 wrtwarning("chunk is already free.\n");
1092 return;
1095 if (malloc_junk)
1096 memset(ptr, SOME_JUNK, (size_t)info->size);
1098 info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
1099 info->free++;
1101 mp = page_dir + info->shift;
1103 if (info->free == 1) {
1105 /* Page became non-full */
1107 mp = page_dir + info->shift;
1108 /* Insert in address order */
1109 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
1110 mp = &(*mp)->next;
1111 info->next = *mp;
1112 *mp = info;
1113 return;
1116 if (info->free != info->total)
1117 return;
1119 /* Find & remove this page in the queue */
1120 while (*mp != info) {
1121 mp = &((*mp)->next);
1122 #ifdef MALLOC_EXTRA_SANITY
1123 if (!*mp)
1124 wrterror("(ES): Not on queue.\n");
1125 #endif /* MALLOC_EXTRA_SANITY */
1127 *mp = info->next;
1129 /* Free the page & the info structure if need be */
1130 page_dir[idx] = MALLOC_FIRST;
1131 vp = info->page; /* Order is important ! */
1132 if(vp != (void*)info)
1133 ifree(info);
1134 ifree(vp);
1137 static void
1138 ifree(void *ptr)
1140 struct pginfo *info;
1141 size_t idx;
1143 /* This is legal */
1144 if (ptr == NULL)
1145 return;
1147 /* If we're already sinking, don't make matters any worse. */
1148 if (suicide)
1149 return;
1151 idx = ptr2idx(ptr);
1153 if (idx < malloc_pageshift) {
1154 wrtwarning("junk pointer, too low to make sense.\n");
1155 return;
1158 if (idx > last_idx) {
1159 wrtwarning("junk pointer, too high to make sense.\n");
1160 return;
1163 info = page_dir[idx];
1165 if (info < MALLOC_MAGIC)
1166 free_pages(ptr, idx, info);
1167 else
1168 free_bytes(ptr, idx, info);
1169 return;
1172 static int malloc_active; /* Recursion flag for public interface. */
1173 static unsigned malloc_started; /* Set when initialization has been done */
1175 static void *
1176 pubrealloc(void *ptr, size_t size, const char *func)
1178 void *r;
1179 int err = 0;
1182 * If a thread is inside our code with a functional lock held, and then
1183 * catches a signal which calls us again, we would get a deadlock if the
1184 * lock is not of a recursive type.
1186 _MALLOC_LOCK();
1187 malloc_func = func;
1188 if (malloc_active > 0) {
1189 if (malloc_active == 1) {
1190 wrtwarning("recursive call\n");
1191 malloc_active = 2;
1193 _MALLOC_UNLOCK();
1194 errno = EINVAL;
1195 return (NULL);
1197 malloc_active = 1;
1199 if (!malloc_started) {
1200 if (ptr != NULL) {
1201 wrtwarning("malloc() has never been called\n");
1202 malloc_active = 0;
1203 _MALLOC_UNLOCK();
1204 errno = EINVAL;
1205 return (NULL);
1207 malloc_init();
1208 malloc_started = 1;
1211 if (ptr == ZEROSIZEPTR)
1212 ptr = NULL;
1213 if (malloc_sysv && !size) {
1214 if (ptr != NULL)
1215 ifree(ptr);
1216 r = NULL;
1217 } else if (!size) {
1218 if (ptr != NULL)
1219 ifree(ptr);
1220 r = ZEROSIZEPTR;
1221 } else if (ptr == NULL) {
1222 r = imalloc(size);
1223 err = (r == NULL);
1224 } else {
1225 r = irealloc(ptr, size);
1226 err = (r == NULL);
1228 UTRACE(ptr, size, r);
1229 malloc_active = 0;
1230 _MALLOC_UNLOCK();
1231 if (malloc_xmalloc && err)
1232 wrterror("out of memory\n");
1233 if (err)
1234 errno = ENOMEM;
1235 return (r);
1239 * These are the public exported interface routines.
1242 void *
1243 malloc(size_t size)
1246 return pubrealloc(NULL, size, " in malloc():");
1250 posix_memalign(void **memptr, size_t alignment, size_t size)
1252 int err;
1253 void *result;
1255 if (!malloc_started) {
1256 malloc_init();
1257 malloc_started = 1;
1259 /* Make sure that alignment is a large enough power of 2. */
1260 if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
1261 alignment > malloc_pagesize)
1262 return EINVAL;
1265 * (size | alignment) is enough to assure the requested alignment, since
1266 * the allocator always allocates power-of-two blocks.
1268 err = errno; /* Protect errno against changes in pubrealloc(). */
1269 result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
1270 errno = err;
1272 if (result == NULL)
1273 return ENOMEM;
1275 *memptr = result;
1276 return 0;
1279 void *
1280 calloc(size_t num, size_t size)
1282 void *ret;
1284 if (size != 0 && (num * size) / size != num) {
1285 /* size_t overflow. */
1286 errno = ENOMEM;
1287 return (NULL);
1290 ret = pubrealloc(NULL, num * size, " in calloc():");
1292 if (ret != NULL)
1293 memset(ret, 0, num * size);
1295 return ret;
1298 void
1299 free(void *ptr)
1302 pubrealloc(ptr, 0, " in free():");
1305 void *
1306 realloc(void *ptr, size_t size)
1309 return pubrealloc(ptr, size, " in realloc():");
1313 * Begin library-private functions, used by threading libraries for protection
1314 * of malloc during fork(). These functions are only called if the program is
1315 * running in threaded mode, so there is no need to check whether the program
1316 * is threaded here.
1319 void
1320 _malloc_prefork(void)
1323 _MALLOC_LOCK();
1326 void
1327 _malloc_postfork(void)
1330 _MALLOC_UNLOCK();