/*	$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $	*/

/*
 * ----------------------------------------------------------------------------
 * "THE BEER-WARE LICENSE" (Revision 42):
 * <phk@FreeBSD.ORG> wrote this file.  As long as you retain this notice you
 * can do whatever you want with this stuff. If we meet some day, and you think
 * this stuff is worth it, you can buy me a beer in return.   Poul-Henning Kamp
 * ----------------------------------------------------------------------------
 *
 * From FreeBSD: malloc.c,v 1.91 2006/01/12 07:28:20 jasone
 */
#if defined(__minix)
#include <minix/sysutil.h>
#include <machine/param.h>
#include <machine/vmparam.h>
#define MALLOC_NO_SYSCALLS
#define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
#define wrterror(w) panic("libminc malloc error: %s\n", w)
#endif /* defined(__minix) */
/*
 * Defining MALLOC_EXTRA_SANITY will enable extra checks which are related
 * to internal conditions and consistency in malloc.c. This has a
 * noticeable runtime performance hit, and generally will not do you
 * any good unless you fiddle with the internals of malloc or want
 * to catch random pointer corruption as early as possible.
 */
#ifndef MALLOC_EXTRA_SANITY
#undef MALLOC_EXTRA_SANITY
#endif
/*
 * What to use for Junk.  This is the byte value we use to fill with
 * when the 'J' option is enabled.
 */
#define SOME_JUNK	0xd0		/* as in "Duh" :-) */
/*
 * The basic parameters you can tweak.
 *
 * malloc_minsize	minimum size of an allocation in bytes.
 *			If this is too small it's too much work
 *			to manage them.  This is also the smallest
 *			unit of alignment used for the storage
 *			returned by malloc/realloc.
 */
#include "namespace.h"
#if defined(__FreeBSD__)
#   if defined(__i386__)
#	define malloc_minsize		16U
#   endif
#   if defined(__ia64__)
#	define malloc_pageshift		13U
#	define malloc_minsize		16U
#   endif
#   if defined(__alpha__)
#	define malloc_pageshift		13U
#	define malloc_minsize		16U
#   endif
#   if defined(__sparc64__)
#	define malloc_pageshift		13U
#	define malloc_minsize		16U
#   endif
#   if defined(__amd64__)
#	define malloc_pageshift		12U
#	define malloc_minsize		16U
#   endif
#   if defined(__arm__)
#	define malloc_pageshift		12U
#	define malloc_minsize		16U
#   endif

#include <sys/cdefs.h>
void utrace(struct ut *, int);

    /*
     * Make malloc/free/realloc thread-safe in libc for use with
     * kernel threads.
     */
#   include "libc_private.h"
#   include "spinlock.h"
    static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
#   define _MALLOC_LOCK()	if (__isthreaded) _SPINLOCK(&thread_lock);
#   define _MALLOC_UNLOCK()	if (__isthreaded) _SPINUNLOCK(&thread_lock);
#endif /* __FreeBSD__ */
#if defined(__minix)
/* #undef these things so that malloc uses the non-internal symbols.
 * This is necessary for VM to be able to define its own versions, and
 * use this malloc.
 */
#undef brk
#undef sbrk
#endif /* defined(__minix) */
#include <sys/types.h>
#if defined(__NetBSD__)
#   define malloc_minsize		16U
#   define HAS_UTRACE
#   define UTRACE_LABEL "malloc",
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: malloc.c,v 1.56 2014/09/18 13:58:20 christos Exp $");
#endif /* LIBC_SCCS and not lint */
int utrace(const char *, void *, size_t);

#include <reentrant.h>
extern int __isthreaded;
static mutex_t thread_lock = MUTEX_INITIALIZER;
#define _MALLOC_LOCK()	if (__isthreaded) mutex_lock(&thread_lock);
#define _MALLOC_UNLOCK()	if (__isthreaded) mutex_unlock(&thread_lock);
#endif /* __NetBSD__ */
#if defined(__sparc__) && defined(sun)
#   define malloc_minsize		16U
#   define MAP_ANON			(0)
    static int fdzero;
#   define MMAP_FD	fdzero
#   define INIT_MMAP() \
	{ if ((fdzero = open(_PATH_DEVZERO, O_RDWR | O_CLOEXEC, 0000)) == -1) \
	    wrterror("open of /dev/zero"); }
#endif /* __sparc__ */
/* Insert your combination here... */
#if defined(__FOOCPU__) && defined(__BAROS__)
#   define malloc_minsize		16U
#endif /* __FOOCPU__ && __BAROS__ */

#define ZEROSIZEPTR	((void *)(uintptr_t)(1UL << (malloc_pageshift - 1)))
/*
 * No user serviceable parts behind this point.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*
 * This structure describes a page worth of chunks.
 */
struct pginfo {
	struct pginfo	*next;	/* next on the free list */
	void		*page;	/* Pointer to the page */
	u_short		size;	/* size of this page's chunks */
	u_short		shift;	/* How far to shift for this size chunks */
	u_short		free;	/* How many free chunks */
	u_short		total;	/* How many chunks */
	u_int		bits[1]; /* Which chunks are free */
};
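
/*
 * Illustrative sketch (not part of the original source): given a chunk
 * pointer "ptr" on a page whose directory entry is the pginfo "bp", the
 * chunk index and its free/allocated state come out of the bitmap as:
 *
 *	size_t i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> bp->shift;
 *	int is_free = (bp->bits[i / MALLOC_BITS] >> (i % MALLOC_BITS)) & 1;
 *
 * This is the same indexing that irealloc(), malloc_bytes() and
 * free_bytes() below perform on the live structures.
 */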
/*
 * This structure describes a number of free pages.
 */
struct pgfree {
	struct pgfree	*next;	/* next run of free pages */
	struct pgfree	*prev;	/* prev run of free pages */
	void		*page;	/* pointer to free pages */
	void		*end;	/* pointer to end of free pages */
	size_t		size;	/* number of bytes free */
};
/*
 * How many bits per u_int in the bitmap.
 * Change only if not 8 bits/byte
 */
#define	MALLOC_BITS	((int)(8*sizeof(u_int)))
/*
 * Magic values to put in the page_directory
 */
#define MALLOC_NOT_MINE	((struct pginfo*) 0)
#define MALLOC_FREE	((struct pginfo*) 1)
#define MALLOC_FIRST	((struct pginfo*) 2)
#define MALLOC_FOLLOW	((struct pginfo*) 3)
#define MALLOC_MAGIC	((struct pginfo*) 4)
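
/*
 * Illustrative note (not part of the original source): a page_dir[] entry
 * below MALLOC_MAGIC is one of the state markers above; anything at or
 * above MALLOC_MAGIC is a real "struct pginfo *" for a page that has been
 * split into chunks.  A sketch of the dispatch ifree() uses below:
 *
 *	struct pginfo *info = page_dir[idx];
 *	if (info < MALLOC_MAGIC)
 *		free_pages(ptr, idx, info);	// whole pages (or an error)
 *	else
 *		free_bytes(ptr, idx, info);	// a chunk on a chunk page
 */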
/*
 * Page size related parameters, computed at run-time.
 */
static size_t malloc_pagesize;
static size_t malloc_pageshift;
static size_t malloc_pagemask;
#ifndef malloc_minsize
#define malloc_minsize			16U
#endif

#ifndef malloc_maxsize
#define malloc_maxsize			((malloc_pagesize)>>1)
#endif
#define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
#define ptr2idx(foo) \
    (((size_t)(uintptr_t)(foo) >> malloc_pageshift)-malloc_origo)
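
/*
 * Worked example (illustrative, not part of the original source): with 4 KB
 * pages (malloc_pageshift == 12, malloc_pagemask == 0xfff),
 *
 *	pageround(0x1234)           == 0x2000
 *	ptr2idx((void *)0x20345000) == 0x20345 - malloc_origo
 *
 * i.e. pageround() rounds a size or address up to the next page boundary
 * and ptr2idx() maps an address to its slot in page_dir[].
 */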
#ifndef _MALLOC_LOCK
#define _MALLOC_LOCK()
#endif

#ifndef _MALLOC_UNLOCK
#define _MALLOC_UNLOCK()
#endif
#if !defined(__minix)
#ifndef MADV_FREE
#define MADV_FREE MADV_DONTNEED
#endif
#endif /* !defined(__minix) */
/* Number of free pages we cache */
static size_t malloc_cache = 16;

/* The offset from pagenumber to index into the page directory */
static size_t malloc_origo;

/* The last index in the page directory we care about */
static size_t last_idx;

/* Pointer to page directory. Allocated "as if with" malloc */
static struct pginfo **page_dir;

/* How many slots in the page directory */
static size_t malloc_ninfo;

/* Free pages line up here */
static struct pgfree free_list;

/* Abort(), user doesn't handle problems. */
static int malloc_abort;
/* Are we trying to die ? */
static int suicide;

/* always realloc ? */
static int malloc_realloc;

/* pass the kernel a hint on free pages ? */
#if defined(MADV_FREE)
static int malloc_hint = 0;
#endif

/* xmalloc behaviour ? */
static int malloc_xmalloc;

/* sysv behaviour for malloc(0) ? */
static int malloc_sysv;

/* zero fill ? */
static int malloc_zero;

/* junk fill ? */
static int malloc_junk;

#ifdef HAS_UTRACE
/* utrace ? */
static int malloc_utrace;
struct ut { void *p; size_t s; void *r; };

#define UTRACE(a, b, c) \
	if (malloc_utrace) {					\
		struct ut u;					\
		u.p=a; u.s = b; u.r=c;				\
		utrace(UTRACE_LABEL (void *) &u, sizeof u);	\
	}
#else /* !HAS_UTRACE */
#define UTRACE(a,b,c)
#endif /* HAS_UTRACE */
static void *malloc_brk;

/* one location cache for free-list holders */
static struct pgfree *px;

/* compile-time options */
const char *_malloc_options;

/* Name of the current public function */
static const char *malloc_func;

#ifndef MMAP_FD
#define MMAP_FD (-1)
#endif

/* Macro for mmap */
#define MMAP(size) \
	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
	    MMAP_FD, (off_t)0);
/*
 * Necessary function declarations
 */
static int extend_pgdir(size_t idx);
static void *imalloc(size_t size);
static void ifree(void *ptr);
static void *irealloc(void *ptr, size_t size);
#ifndef MALLOC_NO_SYSCALLS
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{

	write(STDERR_FILENO, p1, strlen(p1));
	write(STDERR_FILENO, p2, strlen(p2));
	write(STDERR_FILENO, p3, strlen(p3));
	write(STDERR_FILENO, p4, strlen(p4));
}

void (*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
static void
wrterror(const char *p)
{

	suicide = 1;
	_malloc_message(getprogname(), malloc_func, " error: ", p);
	abort();
}
static void
wrtwarning(const char *p)
{

	/*
	 * Sensitive processes, somewhat arbitrarily defined here as setuid,
	 * setgid, root and wheel cannot afford to have malloc mistakes.
	 */
	if (malloc_abort || issetugid() || getuid() == 0 || getgid() == 0)
		wrterror(p);
}
#endif /* MALLOC_NO_SYSCALLS */
/*
 * Allocate a number of pages from the OS
 */
static void *
map_pages(size_t pages)
{
	caddr_t result, rresult, tail;
	intptr_t bytes = pages << malloc_pageshift;

	if (bytes < 0 || (size_t)bytes < pages) {
		errno = ENOMEM;
		return NULL;
	}

	if ((result = sbrk(bytes)) == (void *)-1)
		return NULL;

	/*
	 * Round to a page, in case sbrk(2) did not do this for us
	 */
	rresult = (caddr_t)pageround((size_t)(uintptr_t)result);
	if (result < rresult) {
		/* make sure we have enough space to fit bytes */
		if (sbrk((intptr_t)(rresult - result)) == (void *) -1) {
			/* we failed, put everything back */
			if (brk(result)) {
				wrterror("brk(2) failed [internal error]\n");
			}
		}
	}
	tail = rresult + (size_t)bytes;

	last_idx = ptr2idx(tail) - 1;
	malloc_brk = tail;

	if ((last_idx + 1) >= malloc_ninfo && !extend_pgdir(last_idx)) {
		malloc_brk = result;
		last_idx = ptr2idx(malloc_brk) - 1;
		/* Put back break point since we failed. */
		if (brk(malloc_brk)) {
			wrterror("brk(2) failed [internal error]\n");
		}
		return 0;
	}

	return rresult;
}
/*
 * Extend page directory
 */
static int
extend_pgdir(size_t idx)
{
	struct pginfo **new, **old;
	size_t newlen, oldlen;

	/* check for overflow */
	if ((((~(1UL << ((sizeof(size_t) * NBBY) - 1)) / sizeof(*page_dir)) + 1)
	    + (malloc_pagesize / sizeof *page_dir)) < idx) {
		errno = ENOMEM;
		return 0;
	}

	/* Make it this many pages */
	newlen = pageround(idx * sizeof *page_dir) + malloc_pagesize;

	/* remember the old mapping size */
	oldlen = malloc_ninfo * sizeof *page_dir;

	/*
	 * NOTE: we allocate new pages and copy the directory rather than tempt
	 * fate by trying to "grow" the region.. There is nothing to prevent
	 * us from accidentally re-mapping space that's been allocated by our caller
	 * via dlopen() or other mmap().
	 *
	 * The copy problem is not too bad, as there is 4K of page index per
	 * 4MB of malloc arena.
	 *
	 * We can totally avoid the copy if we open a file descriptor to associate
	 * the anon mappings with.  Then, when we remap the pages at the new
	 * address, the old pages will be "magically" remapped..  But this means
	 * keeping open a "secret" file descriptor.....
	 */

	/* Get new pages */
	new = MMAP(newlen);
	if (new == MAP_FAILED)
		return 0;

	/* Copy the old stuff */
	memcpy(new, page_dir, oldlen);

	/* register the new size */
	malloc_ninfo = newlen / sizeof *page_dir;

	/* swap the pointers */
	old = page_dir;
	page_dir = new;

	/* Now free the old stuff */
	munmap(old, oldlen);
	return 1;
}
/*
 * Initialize the world
 */
static void
malloc_init(void)
{
#ifndef MALLOC_NO_SYSCALLS
	const char *p;
	char b[64];
	size_t i;
	ssize_t j;
#endif

#ifndef MALLOC_NO_SYSCALLS
	/*
	 * Compute page-size related variables.
	 */
	malloc_pagesize = (size_t)sysconf(_SC_PAGESIZE);
#else
	malloc_pagesize = PAGE_SIZE;
#endif
	malloc_pagemask = malloc_pagesize - 1;
	for (malloc_pageshift = 0;
	     (1UL << malloc_pageshift) != malloc_pagesize;
	     malloc_pageshift++)
		/* nothing */ ;

#ifdef MALLOC_EXTRA_SANITY
	malloc_junk = 1;
#endif /* MALLOC_EXTRA_SANITY */
#ifndef MALLOC_NO_SYSCALLS
	for (i = 0; i < 3; i++) {
		if (i == 0) {
			j = readlink("/etc/malloc.conf", b, sizeof b - 1);
			if (j == -1)
				continue;
			b[j] = '\0';
			p = b;
		} else if (i == 1 && issetugid() == 0) {
			p = getenv("MALLOC_OPTIONS");
		} else if (i == 1) {
			continue;
		} else {
			p = _malloc_options;
		}

		for (; p != NULL && *p != '\0'; p++) {
			switch (*p) {
			case '>': malloc_cache   <<= 1; break;
			case '<': malloc_cache   >>= 1; break;
			case 'a': malloc_abort   = 0; break;
			case 'A': malloc_abort   = 1; break;
#if !defined(__minix)
			case 'h': malloc_hint    = 0; break;
			case 'H': malloc_hint    = 1; break;
#endif /* !defined(__minix) */
			case 'r': malloc_realloc = 0; break;
			case 'R': malloc_realloc = 1; break;
			case 'j': malloc_junk    = 0; break;
			case 'J': malloc_junk    = 1; break;
#ifdef HAS_UTRACE
			case 'u': malloc_utrace  = 0; break;
			case 'U': malloc_utrace  = 1; break;
#endif
			case 'v': malloc_sysv    = 0; break;
			case 'V': malloc_sysv    = 1; break;
			case 'x': malloc_xmalloc = 0; break;
			case 'X': malloc_xmalloc = 1; break;
			case 'z': malloc_zero    = 0; break;
			case 'Z': malloc_zero    = 1; break;
			default:
				_malloc_message(getprogname(), malloc_func,
				    " warning: ", "unknown char in MALLOC_OPTIONS\n");
				break;
			}
		}
	}
#endif
	/*
	 * We want junk in the entire allocation, and zero only in the part
	 * the user asked for.
	 */
	if (malloc_zero)
		malloc_junk = 1;

	/* Allocate one page for the page directory */
	page_dir = MMAP(malloc_pagesize);

	if (page_dir == MAP_FAILED)
		wrterror("mmap(2) failed, check limits.\n");

	/*
	 * We need a maximum of malloc_pageshift buckets, steal these from the
	 * front of the page_directory;
	 */
	malloc_origo = pageround((size_t)(uintptr_t)sbrk((intptr_t)0))
	    >> malloc_pageshift;
	malloc_origo -= malloc_pageshift;

	malloc_ninfo = malloc_pagesize / sizeof *page_dir;

	/* Recalculate the cache size in bytes, and make sure it's nonzero */
	if (!malloc_cache)
		malloc_cache++;
	malloc_cache <<= malloc_pageshift;

	/*
	 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
	 * We can sbrk(2) further back when we keep this on a low address.
	 */
	px = imalloc(sizeof *px);
}
/*
 * Allocate a number of complete pages
 */
static void *
malloc_pages(size_t size)
{
	void *p, *delay_free = NULL;
	size_t i;
	struct pgfree *pf;
	size_t idx;

	idx = pageround(size);
	if (idx < size) {
		errno = ENOMEM;
		return NULL;
	} else
		size = idx;

	p = NULL;

	/* Look for free pages before asking for more */
	for (pf = free_list.next; pf; pf = pf->next) {

#ifdef MALLOC_EXTRA_SANITY
		if (pf->size & malloc_pagemask)
			wrterror("(ES): junk length entry on free_list.\n");
		if (!pf->size)
			wrterror("(ES): zero length entry on free_list.\n");
		if (pf->page == pf->end)
			wrterror("(ES): zero entry on free_list.\n");
		if (pf->page > pf->end)
			wrterror("(ES): sick entry on free_list.\n");
		if ((void*)pf->page >= (void*)sbrk(0))
			wrterror("(ES): entry on free_list past brk.\n");
		if (page_dir[ptr2idx(pf->page)] != MALLOC_FREE)
			wrterror("(ES): non-free first page on free-list.\n");
		if (page_dir[ptr2idx(pf->end)-1] != MALLOC_FREE)
			wrterror("(ES): non-free last page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

		if (pf->size < size)
			continue;

		if (pf->size == size) {
			p = pf->page;
			if (pf->next != NULL)
				pf->next->prev = pf->prev;
			pf->prev->next = pf->next;
			delay_free = pf;
			break;
		}

		p = pf->page;
		pf->page = (char *)pf->page + size;
		pf->size -= size;
		break;
	}

#ifdef MALLOC_EXTRA_SANITY
	if (p != NULL && page_dir[ptr2idx(p)] != MALLOC_FREE)
		wrterror("(ES): allocated non-free page on free-list.\n");
#endif /* MALLOC_EXTRA_SANITY */

	size >>= malloc_pageshift;

	/* Map new pages */
	if (p == NULL)
		p = map_pages(size);

	if (p != NULL) {

		idx = ptr2idx(p);
		page_dir[idx] = MALLOC_FIRST;
		for (i = 1; i < size; i++)
			page_dir[idx+i] = MALLOC_FOLLOW;

		if (malloc_junk)
			memset(p, SOME_JUNK, size << malloc_pageshift);
	}

	if (delay_free) {
		if (px == NULL)
			px = delay_free;
		else
			ifree(delay_free);
	}

	return p;
}
/*
 * Allocate a page of fragments
 */

static inline int
malloc_make_chunks(int bits)
{
	struct pginfo *bp;
	void *pp;
	int i, k;
	long l;

	/* Allocate a new bucket */
	pp = malloc_pages(malloc_pagesize);
	if (pp == NULL)
		return 0;

	/* Find length of admin structure */
	l = (long)offsetof(struct pginfo, bits[0]);
	l += (long)sizeof bp->bits[0] *
	    (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);

	/* Don't waste more than two chunks on this */
	if ((1<<(bits)) <= l+l) {
		bp = (struct pginfo *)pp;
	} else {
		bp = imalloc((size_t)l);
		if (bp == NULL) {
			ifree(pp);
			return 0;
		}
	}

	bp->size = (1<<bits);
	bp->shift = bits;
	bp->total = bp->free = (u_short)(malloc_pagesize >> bits);
	bp->page = pp;

	/* set all valid bits in the bitmap */
	k = (int)bp->total;
	i = 0;

	/* Do a bunch at a time */
	for (;k-i >= MALLOC_BITS; i += MALLOC_BITS)
		bp->bits[i / MALLOC_BITS] = ~0U;

	for (; i < k; i++)
		bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);

	if (bp == bp->page) {
		/* Mark the ones we stole for ourselves */
		for (i = 0; l > 0; i++) {
			bp->bits[i / MALLOC_BITS] &= ~(1 << (i % MALLOC_BITS));
			bp->free--;
			bp->total--;
			l -= (long)(1 << bits);
		}
	}

	page_dir[ptr2idx(pp)] = bp;

	bp->next = page_dir[bits];
	page_dir[bits] = bp;

	return 1;
}
/*
 * Allocate a fragment
 */
static void *
malloc_bytes(size_t size)
{
	size_t i;
	int j;
	u_int u;
	struct pginfo *bp;
	size_t k;
	u_int *lp;

	/* Don't bother with anything less than this */
	if (size < malloc_minsize)
		size = malloc_minsize;

	/* Find the right bucket */
	j = 1;
	i = size-1;
	while (i >>= 1)
		j++;

	/* If it's empty, make a page more of that size chunks */
	if (page_dir[j] == NULL && !malloc_make_chunks(j))
		return NULL;

	bp = page_dir[j];

	/* Find first word of bitmap which isn't empty */
	for (lp = bp->bits; !*lp; lp++)
		;

	/* Find that bit, and tweak it */
	u = 1;
	k = 0;
	while (!(*lp & u)) {
		u += u;
		k++;
	}
	*lp ^= u;

	/* If there are no more free, remove from free-list */
	if (!--bp->free) {
		page_dir[j] = bp->next;
		bp->next = NULL;
	}

	/* Adjust to the real offset of that chunk */
	k += (lp-bp->bits)*MALLOC_BITS;
	k <<= bp->shift;

	if (malloc_junk)
		memset((u_char *)bp->page + k, SOME_JUNK, (size_t)bp->size);

	return (u_char *)bp->page + k;
}
/*
 * Allocate a piece of memory
 */
static void *
imalloc(size_t size)
{
	void *result;

	if (suicide)
		abort();

	if ((size + malloc_pagesize) < size)	/* Check for overflow */
		result = NULL;
#if !defined(__minix)
	else if ((size + malloc_pagesize) >= (uintptr_t)page_dir)
		result = NULL;
#endif /* !__minix */
	else if (size <= malloc_maxsize)
		result = malloc_bytes(size);
	else
		result = malloc_pages(size);

	if (malloc_abort && result == NULL)
		wrterror("allocation failed.\n");

	if (malloc_zero && result != NULL)
		memset(result, 0, size);

	return result;
}
/*
 * Change the size of an allocation.
 */
static void *
irealloc(void *ptr, size_t size)
{
	void *p;
	size_t osize, idx;
	struct pginfo **mp;
	size_t i;

	if (suicide)
		abort();

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return 0;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return 0;
	}

	mp = &page_dir[idx];

	if (*mp == MALLOC_FIRST) {			/* Page allocation */

		/* Check the pointer */
		if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
			wrtwarning("modified (page-) pointer.\n");
			return NULL;
		}

		/* Find the size in bytes */
		for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
			osize += malloc_pagesize;

		if (!malloc_realloc &&			/* unless we have to, */
		    size <= osize &&			/* .. or are too small, */
		    size > (osize - malloc_pagesize)) {	/* .. or can free a page, */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;			/* don't do anything. */
		}

	} else if (*mp >= MALLOC_MAGIC) {		/* Chunk allocation */

		/* Check the pointer for sane values */
		if (((size_t)(uintptr_t)ptr & ((*mp)->size-1))) {
			wrtwarning("modified (chunk-) pointer.\n");
			return NULL;
		}

		/* Find the chunk index in the page */
		i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> (*mp)->shift;

		/* Verify that it isn't a free chunk already */
		if ((*mp)->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
			wrtwarning("chunk is already free.\n");
			return NULL;
		}

		osize = (*mp)->size;

		if (!malloc_realloc &&		/* Unless we have to, */
		    size <= osize &&		/* ..or are too small, */
		    (size > osize / 2 ||	/* ..or could use a smaller size, */
		    osize == malloc_minsize)) {	/* ..(if there is one) */
			if (malloc_junk)
				memset((u_char *)ptr + size, SOME_JUNK, osize-size);
			return ptr;		/* ..Don't do anything */
		}

	} else {
		wrtwarning("pointer to wrong page.\n");
		return NULL;
	}

	p = imalloc(size);

	if (p != NULL) {
		/* copy the lesser of the two sizes, and free the old one */
		if (osize == 0 || size == 0)
			;
		else if (osize < size)
			memcpy(p, ptr, osize);
		else
			memcpy(p, ptr, size);
		ifree(ptr);
	}

	return p;
}
/*
 * Free a sequence of pages
 */

static inline void
free_pages(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pgfree *pf, *pt = NULL;
	size_t l;
	void *tail;

	if (info == MALLOC_FREE) {
		wrtwarning("page is already free.\n");
		return;
	}

	if (info != MALLOC_FIRST) {
		wrtwarning("pointer to wrong page.\n");
		return;
	}

	if ((size_t)(uintptr_t)ptr & malloc_pagemask) {
		wrtwarning("modified (page-) pointer.\n");
		return;
	}

	/* Count how many pages and mark them free at the same time */
	page_dir[idx] = MALLOC_FREE;
	for (i = 1; page_dir[idx+i] == MALLOC_FOLLOW; i++)
		page_dir[idx + i] = MALLOC_FREE;

	l = i << malloc_pageshift;

	if (malloc_junk)
		memset(ptr, SOME_JUNK, l);

#if !defined(__minix)
	if (malloc_hint)
		madvise(ptr, l, MADV_FREE);
#endif /* !defined(__minix) */

	tail = (char *)ptr+l;

	/* add to free-list */
	if (px == NULL)
		px = imalloc(sizeof *px);	/* This cannot fail... */
	px->page = ptr;
	px->end = tail;
	px->size = l;

	if (free_list.next == NULL) {

		/* Nothing on free list, put this at head */
		px->next = free_list.next;
		px->prev = &free_list;
		free_list.next = px;
		pf = px;
		px = NULL;

	} else {

		/* Find the right spot, leave pf pointing to the modified entry. */
		tail = (char *)ptr+l;

		for (pf = free_list.next; pf->end < ptr && pf->next != NULL;
		    pf = pf->next)
			; /* Race ahead here */

		if (pf->page > tail) {
			/* Insert before entry */
			px->next = pf;
			px->prev = pf->prev;
			pf->prev = px;
			px->prev->next = px;
			pf = px;
			px = NULL;
		} else if (pf->end == ptr) {
			/* Append to the previous entry */
			pf->end = (char *)pf->end + l;
			pf->size += l;
			if (pf->next != NULL && pf->end == pf->next->page) {
				/* And collapse the next too. */
				pt = pf->next;
				pf->end = pt->end;
				pf->size += pt->size;
				pf->next = pt->next;
				if (pf->next != NULL)
					pf->next->prev = pf;
			}
		} else if (pf->page == tail) {
			/* Prepend to entry */
			pf->size += l;
			pf->page = ptr;
		} else if (pf->next == NULL) {
			/* Append at tail of chain */
			px->next = NULL;
			px->prev = pf;
			pf->next = px;
			pf = px;
			px = NULL;
		} else {
			wrterror("freelist is destroyed.\n");
		}
	}

	/* Return something to OS ? */
	if (pf->next == NULL &&			/* If we're the last one, */
	    pf->size > malloc_cache &&		/* ..and the cache is full, */
	    pf->end == malloc_brk &&		/* ..and none behind us, */
	    malloc_brk == sbrk((intptr_t)0)) {	/* ..and it's OK to do... */

		/*
		 * Keep the cache intact.  Notice that the '>' above guarantees that
		 * the pf will always have at least one page afterwards.
		 */
		pf->end = (char *)pf->page + malloc_cache;
		pf->size = malloc_cache;

		brk(pf->end);
		malloc_brk = pf->end;

		idx = ptr2idx(pf->end);

		for (i = idx; i <= last_idx;)
			page_dir[i++] = MALLOC_NOT_MINE;

		last_idx = idx - 1;

		/* XXX: We could realloc/shrink the pagedir here I guess. */
	}
	if (pt != NULL)
		ifree(pt);
}
/*
 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 */

static inline void
free_bytes(void *ptr, size_t idx, struct pginfo *info)
{
	size_t i;
	struct pginfo **mp;
	void *vp;

	/* Find the chunk number on the page */
	i = ((size_t)(uintptr_t)ptr & malloc_pagemask) >> info->shift;

	if (((size_t)(uintptr_t)ptr & (info->size-1))) {
		wrtwarning("modified (chunk-) pointer.\n");
		return;
	}

	if (info->bits[i/MALLOC_BITS] & (1UL << (i % MALLOC_BITS))) {
		wrtwarning("chunk is already free.\n");
		return;
	}

	if (malloc_junk)
		memset(ptr, SOME_JUNK, (size_t)info->size);

	info->bits[i/MALLOC_BITS] |= (u_int)(1UL << (i % MALLOC_BITS));
	info->free++;

	mp = page_dir + info->shift;

	if (info->free == 1) {

		/* Page became non-full */

		mp = page_dir + info->shift;
		/* Insert in address order */
		while (*mp && (*mp)->next && (*mp)->next->page < info->page)
			mp = &(*mp)->next;
		info->next = *mp;
		*mp = info;
		return;
	}

	if (info->free != info->total)
		return;

	/* Find & remove this page in the queue */
	while (*mp != info) {
		mp = &((*mp)->next);
#ifdef MALLOC_EXTRA_SANITY
		if (!*mp)
			wrterror("(ES): Not on queue.\n");
#endif /* MALLOC_EXTRA_SANITY */
	}
	*mp = info->next;

	/* Free the page & the info structure if need be */
	page_dir[idx] = MALLOC_FIRST;
	vp = info->page;		/* Order is important ! */
	if (vp != (void*)info)
		ifree(info);
	ifree(vp);
}
static void
ifree(void *ptr)
{
	struct pginfo *info;
	size_t idx;

	/* This is legal */
	if (ptr == NULL)
		return;

	/* If we're already sinking, don't make matters any worse. */
	if (suicide)
		return;

	idx = ptr2idx(ptr);

	if (idx < malloc_pageshift) {
		wrtwarning("junk pointer, too low to make sense.\n");
		return;
	}

	if (idx > last_idx) {
		wrtwarning("junk pointer, too high to make sense.\n");
		return;
	}

	info = page_dir[idx];

	if (info < MALLOC_MAGIC)
		free_pages(ptr, idx, info);
	else
		free_bytes(ptr, idx, info);
	return;
}
static int malloc_active;	/* Recursion flag for public interface. */
static unsigned malloc_started;	/* Set when initialization has been done */

static void *
pubrealloc(void *ptr, size_t size, const char *func)
{
	void *r;
	int err = 0;

	/*
	 * If a thread is inside our code with a functional lock held, and then
	 * catches a signal which calls us again, we would get a deadlock if the
	 * lock is not of a recursive type.
	 */
	_MALLOC_LOCK();
	malloc_func = func;
	if (malloc_active > 0) {
		if (malloc_active == 1) {
			wrtwarning("recursive call\n");
			malloc_active = 2;
		}
		_MALLOC_UNLOCK();
		errno = EINVAL;
		return (NULL);
	}
	malloc_active = 1;

	if (!malloc_started) {
		if (ptr != NULL) {
			wrtwarning("malloc() has never been called\n");
			malloc_active = 0;
			_MALLOC_UNLOCK();
			errno = EINVAL;
			return (NULL);
		}
		malloc_init();
		malloc_started = 1;
	}

	if (ptr == ZEROSIZEPTR)
		ptr = NULL;
	if (malloc_sysv && !size) {
		if (ptr != NULL)
			ifree(ptr);
		r = NULL;
	} else if (!size) {
		if (ptr != NULL)
			ifree(ptr);
		r = ZEROSIZEPTR;
	} else if (ptr == NULL) {
		r = imalloc(size);
		err = (r == NULL);
	} else {
		r = irealloc(ptr, size);
		err = (r == NULL);
	}
	UTRACE(ptr, size, r);
	malloc_active = 0;
	_MALLOC_UNLOCK();
	if (malloc_xmalloc && err)
		wrterror("out of memory\n");
	return (r);
}
/*
 * These are the public exported interface routines.
 */
void *
malloc(size_t size)
{

	return pubrealloc(NULL, size, " in malloc():");
}
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int err;
	void *result;

	if (!malloc_started) {
		malloc_init();
		malloc_started = 1;
	}

	/* Make sure that alignment is a large enough power of 2. */
	if (((alignment - 1) & alignment) != 0 || alignment < sizeof(void *) ||
	    alignment > malloc_pagesize)
		return EINVAL;

	/*
	 * (size | alignment) is enough to assure the requested alignment, since
	 * the allocator always allocates power-of-two blocks.
	 */
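	/*
	 * Worked example (illustrative, not part of the original source):
	 * for size = 24 and alignment = 16, (size | alignment) == 24, which
	 * the chunk allocator rounds up to a 32-byte chunk that is 32-byte
	 * aligned within its page, so the result is aligned to at least the
	 * requested 16 bytes.
	 */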
	err = errno;	/* Protect errno against changes in pubrealloc(). */
	result = pubrealloc(NULL, (size | alignment), " in posix_memalign()");
	errno = err;

	if (result == NULL)
		return ENOMEM;

	*memptr = result;
	return 0;
}
void *
calloc(size_t num, size_t size)
{
	void *ret;

	if (size != 0 && (num * size) / size != num) {
		/* size_t overflow. */
		errno = ENOMEM;
		return (NULL);
	}

	ret = pubrealloc(NULL, num * size, " in calloc():");

	if (ret != NULL)
		memset(ret, 0, num * size);

	return ret;
}
void
free(void *ptr)
{

	pubrealloc(ptr, 0, " in free():");
}

void *
realloc(void *ptr, size_t size)
{

	return pubrealloc(ptr, size, " in realloc():");
}
/*
 * Begin library-private functions, used by threading libraries for protection
 * of malloc during fork().  These functions are only called if the program is
 * running in threaded mode, so there is no need to check whether the program
 * is threaded here.
 */
void
_malloc_prefork(void)
{

	_MALLOC_LOCK();
}

void
_malloc_postfork(void)
{

	_MALLOC_UNLOCK();
}