/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Memory management: malloc(), realloc(), free().
 *
 * The following #-parameters may be redefined:
 *	SEGMENTED: if defined, memory requests are assumed to be
 *		non-contiguous across calls of GETCORE's.
 *	GETCORE: a function to get more core memory. If not SEGMENTED,
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is (char *)(-1).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 * This algorithm is based on a best fit strategy with lists of
 * free elts maintained in a self-adjusting binary tree. Each list
 * contains all elts of the same size. The tree is ordered by size.
 * For results on self-adjusting trees, see the paper:
 *	Self-Adjusting Binary Trees,
 *	DD Sleator & RE Tarjan, JACM 1985.
 *
 * The header of a block contains the size of the data part in bytes.
 * Since the size of a block is 0%4, the low two bits of the header
 * are free and used as follows:
 *
 *	BIT0:	1 for busy (block is in use), 0 for free.
 *	BIT1:	if the block is busy, this bit is 1 if the
 *		preceding block in contiguous memory is free.
 *		Otherwise, it is always 0.
 */
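
/*
 * Illustrative sketch, not part of the original source: one way the bit
 * layout described above could be decoded, assuming BIT0 == 0x1 and
 * BIT1 == 0x2 (the allocator itself uses the SIZE()/ISBIT0()/ISBIT1()
 * style macros seen below).
 */
#if 0
static size_t
hdr_size(size_t h)		/* hypothetical helper */
{
	return (h & ~(size_t)0x3);	/* strip the two flag bits */
}

static int
hdr_busy(size_t h)		/* hypothetical helper */
{
	return ((h & 0x1) != 0);	/* BIT0: block is in use */
}

static int
hdr_prev_free(size_t h)		/* hypothetical helper */
{
	return ((h & 0x2) != 0);	/* BIT1: preceding contiguous block is free */
}
#endif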
/*
 * Some abusers of the system (notably java1.2) acquire __malloc_lock
 * in order to prevent threads from holding it while they are being
 * suspended via thr_suspend() or thr_suspend_allmutators().
 * This never worked when alternate malloc() libraries were used
 * because they don't use __malloc_lock for their locking strategy.
 * We leave __malloc_lock as an external variable to satisfy these
 * old programs, but we define a new lock, private to libc, to do the
 * real locking: libc_malloc_lock.  This puts libc's malloc() package
 * on the same footing as all other malloc packages.
 */
mutex_t __malloc_lock = DEFAULTMUTEX;
mutex_t libc_malloc_lock = DEFAULTMUTEX;
static TREE	*Root,		/* root of the free tree */
		*Bottom,	/* the last free chunk in the arena */
		*_morecore(size_t);	/* function to get more core */

static char	*Baddr;		/* current high address of the arena */
static char	*Lfree;		/* last freed block with data intact */
static void	t_delete(TREE *);
static void	t_splay(TREE *);
static void	realfree(void *);
static void	cleanfree(void *);
static void	*_malloc_unlocked(size_t);
#define	FREESIZE (1<<5)	/* size for preserving free blocks until next malloc */
#define	FREEMASK FREESIZE-1

static void	*flist[FREESIZE];	/* list of blocks to be freed on next malloc */
static int	freeidx;		/* index of free blocks in flist % FREESIZE */
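
/*
 * Illustrative sketch, not part of the original source: flist acts as a
 * small ring buffer of FREESIZE pending frees.  A new entry first evicts
 * (really frees) whatever already occupies its slot, then the index wraps
 * via FREEMASK.  This hypothetical helper mirrors the logic that appears
 * in _free_unlocked() further down.
 */
#if 0
static void
defer_free(void *old)			/* hypothetical helper */
{
	if (flist[freeidx] != NULL)	/* slot occupied: do the real free now */
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;	/* remember block, data left intact */
	freeidx = (freeidx + 1) & FREEMASK;	/* advance and wrap */
}
#endif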
/*
 * Interfaces used only by atfork_init() functions.
 */
	(void) mutex_lock(&libc_malloc_lock);

	(void) mutex_unlock(&libc_malloc_lock);
/*
 * Allocation of small blocks
 */
static TREE	*List[MINSIZE/WORDSIZE-1];	/* lists of small blocks */
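
/*
 * Illustrative note, not part of the original source: a request of 'size'
 * bytes (already a multiple of WORDSIZE and smaller than MINSIZE) is
 * served from List[size / WORDSIZE - 1].  For example, assuming
 * WORDSIZE == 8, an 8-byte request maps to List[0] and a 16-byte request
 * maps to List[1].
 */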
_smalloc(size_t size)

	ASSERT(size % WORDSIZE == 0);

	/* want to return a unique pointer on malloc(0) */

	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {

		/* number of blocks to get at one time */
#define	NPS (WORDSIZE*8)
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((List[i] = _malloc_unlocked((size + WORDSIZE) * NPS)) == 0)

		/* make them into a link list */
		for (n = 0, np = List[i]; n < NPS; ++n) {

	/* allocate from the head of the queue */
	if (!primary_link_map) {

	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	ret = _malloc_unlocked(size);
	(void) mutex_unlock(&libc_malloc_lock);
_malloc_unlocked(size_t size)

	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {

	/* make sure that size is 0 mod ALIGN */

	/* see if the last free block can be used */

			/* exact match, use it as is */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK;	/* 1 back */
			flist[freeidx] = Lfree = NULL;

		} else if (size >= MINSIZE && n > size) {

			/* got a big enough piece */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK;	/* 1 back */
			flist[freeidx] = Lfree = NULL;
			o_bit1 = SIZE(sp) & BIT1;

	/* perform free's of space since last malloc */

		return (_smalloc(size));

	/* search for an elt of the right size */

		if (SIZE(tp) >= size) {
			if (n == 0 || n >= SIZE(tp)) {

			} else {	/* branch right */

	} else if (tp != Root) {
		/* make the searched-to element the root */

	/* if found none fitted in the tree */

		if (Bottom && size <= SIZE(Bottom)) {

		} else if ((sp = _morecore(size)) == NULL)	/* no more memory */

	/* tell the forward neighbor that we're busy */
	CLRBIT1(SIZE(NEXT(sp)));

	ASSERT(ISBIT0(SIZE(NEXT(sp))));

	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {

	} else if (BOTTOM(sp))

	/* return the allocated space */
	SIZE(sp) |= BIT0 | o_bit1;
/*
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
realloc(void *old, size_t size)

	if (!primary_link_map) {

	assert_no_libc_locks_held();

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {

	/* pointer to the block */
	(void) mutex_lock(&libc_malloc_lock);

	new = _malloc_unlocked(size);
	(void) mutex_unlock(&libc_malloc_lock);
	/* perform free's of space since last malloc */

	/* make sure that size is 0 mod ALIGN */

	/* if the block was freed, data has been destroyed. */

		(void) mutex_unlock(&libc_malloc_lock);

	if (size == SIZE(tp)) {

		(void) mutex_unlock(&libc_malloc_lock);

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		/* free if size is zero */

			SETOLD01(SIZE(tp), ts);

			(void) mutex_unlock(&libc_malloc_lock);

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {

		if (!ISBIT0(SIZE(np))) {
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;

			CLRBIT1(SIZE(NEXT(np)));
	/* not enough & at TRUE end of memory, try extending core */
	if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {

		if ((tp = _morecore(size)) == NULL) {

	/* got enough space to use */
	if (size <= SIZE(tp)) {

		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {

		} else if (BOTTOM(tp))

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		(void) mutex_unlock(&libc_malloc_lock);

	/* call malloc to get a new block */

	SETOLD01(SIZE(tp), ts);
	if ((new = _malloc_unlocked(size)) != NULL) {

		MEMCOPY(new, old, ts);

		(void) mutex_unlock(&libc_malloc_lock);
	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */

	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) {			/* case 1. */
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&libc_malloc_lock);

		} else if (size < MINSIZE) {		/* case 2. */

	} else if (size < MINSIZE) {			/* case 3. */

	} else if (ISBIT1(ts) &&
	    (SIZE(np = LAST(tp)) + SIZE(tp) + WORDSIZE) >= size) {
		ASSERT(!ISBIT0(SIZE(np)));

		SIZE(np) += SIZE(tp) + WORDSIZE;

		/*
		 * Since the copy may overlap, use memmove() if available.
		 * Otherwise, copy by hand.
		 */
		(void) memmove(DATA(np), old, SIZE(tp));

	SETOLD01(SIZE(tp), ts);
	(void) mutex_unlock(&libc_malloc_lock);
/*
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * O(nlogn) behaviour for the insert/delete/find set of operations
 * on the tree. In practice, however, free is much more infrequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
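
/*
 * Illustrative example, not part of the original source: if the freed
 * block tp has SIZE(tp) == 64 and the following contiguous block np is
 * also free with SIZE(np) == 32, the coalescing step below yields
 * SIZE(tp) = 64 + 32 + WORDSIZE, since np's one-word header is absorbed
 * into the merged payload.
 */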
	/* pointer to the block */

	/* small block, put it in the right linked list */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = List[ts];

	/* see if coalescing with next block is warranted */

	if (!ISBIT0(SIZE(np))) {

		SIZE(tp) += SIZE(np) + WORDSIZE;

	/* the same with the preceding block */

		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);

		SIZE(np) += SIZE(tp) + WORDSIZE;

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* the last word of the block contains self's address */

	/* set bottom block, or insert in the free tree */

	/* search for the place to insert */

		if (SIZE(np) > size) {

		} else if (SIZE(np) < size) {

			if ((sp = PARENT(np)) != NULL) {

			/* insert to head of list */
			if ((sp = LEFT(np)) != NULL)

			if ((sp = RIGHT(np)) != NULL)

			/* doubly link list */

	/* tell next block that this one is free */
	SETBIT1(SIZE(NEXT(tp)));

	ASSERT(ISBIT0(SIZE(NEXT(tp))));
/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
_morecore(size_t size)

	/* compute new amount of memory to get */

	n = (ssize_t)size + 2 * WORDSIZE;

	/* need to pad size out so that addr is aligned */
	if ((((uintptr_t)addr) % ALIGN) != 0)
		offset = ALIGN - (uintptr_t)addr % ALIGN;

	/* if not segmented memory, what we need may be smaller */

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;

	/* check if nsize request could overflow in GETCORE */
	if ((size_t)nsize > MAX_MALLOC - (uintptr_t)addr) {

	if ((size_t)nsize <= MAX_GETCORE) {
		if (GETCORE(nsize) == ERRCORE)

		/*
		 * the value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * Argument to GETCORE() must be multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return brk point
		 * to previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */

		if (GETCORE(delta) == ERRCORE) {
			if (addr != GETCORE(0))
				(void) GETCORE(-MAX_GETCORE);

			nsize -= MAX_GETCORE;

	/* contiguous memory */

		n += SIZE(tp) + 2 * WORDSIZE;

		addr = Baddr - WORDSIZE;

	/* new bottom address */

	/* new bottom block */
	tp = (TREE *)(uintptr_t)addr;
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
/*
 * Tree rotation functions (BU: bottom-up, TD: top-down)
 */

#define	LEFT1(x, y)		\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	LEFT(y) = x; PARENT(x) = y

#define	RIGHT1(x, y)		\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	RIGHT(y) = x; PARENT(x) = y

#define	BULEFT2(x, y, z)	\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	LEFT(z) = y; PARENT(y) = z; LEFT(y) = x; PARENT(x) = y

#define	BURIGHT2(x, y, z)	\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	RIGHT(z) = y; PARENT(y) = z; RIGHT(y) = x; PARENT(x) = y

#define	TDLEFT2(x, y, z)	\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; LEFT(z) = x;

#define	TDRIGHT2(x, y, z)	\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; RIGHT(z) = x;
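
/*
 * Illustrative diagram, not part of the original source: LEFT1(x, y)
 * rotates y (the right child of x) above x while preserving the
 * ordering:
 *
 *	    x                 y
 *	   / \               / \
 *	  A   y     ==>     x   C
 *	     / \           / \
 *	    B   C         A   B
 *
 * The two-step macros (BULEFT2/BURIGHT2 and TDLEFT2/TDRIGHT2) chain two
 * such rotations for the bottom-up and top-down splay cases.
 */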
/*
 * Delete a tree element
 */

	/* if this is a non-tree node */

		if ((sp = LINKFOR(op)) != NULL)

	/* make op the root of the tree */

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {

		if ((sp = LEFT(op)) != NULL)

		if ((sp = RIGHT(op)) != NULL)

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {

		/* make the right-end of the left subtree its root */
		while ((sp = RIGHT(tp)) != NULL) {
			if ((gp = RIGHT(sp)) != NULL) {

		/* hook the right subtree of op to the above elt */
		RIGHT(tp) = RIGHT(op);
		PARENT(RIGHT(tp)) = tp;

	} else if ((tp = RIGHT(op)) != NULL)	/* no left subtree */
/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */

		/* x is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);

			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
/*
 * Performs a delayed free of the block pointed to
 * by old. The pointer to old is saved on a list, flist,
 * until the next malloc or realloc. At that time, all the
 * blocks pointed to in flist are actually freed via
 * realfree(). This allows the contents of free blocks to
 * remain undisturbed until the next malloc or realloc.
 */

	if (!primary_link_map) {

	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);

	(void) mutex_unlock(&libc_malloc_lock);
_free_unlocked(void *old)

	/*
	 * Make sure the same data block is not freed twice.
	 * 3 cases are checked. It returns immediately if any
	 * one of the conditions is true.
	 *
	 *	2. Not in use or freed already.
	 *	3. In the free list.
	 */

	if (!ISBIT0(SIZE(BLOCK(old))))

	for (i = 0; i < freeidx; i++)

	if (flist[freeidx] != NULL)
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;
	freeidx = (freeidx + 1) & FREEMASK;	/* one forward */
/*
 * cleanfree() frees all the blocks pointed to by flist.
 *
 * realloc() should work if it is called with a pointer
 * to a block that was freed since the last call to malloc() or
 * realloc(). If cleanfree() is called from realloc(), ptr
 * is set to the old block and that block should not be
 * freed since it is actually being reallocated.
 */

	flp = (char **)&(flist[freeidx]);

	if (flp == (char **)&(flist[0]))
		flp = (char **)&(flist[FREESIZE]);