 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 1986 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.

#pragma ident	"%Z%%M% %I% %E% SMI"
 * Yet another memory allocator, this one based on a method
 * described in C.J. Stephenson, "Fast Fits".
 *
 * The basic data structure is a "Cartesian" binary tree, in which
 * nodes are ordered by ascending addresses (thus minimizing free
 * list insertion time) and block sizes decrease with depth in the
 * tree (thus minimizing search time for a block of a given size).
 *
 * In other words: for any node s, let D(s) denote the set of
 * descendants of s; for all x in D(left(s)) and all y in
 * D(right(s)), we have:
 *
 *    a. addr(x) < addr(s) < addr(y)
 *    b. len(x) <= len(s) >= len(y)
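/*
 * Illustrative sketch (not part of the original file): the invariant
 * above written as a recursive check.  It assumes the Freehdr fields
 * (left, right, block) and the weight() macro from mallint.h, with
 * weight(NIL) == 0; the DEBUG-only cartesian() routine later in this
 * file performs a more thorough version of the same test.
 */
static int
wellformed(Freehdr s, Dblk lo, Dblk hi)
{
    if (s == NIL)
        return (1);
    if ((lo != NULL && s->block <= lo) ||       /* violates (a) */
        (hi != NULL && s->block >= hi))
        return (0);
    if (weight(s->left) > weight(s) ||          /* violates (b) */
        weight(s->right) > weight(s))
        return (0);
    return (wellformed(s->left, lo, s->block) &&
        wellformed(s->right, s->block, hi));
}
/* e.g. wellformed(_root, NULL, NULL) should hold after every insert() */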
/* system interface */

extern int getpagesize();

static int	nbpg = 0;		/* set by calling getpagesize() */
static bool	morecore(uint);		/* get more memory into free space */

#define	ptr_t		void *		/* ANSI C says these are voids */
#define	free_t		void		/* ANSI says void free(ptr_t ptr) */
#define	free_return(x)	return

#define	ptr_t		char *		/* BSD still (4.3) wants char*'s */
#define	free_t		int		/* BSD says int free(ptr_t ptr) */
#define	free_return(x)	return(x)
/* SystemV-compatible information structure */

#define	INIT_NLBLKS	100
#define	INIT_GRAIN	ALIGNSIZ

struct mallinfo __mallinfo = {
    0,0,0,0,0,0,0,0,0,0,			/* basic info */
    INIT_MXFAST, INIT_NLBLKS, INIT_GRAIN,	/* mallopt options */
/* heap data structures */

Freehdr	_root = NIL;		/* root of free space list */
char	*_lbound = NULL;	/* lower bound of heap */
char	*_ubound = NULL;	/* upper bound of heap */
/* free header list management */

static Freehdr	getfreehdr(void);
static void	putfreehdr(Freehdr);
static Freehdr	freehdrptr = NIL;	/* ptr to block of available headers */
static int	nfreehdrs = 0;		/* # of headers in current block */
static Freehdr	freehdrlist = NIL;	/* List of available headers */

static void	error(char *, ...);
		/* sets errno; prints msg and aborts if DEBUG is on */

static int	reclaim(Dblk, uint, int);
#ifdef DEBUG	/* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */

int	malloc_debug(int);
int	malloc_verify(void);
static int	debug_level = 1;

 * A block with a negative size, a size that is not a multiple
 * of ALIGNSIZ, a size greater than the current extent of the
 * heap, or a size which extends beyond the end of the heap is

#define	badblksize(p,size)\
	(  (size) < SMALLEST_BLK \
	|| (size) & (ALIGNSIZ-1) \
	|| (size) > heapsize() \
	|| ((char*)(p))+(size) > _ubound )

#else	/* !DEBUG ================================================= */

#define	malloc_debug(level)	0
#define	malloc_verify()		1
#define	debug_level		0
#define	badblksize(p,size)	0

#endif	/* !DEBUG <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
 * insert (newblk, len)
 *    Inserts a new node in the free space tree, placing it
 *    in the correct position with respect to the existing nodes.
 *
 *    Starting from the root, a binary search is made for the new
 *    node.  If this search were allowed to continue, it would
 *    eventually fail (since there cannot already be a node at the
 *    given address); but in fact it stops when it reaches a node in
 *    the tree which has a length less than that of the new node (or
 *    when it reaches a null tree pointer).
 *
 *    The new node is then inserted at the root of the subtree for
 *    which the shorter node forms the old root (or in place of the
 *
 *    newblk: Ptr to the block to insert
 *    len: Length of new node
insert(Dblk newblk, uint len)

    Freehdr *fpp;		/* Address of ptr to subtree */
    Freehdr *left_hook;		/* Temp for insertion */
    Freehdr *right_hook;	/* Temp for insertion */

     * check for bad block size.
    if (badblksize(newblk, len)) {
        error("insert: bad block size (%d) at %#x\n", len, newblk);

     * Search for the first node which has a weight less
     * than that of the new node; this will be the
     * point at which we insert the new node.
    while (weight(x) >= len) {
        if (newblk < x->block)
     * Perform root insertion.  The variable x traces a path through
     * the tree, and with the help of left_hook and right_hook,
     * rewrites all links that cross the territory occupied
    if ((newhdr = getfreehdr()) == NIL) {
        /* Error message returned by getfreehdr() */

    newhdr->block = newblk;

     * set length word in the block for consistency with the header.

    left_hook = &newhdr->left;
    right_hook = &newhdr->right;

     * The name 'left_hook' is somewhat confusing, since
     * it is always set to the address of a .right link
     * field.  However, its value is always an address
     * below (i.e., to the left of) newblk.  Similarly
     * for right_hook.  The values of left_hook and
     * right_hook converge toward the value of newblk,
     * as in a classical binary search.
        if (x->block < newblk) {
             * rewrite link crossing from the left
            left_hook = &x->right;

             * rewrite link crossing from the right
            right_hook = &x->left;

    *left_hook = *right_hook = NIL;	/* clear remaining hooks */
 * deletes a node from a cartesian tree.  p is the address of
 * a pointer to the node which is to be deleted.
 *
 * The left and right branches of the node to be deleted define two
 * subtrees which are to be merged and attached in place of the
 * deleted node.  Each node on the inside edges of these two
 * subtrees is examined and longer nodes are placed above the
 *
 * *p is assumed to be non-null.
    Freehdr left_branch;	/* left subtree of deleted node */
    Freehdr right_branch;	/* right subtree of deleted node */

    left_branch = x->left;
    left_weight = weight(left_branch);
    right_branch = x->right;
    right_weight = weight(right_branch);

    while (left_branch != right_branch) {
         * iterate until left branch and right branch are
        if (left_weight >= right_weight) {
             * promote the left branch
            if (left_branch != NIL) {
                if (left_weight == 0) {
                    /* zero-length block */
                    error("blocksize=0 at %#x\n",
                        (int)left_branch->block->data);

                p = &left_branch->right;
                left_weight = weight(left_branch);

             * promote the right branch
            if (right_branch != NIL) {
                if (right_weight == 0) {
                    /* zero-length block */
                    error("blocksize=0 at %#x\n",
                        (int)right_branch->block->data);

                p = &right_branch->left;
                right_weight = weight(right_branch);
 * Demotes a node in a cartesian tree, if necessary, to establish
 * the required vertical ordering.
 *
 * The left and right subtrees of the node to be demoted are to
 * be partially merged and attached in place of the demoted node.
 * The nodes on the inside edges of these two subtrees are
 * examined and the longer nodes are placed above the shorter
 * ones, until a node is reached which has a length no greater
 * than that of the node being demoted (or until a null pointer
 * is reached).  The node is then attached at this point, and
 * the remaining subtrees (if any) become its descendants.
 *
 *    a. All the nodes in the tree, including the one to be demoted,
 *       must be correctly ordered horizontally;
 *    b. All the nodes except the one to be demoted must also be
 *       correctly positioned vertically.  The node to be demoted
 *       may be already correctly positioned vertically, or it may
 *       have a length which is less than that of one or both of
    Freehdr x;			/* addr of node to be demoted */
    Freehdr right_branch;

    x_weight = weight(x);
    left_branch = x->left;
    right_branch = x->right;
    left_weight = weight(left_branch);
    right_weight = weight(right_branch);

    while (left_weight > x_weight || right_weight > x_weight) {
         * select a descendant branch for promotion
        if (left_weight >= right_weight) {
             * promote the left branch
            p = &left_branch->right;
            left_weight = weight(left_branch);

             * promote the right branch
            p = &right_branch->left;
            right_weight = weight(right_branch);

    *p = x;			/* attach demoted node here */
    x->left = left_branch;
    x->right = right_branch;
 * Allocates a block of length specified in bytes.  If nbytes is
 * zero, a valid pointer (that should not be dereferenced) is returned.
 *
 * The freelist is searched by descending the tree from the root
 * so that at each decision point the "better fitting" branch node
 * is chosen (i.e., the shorter one, if it is long enough, or
 * the longer one, otherwise).  The descent stops when both
 * branch nodes are too short.
 *
 * Malloc returns a pointer to the allocated block.  A null
 * pointer indicates an error.
 *
 * ENOMEM: storage could not be allocated.
 *
 * EINVAL: either the argument was invalid, or the heap was found
 * to be in an inconsistent state.  More detailed information may
 * be obtained by enabling range checks (cf., malloc_debug()).
 *
 * Note: In this implementation, each allocated block includes a
 * length word, which occurs before the address seen by the caller.
 * Allocation requests are rounded up to a multiple of wordsize.
    Freehdr allocp;		/* ptr to node to be allocated */
    Freehdr *fpp;		/* for tree modifications */
    Freehdr right_branch;
    Dblk retblk;		/* block returned to the user */

     * if rigorous checking was requested, do it.
    if (debug_level >= 2) {

     * add the size of a length word to the request, and
     * guarantee at least one word of usable data.
    if (nbytes < SMALLEST_BLK) {
        nbytes = SMALLEST_BLK;

        nbytes = roundup(nbytes, ALIGNSIZ);

     * ensure that at least one block is big enough to satisfy
    if (weight(_root) < nbytes) {
         * the largest block is not enough.
        if (!morecore(nbytes))

     * search down through the tree until a suitable block is
     * found.  At each decision point, select the better
    left_branch = allocp->left;
    right_branch = allocp->right;
    left_weight = weight(left_branch);
    right_weight = weight(right_branch);

    while (left_weight >= nbytes || right_weight >= nbytes) {
        if (left_weight <= right_weight) {
            if (left_weight >= nbytes) {
                allocp = left_branch;

                fpp = &allocp->right;
                allocp = right_branch;

            if (right_weight >= nbytes) {
                fpp = &allocp->right;
                allocp = right_branch;

                allocp = left_branch;

        left_branch = allocp->left;
        right_branch = allocp->right;
        left_weight = weight(left_branch);
        right_weight = weight(right_branch);

     * allocate storage from the selected node.
    if (allocp->size - nbytes <= SMALLEST_BLK) {
         * not big enough to split; must leave at least
         * a dblk's worth of space.
        retblk = allocp->block;

     * Split the selected block n bytes from the top.  The
     * n bytes at the top are returned to the caller; the
     * remainder of the block goes back to free space.
    retblk = allocp->block;
    nblk = nextblk(retblk, nbytes);	/* ^next block */
    nblk->size = allocp->size = retblk->size - nbytes;
    __mallinfo.ordblks++;		/* count fragments */

     * Change the selected node to point at the newly split
     * block, and move the node to its proper place in
     * the free space list.
    allocp->block = nblk;

     * set the length field of the allocated block; we need
     * this because free() does not specify a length.
    retblk->size = nbytes;

    /* maintain statistics */
    __mallinfo.uordbytes += retblk->size;	/* bytes allocated */
    __mallinfo.allocated++;			/* frags allocated */
    if (nbytes < __mallinfo.mxfast)
        __mallinfo.smblks++;			/* kludge to pass the SVVS */

    return((ptr_t)retblk->data);
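/*
 * Illustrative caller-side sketch (not part of the original file),
 * showing the failure modes documented above: a null return with
 * errno == ENOMEM means the heap could not be grown, while EINVAL
 * indicates a bad argument or an inconsistent heap (see
 * malloc_debug() below).  checked_alloc() is a hypothetical helper;
 * it assumes <errno.h> has been included.
 */
static ptr_t
checked_alloc(uint nbytes)
{
    ptr_t p = malloc(nbytes);

    if (p == NULL && errno == EINVAL)
        error("checked_alloc: heap inconsistency detected\n");
    return (p);	/* NULL with errno == ENOMEM when space is exhausted */
}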
 * return a block to the free space tree.
 *
 * Starting at the root, search for and coalesce free blocks
 * adjacent to one given.  When the appropriate place in the
 * tree is found, insert the given block.
 *
 * Some sanity checks to avoid total confusion in the tree.
 * If the block has already been freed, return.
 * If the ptr is not from the sbrk'ed space, return.
 * If the block size is invalid, return.
    uint nbytes;		/* Size of node to be released */
    Freehdr *fpp;		/* For deletion from free list */
    Freehdr neighbor;		/* Node to be coalesced */
    Dblk neighbor_blk;		/* Ptr to potential neighbor */
    uint neighbor_size;		/* Size of potential neighbor */
    Dblk oldblk;		/* Ptr to block to be freed */

     * if rigorous checking was requested, do it.
    if (debug_level >= 2) {

     * Check the address of the old block.
    if (misaligned(ptr)) {
        error("free: illegal address (%#x)\n", ptr);

     * Freeing something that wasn't allocated isn't
     * exactly kosher, but fclose() does it routinely.
    if (ptr < (ptr_t)_lbound || ptr > (ptr_t)_ubound) {

     * Get node length by backing up by the size of a header.
     * Check for a valid length.  It must be a positive
     * multiple of ALIGNSIZ, at least as large as SMALLEST_BLK,
     * no larger than the extent of the heap, and must not
     * extend beyond the end of the heap.
    oldblk = (Dblk)((char*)ptr - ALIGNSIZ);
    nbytes = oldblk->size;
    if (badblksize(oldblk, nbytes)) {
        error("free: bad block size (%d) at %#x\n",
            (int)nbytes, (int)oldblk);

    /* maintain statistics */
    __mallinfo.uordbytes -= nbytes;	/* bytes allocated */
    __mallinfo.allocated--;		/* frags allocated */

     * Search the tree for the correct insertion point for this
     * node, coalescing adjacent free blocks along the way.
    while (neighbor != NIL) {
        neighbor_blk = neighbor->block;
        neighbor_size = neighbor->size;
        if (oldblk < neighbor_blk) {
            Dblk nblk = nextblk(oldblk, nbytes);
            if (nblk == neighbor_blk) {
                 * Absorb and delete right neighbor
                nbytes += neighbor_size;
                __mallinfo.ordblks--;
            } else if (nblk > neighbor_blk) {
                 * The block being freed overlaps
                 * another block in the tree.  This
                 * is bad news.  Return to avoid
                 * further fouling up the tree.
641 error("free: blocks %#x, %#x overlap\n",
642 (int)oldblk
, (int)neighbor_blk
);
648 fpp
= &neighbor
->left
;
650 } else if (oldblk
> neighbor_blk
) {
651 Dblk nblk
= nextblk(neighbor_blk
, neighbor_size
);
652 if (nblk
== oldblk
) {
654 * Absorb and delete left neighbor
656 oldblk
= neighbor_blk
;
657 nbytes
+= neighbor_size
;
658 __mallinfo
.ordblks
--;
660 } else if (nblk
> oldblk
) {
662 * This block has already been freed
664 error("free: block %#x was already free\n",
669 * search to the right
671 fpp
= &neighbor
->right
;
675 * This block has already been freed
676 * as "oldblk == neighbor_blk"
678 error("free: block %#x was already free\n", (int)ptr
);
683 * Note that this depends on a side effect of
684 * delete(fpp) in order to terminate the loop!
691 * Insert the new node into the free space tree
693 insert( oldblk
, nbytes
);
 * shrink(oldblk, oldsize, newsize)
 *    Decreases the size of an old block to a new size.
 *    Returns the remainder to free space.  Returns the
 *    truncated block to the caller.
shrink(Dblk oldblk, uint oldsize, uint newsize)

    if (oldsize - newsize >= SMALLEST_BLK) {
         * Block is to be contracted.  Split the old block
         * and return the remainder to free space.
        remainder = nextblk(oldblk, newsize);
        remainder->size = oldsize - newsize;
        oldblk->size = newsize;

        /* maintain statistics */
        __mallinfo.ordblks++;		/* count fragments */
        __mallinfo.allocated++;		/* negate effect of free() */

        free(remainder->data);

    return(oldblk->data);
 * realloc(ptr, nbytes)
 *
 *    Reallocate an old block with a new size, returning the old block
 *    if possible.  The block returned is guaranteed to preserve the
 *    contents of the old block up to min(size(old block), newsize).
 *
 *    For backwards compatibility, ptr is allowed to reference
 *    a block freed since the LAST call of malloc().  Thus the old
 *    block may be busy, free, or may even be nested within a free
 *
 *    Some old programs have been known to do things like the following,
 *    which is guaranteed not to work:
 *
 *        ptr = realloc(ptr,nbytes);
 *
 *    This atrocity was found in the source for diff(1).
realloc(ptr_t ptr, uint nbytes)

    uint oldneighborsize;
     * Add SVR4 semantics for OS 5.x so /usr/lib libraries
     * work correctly when running in BCP mode
        return (malloc(nbytes));

     * if rigorous checking was requested, do it.
    if (debug_level >= 2) {

     * Check the address of the old block.
    if (misaligned(ptr) ||
        ptr < (ptr_t)_lbound ||
        ptr > (ptr_t)_ubound) {
        error("realloc: illegal address (%#x)\n", ptr);

     * check location and size of the old block and its
     * neighboring block to the right.  If the old block is
     * at end of memory, the neighboring block is undefined.
    oldblk = (Dblk)((char*)ptr - ALIGNSIZ);
    oldsize = oldblk->size;
    if (badblksize(oldblk, oldsize)) {
        error("realloc: bad block size (%d) at %#x\n",

    oldneighbor = nextblk(oldblk, oldsize);

    /* *** tree search code pulled into separate subroutine *** */
    if (reclaim(oldblk, oldsize, 1) == -1) {
        return(NULL);		/* internal error */

     * At this point, we can guarantee that oldblk is out of free
     * space.  What we do next depends on a comparison of the size
     * of the old block and the requested new block size.  To do
     * this, first round up the new size request.
    newsize = nbytes + ALIGNSIZ;	/* add size of a length word */
    if (newsize < SMALLEST_BLK) {
        newsize = SMALLEST_BLK;

        newsize = roundup(newsize, ALIGNSIZ);

     * Next, examine the size of the old block, and compare it
     * with the requested new size.
    if (oldsize >= newsize) {
         * Block is to be made smaller.
        return(shrink(oldblk, oldsize, newsize));

     * Block is to be expanded.  Look for adjacent free memory.
    if (oldneighbor < (Dblk)_ubound) {
         * Search for the adjacent block in the free
         * space tree.  Note that the tree may have been
         * modified in the earlier loop.
        oldneighborsize = oldneighbor->size;
        if (badblksize(oldneighbor, oldneighborsize)) {
            error("realloc: bad blocksize(%d) at %#x\n",
                oldneighborsize, oldneighbor);

        while (weight(fp) >= oldneighborsize) {
            if (oldneighbor < freeblk) {

            else if (oldneighbor > freeblk) {
                 * search to the right

            else {		/* oldneighbor == freeblk */
                 * neighboring block is free; is it big enough?
                if (oldsize + oldneighborsize >= newsize) {
                     * Big enough.  Delete freeblk, join
                     * oldblk to neighbor, return newsize
                     * bytes to the caller, and return the
                     * remainder to free storage.

                    /* maintain statistics */
                    __mallinfo.ordblks--;
                    __mallinfo.uordbytes += oldneighborsize;

                    oldsize += oldneighborsize;
                    oldblk->size = oldsize;
                    return(shrink(oldblk, oldsize, newsize));

                     * Not big enough.  Stop looking for a

     * At this point, we know there is no free space in which to
     * expand.  Malloc a new block, copy the old block to the new,
     * and free the old block, IN THAT ORDER.
    ptr = malloc(nbytes);

    bcopy(oldblk->data, ptr, oldsize-ALIGNSIZ);
 * *** The following code was pulled out of realloc() ***
 *
 * reclaim(oldblk, oldsize, flag)
 *    If a block containing 'oldsize' bytes from 'oldblk'
 *    is in the free list, remove it from the free list.
 *    'oldblk' and 'oldsize' are assumed to include the free block header.
 *
 *    Returns 1 if block was successfully removed.
 *    Returns 0 if block was not in free list.
 *    Returns -1 if block spans a free/allocated boundary (error() called
reclaim(Dblk oldblk, uint oldsize, int flag)

     * Search the free space list for a node describing oldblk,
     * or a node describing a block containing oldblk.  Assuming
     * the size of blocks decreases monotonically with depth in
     * the tree, the loop may terminate as soon as a block smaller
     * than oldblk is encountered.
    oldneighbor = nextblk(oldblk, oldsize);

    while ((size = weight(fp)) >= oldsize) {
        if (badblksize(freeblk, size)) {
            error("realloc: bad block size (%d) at %#x\n",

        if (oldblk == freeblk) {
             *   _________________________________
             *   ---------------------------------
             *   Found oldblk in the free space tree; delete it.

            /* maintain statistics */
            __mallinfo.uordbytes += oldsize;
            __mallinfo.allocated++;

        else if (oldblk < freeblk) {
             *   _________________________________
             *   ---------------------------------
             *   Search to the left for oldblk

             *   _________________________________
             *   |     |<--oldblk--->|<--oldneighbor
             *   ---------------------------------
             *   oldblk is somewhere to the right of freeblk.
             *   Check to see if it lies within freeblk.
            freeneighbor = nextblk(freeblk, freeblk->size);
            if (oldblk >= freeneighbor) {
                 *   |<-- freeblk--->|<--- freeneighbor ...
                 *   _________________________________
                 *   ---------------------------------
                 *   no such luck; search to the right.

             * freeblk < oldblk < freeneighbor;
             * i.e., oldblk begins within freeblk.
            if (oldneighbor > freeneighbor) {
                 *   |<-- freeblk--->|<--- freeneighbor
                 *   _________________________________
                 *   |     |<--oldblk--->|<--oldneighbor
                 *   ---------------------------------
                 *   oldblk straddles a block boundary!
                error("realloc: block %#x straddles free block boundary\n", oldblk);

            else if (oldneighbor == freeneighbor) {
                 *   |<-------- freeblk------------->|
                 *   _________________________________
                 *   ---------------------------------
                 *   Oldblk is on the right end of
                 *   freeblk.  Delete freeblk, split
                 *   into two fragments, and return
                 *   the one on the left to free space.

                /* maintain statistics */
                __mallinfo.ordblks++;
                __mallinfo.uordbytes += oldsize;
                __mallinfo.allocated += 2;

                freeblk->size -= oldsize;
                free(freeblk->data);

                 *   |<-------- freeblk------------->|
                 *   _________________________________
                 *   |     |oldblk | oldneighbor |
                 *   ---------------------------------
                 *   Oldblk is in the middle of freeblk.
                 *   Delete freeblk, split into three
                 *   fragments, and return the ones on
                 *   the ends to free space.

                /* maintain statistics */
                __mallinfo.ordblks += 2;
                __mallinfo.uordbytes += freeblk->size;
                __mallinfo.allocated += 3;

                 * split the left fragment by
                 * subtracting the size of oldblk
                 * and oldblk's neighbor
                    ( (char*)freeneighbor

                 * split the right fragment by
                 * setting oldblk's neighbor's size
                    - (char*)oldneighbor;

                 * return the fragments to free space
                free(freeblk->data);
                free(oldneighbor->data);

    return(0);		/* free block not found */
 * Add a block of at least nbytes from end-of-memory to the
 *
 * true if at least n bytes can be allocated
 *
 *    -- free space (delimited by the extern variable _ubound) is
 *       extended by an amount determined by rounding nbytes up to
 *       a multiple of the system page size.
 *
 *    -- The lower bound of the heap is determined the first time
 *       this routine is entered.  It does NOT necessarily begin at
 *       the end of static data space, since startup code (e.g., for
 *       profiling) may have invoked sbrk() before we got here.
morecore(uint nbytes)

    nbpg = getpagesize();

    /* hack to avoid fragmenting the heap with the first
    if ((newhdr = getfreehdr()) == NIL) {
        /* Error message returned by getfreehdr() */

    (void) putfreehdr(newhdr);

    nbytes = roundup(nbytes, nbpg);
    p = (Dblk) sbrk((int)nbytes);
    if (p == (Dblk) -1) {
        if (errno == EAGAIN) errno = ENOMEM;
        return(false);		/* errno = ENOMEM */

    if (_lbound == NULL)	/* set _lbound the first time through */
        _lbound = (char*) p;
    _ubound = (char *) p + nbytes;

    /* maintain statistics */
    __mallinfo.arena = _ubound - _lbound;
    __mallinfo.uordbytes += nbytes;
    __mallinfo.ordblks++;
    __mallinfo.allocated++;
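/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * helper mirroring the page rounding performed above, assuming
 * roundup() is the usual power-of-two rounding macro from mallint.h.
 * With a 4096-byte page, for example, a 5000-byte request extends
 * the heap by 8192 bytes.
 */
static uint
heap_growth(uint nbytes)
{
    if (nbpg == 0)
        nbpg = getpagesize();
    return (roundup(nbytes, nbpg));
}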
 * Get a free block header from the free header list.
 * When the list is empty, allocate an array of headers.
 * When the array is empty, allocate another one.
 * When we can't allocate another array, we're in deep weeds.
    if (freehdrlist != NIL) {
        freehdrlist = freehdrlist->left;

    if (nfreehdrs <= 0) {
        size = NFREE_HDRS*sizeof(struct freehdr) + ALIGNSIZ;
        blk = (Dblk) sbrk(size);
        if ((int)blk == -1) {
            error("getfreehdr: out of memory");
            if (errno == EAGAIN) errno = ENOMEM;

        if (_lbound == NULL)	/* set _lbound on first allocation */
            _lbound = (char*)blk;

        freehdrptr = (Freehdr)blk->data;
        nfreehdrs = NFREE_HDRS;
        _ubound = (char*) nextblk(blk, size);

        /* maintain statistics */
        __mallinfo.arena = _ubound - _lbound;
        __mallinfo.treeoverhead += size;

    return(freehdrptr++);

 * Free a free block header
 * Add it to the list of available headers.

putfreehdr(Freehdr p)

    p->left = freehdrlist;
#ifndef DEBUG	/* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */

 * stubs for error handling and diagnosis routines.  These are what
 * you get in the standard C library; for non-placebo diagnostics
 * load /usr/lib/malloc.debug.o with your program.

error(char *fmt, ...)

#endif	/* !DEBUG <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
#ifdef DEBUG	/* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */

 * malloc_debug(level)
 *
 *    Controls the level of error diagnosis and consistency checking
 *    done by malloc() and free().  level is interpreted as follows:
 *
 *    0: malloc() and free() return 0 if error detected in arguments
 *       (errno is set to EINVAL)
 *    1: malloc() and free() abort if errors detected in arguments
 *    2: same as 1, but scan entire heap for errors on every call
 *       to malloc() or free()
 *
 *    returns the previous level of error reporting.
malloc_debug(int level)

    old_level = debug_level;
    debug_level = level;
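/*
 * Illustrative usage sketch (not part of the original file): a
 * hypothetical helper that turns on full heap scanning around a
 * suspect stretch of code, then restores the caller's previous
 * reporting level.
 */
static void
check_heap_hard(void)
{
    int old = malloc_debug(2);	/* level 2: verify heap on every call */

    if (!malloc_verify())
        error("heap or free space tree is corrupt\n");
    (void) malloc_debug(old);
}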
 * check a free space tree pointer.  Should be in
 * the static free pool or somewhere in the heap.

    || ((Dblk)(p) < (Dblk)_lbound || (Dblk)(p) > (Dblk)_ubound)){\

#define	chkhdr(p)	chkblk(p)

        error("Illegal block address (%#x)\n", (p));
 * returns 1 if free space tree p satisfies internal consistency

cartesian(Freehdr p)

    if (p == NIL)		/* no tree to test */

     * check that root has a data block

     * check that the child blocks are no larger than the parent block.
    if (probe->size > p->size)	/* child larger than parent */

    if (probe->size > p->size)	/* child larger than parent */

     * test data addresses in the left subtree,
     * starting at the left subroot and probing to
     * the right.  All data addresses must be < p->block.
    while (probe != NIL) {
        if (nextblk(db, probe->size) >= pdb)	/* overlap */
        probe = probe->right;

     * test data addresses in the right subtree,
     * starting at the right subroot and probing to
     * the left.  All addresses must be > nextblk(p->block).
    pdb = nextblk(pdb, p->size);
    while (probe != NIL) {
        if (db == NULL || db <= pdb)		/* overlap */
        probe = probe->left;

    return (cartesian(p->left) && cartesian(p->right));
 * This is a verification routine.  It walks through all blocks
 * in the heap (both free and busy) and checks for bad blocks.
 * malloc_verify returns 1 if the heap contains no detectably bad
 * blocks; otherwise it returns 0.
    if (_lbound == NULL)	/* no allocation yet */

     * first check heap bounds pointers
    if ((uint)_lbound < lb || (uint)_lbound > ub) {
        error("malloc_verify: illegal heap lower bound (%#x)\n",

    if ((uint)_ubound < lb || (uint)_ubound > ub) {
        error("malloc_verify: illegal heap upper bound (%#x)\n",

    maxsize = heapsize();

    while (p < (Dblk) _ubound) {
        if ((size) < SMALLEST_BLK
            || (size) & (ALIGNSIZ-1)
            || (size) > heapsize()
            || ((char*)(p))+(size) > _ubound) {
            error("malloc_verify: bad block size (%d) at %#x\n",
            return(0);		/* Badness */

        p = nextblk(p, size);

    if (p > (Dblk) _ubound) {
        error("malloc_verify: heap corrupted\n");

    if (!cartesian(_root)) {
        error("malloc_verify: free space tree corrupted\n");
 * The following is a kludge to avoid dependency on stdio, which
 * uses malloc() and free(), one of which probably got us here in

#define	putchar(c)	(*buf++ = (c))
extern int fileno();	/* bletch */
#define	stderr	2	/* bletch */

static char stderrbuf[LBUFSIZ];

 * If debug_level == 0, does nothing except set errno = EINVAL.
 * Otherwise, prints an error message to stderr and generates a

error(char *fmt, ...)

    static int n = 0;	/* prevents infinite recursion when using stdio */

    if (debug_level == 0)

    nbytes = vsprintf(stderrbuf, fmt, ap);
    stderrbuf[nbytes++] = '\n';
    stderrbuf[nbytes] = '\0';
    write(fileno(stderr), stderrbuf, nbytes);

#endif	/* DEBUG <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */