 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.

/* Copyright (c) 1988 AT&T */
/* All Rights Reserved */
#include <sys/types.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);
 * user-level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header.  (See description
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
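 *
 *	An illustrative sketch of the tagging (the real macros live in
 *	this library's internal header, so these exact definitions are
 *	assumptions, not quotes):
 *
 *		TESTBUSY(p)	((uintptr_t)(p) & 1)
 *		SETBUSY(p)	((struct header *)((uintptr_t)(p) | 1))
 *		CLRBUSY(p)	((struct header *)((uintptr_t)(p) & ~1UL))
 *
 *	Headers are word aligned, so the low bit of a header pointer
 *	is always zero and can carry the busy flag at no space cost.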
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free queue.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
 *	freed and reallocated on the next call to malloc.  The
 *	time saved by this strategy is judged to be worth the
 *	occasional waste of memory.
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and ?????? small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
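 *
 *	Sketch of the resulting bookkeeping (illustrative; assumes the
 *	default grain of ALIGNSZ bytes, taken here to be 16): requests
 *	of 1-16 bytes share one holding-block queue, 17-32 bytes the
 *	next, and so on up to maxfast, so a queue index is just the
 *	rounded-up request size divided by grain.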
 *	description of arena, free queue, holding blocks etc.
 * The new compiler and linker do not guarantee the order of initialized
 * data.  Define freeptr as arena[2-3] to guarantee that it follows
 * arena in memory.  Later code depends on this order.
static struct header arena[4] = {
			/*
			 * the second word is a minimal block to
			 * start the arena.  The first is a busy
			 * block to be pointed to by the last block.
			 */

#define	freeptr	(arena + 2)
			/* first and last entry in free list */
static struct header *arenaend;		/* ptr to block marking high end of arena */
static struct header *lastblk;		/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
					/* to holding block chains */
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */
static int grain = ALIGNSZ;
static int case1count = 0;
	register struct header *p;

	while (p != &freeptr[1]) {
		assert(p->prevfree->nextfree == p);
	while (p != &freeptr[0]) {
		assert(p->nextfree->prevfree == p);
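	/*
	 * The two loops above walk the free queue in opposite
	 * directions, checking the doubly-linked invariant: every
	 * node's neighbor must point back at it.
	 */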
 * malloc(nbytes) - give a user nbytes to use

malloc(size_t nbytes)

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
 * Use malloc_unlocked() to get the address to start with; given this
 * address, find out the closest address that aligns with the request
 * and return that address after doing some housekeeping (refer to the
memalign(size_t alignment, size_t size)

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {

	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
	fr = (uintptr_t)alloc_buf;
	fr = (fr + alignment - 1) / alignment * alignment;
	if (fr == (uintptr_t)alloc_buf)
	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * We hit an edge case, where the space ahead of the
		 * aligned address is not sufficient to hold the header
		 * and hence we can't free it.  So double the allocation
		 * request.
		 */
		alloc_size = size + alignment * 2;
		if (alloc_size < size) {

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
		fr = (uintptr_t)alloc_buf;
		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
	 *     +-------+                   +-------+
	 * +---| <a>   |                   | <a>   |--+
	 * |   +-------+<--alloc_buf------>+-------+  |
	 * |   |       |             hd--> +-------+  |
	 * |   |       |               +---| <b>   |<-+
	 * |   |       |               |   +-------+<--- fr
	 * |   +-------+               |   +-------+
	 * +-->| next  |               +-->| next  |
	 *     +-------+                   +-------+
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
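	/*
	 * Worked example of the round-up above (values are illustrative
	 * assumptions, not taken from this file): with
	 * alloc_buf == 0x1008 and alignment == 0x100,
	 *	fr = (0x1008 + 0xff) / 0x100 * 0x100 == 0x1100,
	 * so the 0xf8-byte prefix between alloc_buf and fr is wide
	 * enough (assuming HEADSZ is smaller) to be spliced off as the
	 * separately freeable block <b> in the diagram.
	 */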
static unsigned pagesize;

	pagesize = sysconf(_SC_PAGESIZE);
	return (memalign(pagesize, size));
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc

malloc_unlocked(size_t nbytes, int nosmall)

	size_t nb;	/* size of entire block we need */
	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
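		/*
		 * Sketch of the state built above (a reading aid, not
		 * code): arena[0] and arena[1] are both marked BUSY so
		 * compaction can never run off either end of the arena,
		 * and the free queue starts as the empty circular list
		 * freeptr[0] <-> freeptr[1].
		 */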
	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk;	/* head of right sized queue */
		struct lblk *lblk;		/* pointer to a little block */
		struct holdblk *newhold;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  Allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 */
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			maxfast = fastct * grain;
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
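		/*
		 * Illustrative arithmetic (assuming grain == 16): a
		 * request for nbytes == 21 rounds up to
		 * nb == (21 + 15) / 16 * 16 == 32, so holdhead[2]
		 * heads the queue of holding blocks that carve out
		 * 32-byte little blocks.
		 */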
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
					holdhead[(nb-MINHEAD) / grain] =
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused + nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if ((char *)newhold == NULL) {
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			holdhead[(nb-MINHEAD)/grain] = newhold;
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space + nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb - MINHEAD;
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		return ((char *)lblk + MINHEAD);
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */
		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

				/* see if we can compact */
				if (!TESTBUSY(nextnext = next->nextblk)) {
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
			} while (((char *)(next) - (char *)blk) < nb);
			/*
			 * if we didn't find a block, get more memory
			 */
			if (blk == &(freeptr[1])) {
				/*
				 * careful coding could likely replace
				 * newend with arenaend
				 */
				struct header *newend;	/* new end of arena */
				ssize_t nget;		/* number of words to get */
				/*
				 * Three cases - 1. There is space between arenaend
				 *		    and the break value that will become
				 *		    a permanently allocated block.
				 *		 2. Case 1 is not true, and the last
				 *		    block is allocated.
				 *		 3. Case 1 is not true, and the last
				 */
				if ((newblk = (struct header *)sbrk(0)) !=
				    (struct header *)((char *)arenaend + HEADSZ)) {
					if (case1count++ > 0)
						(void) write(2, "Case 1 hit more than once."
						    " brk or sbrk?\n", 41);
					/* get size to fetch */
					/* round up to a block */
					nget = (nget + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
					assert((uintptr_t)newblk % ALIGNSZ == 0);
					if (morecore(nget) == (void *)-1)
					newend = (struct header *)((char *)newblk + nget
					assert((uintptr_t)newblk % ALIGNSZ == 0);
					newend->nextblk = SETBUSY(&(arena[1]));
					newblk->nextblk = newend;
					/*
					 * space becomes a permanently allocated block.
					 * This is likely not mt-safe as lock is not
					 * shared with brk or sbrk
					 */
					arenaend->nextblk = SETBUSY(newblk);
					/* adjust other pointers */
				} else if (TESTBUSY(lastblk->nextblk)) {
					nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
					if (morecore(nget) == (void *)-1)
					/* block must be word aligned */
					assert(((uintptr_t)newblk % ALIGNSZ) == 0);
					/*
					 * stub at old arenaend becomes first word
					 */
					/* ??? newblk = arenaend; */
					    (struct header *)((char *)arenaend + nget);
					newend->nextblk = SETBUSY(&(arena[1]));
					arenaend->nextblk = newend;
					lastblk = blk = arenaend;
					/*
					 * last block in arena is at end of memory and
					 */
					/* 1.7 had this backward without cast */
					    ((char *)arenaend - (char *)lastblk);
					nget = (nget + (BLOCKSZ - 1)) /
					assert(((uintptr_t)newblk % ALIGNSZ) == 0);
					if (morecore(nget) == (void *)-1)
					/* combine with last block, put in arena */
					newend = (struct header *)
					    ((char *)arenaend + nget);
					arenaend = lastblk->nextblk = newend;
					newend->nextblk = SETBUSY(&(arena[1]));
					/* set which block to use */
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			/* if blk was lastblk, make newblk lastblk */
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);

	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *		block pointing to the next block
 *		if the header points backward, we have a small
 *		block from a holding block.
 *		In both cases, the busy bit must be set
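 *
 *	Dispatch sketch (a reading aid, not a quote of the code below):
 *
 *		hdr = (struct header *)((char *)ptr - MINHEAD);
 *		if (TESTSMAL(hdr->nextblk))
 *			return little block to its holding block's free list;
 *		else
 *			return ordinary block to the arena free queue;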
	(void) mutex_lock(&mlock);
	(void) mutex_unlock(&mlock);
 * free_unlocked(ptr) - Do the real work for free()

free_unlocked(void *ptr)

	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk *lblk;	/* pointer to freed block */
		ssize_t offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		struct header *blk;		/* real start of block */
		struct header *next;		/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);

		/* take care of twits (e.g. awk) who return blocks twice */
		blk->nextblk = next = CLRBUSY(next);

		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend) lastblk = blk;
 * realloc(ptr, size) - give the user a block of size "size", with
 *			the contents pointed to by ptr.  Free ptr.

realloc(void *ptr, size_t size)

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
 * realloc_unlocked(ptr) - Do the real work for realloc()

realloc_unlocked(void *ptr, size_t size)

	struct header *blk;	/* block ptr is contained in */
	size_t trusize;		/* block size as allocator sees it */
	char *newptr;		/* pointer to user's new block */
	size_t cpysize;		/* amount to copy */
	struct header *next;	/* block after blk */

		return (malloc_unlocked(size, 0));
	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 */
		newptr = malloc_unlocked(size, 0);

		/* this isn't to save time; it's to protect the twits */
		if ((char *)ptr != newptr) {
			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
		blk = (struct header *)((char *)ptr - minhead);

		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minblk via mallopt, that's
		 */
		if (!TESTBUSY(next)) {
			blk->nextblk = SETBUSY(next);
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend) lastblk = blk;
		/* get size we really need */
		trusize = size + minhead;
		trusize = (trusize + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;

		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				/* if blk was lastblk, make newblk lastblk */

		/* bite the bullet, and call malloc */
		cpysize = (size > cpysize) ? cpysize : size;
		newptr = malloc_unlocked(size, 0);
		(void) memcpy(newptr, ptr, cpysize);
 * calloc - allocate and clear memory block

calloc(size_t num, size_t size)

	if (num == 0 || size == 0) {

	/* check for overflow */
	if ((total / num) != size) {

	(void) memset(mp, 0, total);
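	/*
	 * Worked example of the overflow check above (illustrative,
	 * 32-bit size_t): num == 65536 and size == 65537 multiply to
	 * 65536 after wraparound, so total / num == 1 != size and the
	 * overflow is caught before memory is returned.
	 */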
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
 *		 largest small, quickly allocated block.  Maxfast
 *		 may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
 *		 small blocks per holding block.  Value must be
 *
 *	M_GRAIN  Set grain to value.  The sizes of all blocks
 *		 smaller than maxfast are considered to be rounded
 *		 up to the nearest multiple of grain.  The default
 *		 value of grain is the smallest number of bytes
 *		 which will allow alignment of any data type.  Grain
 *		 will be rounded up to a multiple of its default,
 *		 and maxsize will be rounded up to a multiple of
 *		 grain.  Value must be greater than 0.
 *
 *	M_KEEP	 Retain data in freed block until the next malloc,
 *		 realloc, or calloc.  Value is ignored.
 *		 This option is provided only for compatibility with
 *		 the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		  1, if malloc has previously been called or
 *		     if value or cmd have illegal values
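 *
 *	A usage sketch (the values are illustrative assumptions, not
 *	documented defaults; every call must precede the first
 *	small-block allocation or it fails with 1):
 *
 *		(void) mallopt(M_MXFAST, 64);	fast path for <= 64 bytes
 *		(void) mallopt(M_NLBLKS, 100);	100 little blocks per holder
 *		(void) mallopt(M_GRAIN, 16);	round small sizes to 16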
mallopt(int cmd, int value)

	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
		(void) mutex_unlock(&mlock);
		(void) mutex_unlock(&mlock);
		fastct = (value + grain - 1) / grain;
		maxfast = grain * fastct;
		(void) mutex_unlock(&mlock);
		(void) mutex_unlock(&mlock);
		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;

		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
		(void) mutex_unlock(&mlock);
	(void) mutex_unlock(&mlock);
 * mallinfo - provide information about space usage
 *
 *	input - max; mallinfo will return the size of the
 *		largest block < max.
 *
 *	output - a structure containing a description of
 *		 space usage, defined in malloc.h
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int i;				/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ - MINHEAD;
			inf.fordblks += size;
		next = CLRBUSY(blk->nextblk);
	/*
	 * if any holding blocks have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					fsp = freespace(hblk);
					inf.usmblks += numlblks * size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks * (HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
 * freespace - calc. how much space is used in the free
 *	small blocks in a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns space used in free small blocks of hblk

freespace(struct holdblk *holdblk)

	struct lblk *unused;
	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		lblk = CLRSMAL(lblk->header.nextfree);
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
morecore(size_t bytes)

	if (bytes > LONG_MAX) {
		/*
		 * The request size is too big.  We need to do this in
		 * chunks.  Sbrk only takes an int for an arg.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		if (sbrk(wad) == (void *)-1) {
			(void) sbrk(-LONG_MAX);
			return ((void *)-1);
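		/*
		 * Chunking sketch (a reading aid): an oversize request
		 * is presumably fed to sbrk() in LONG_MAX-sized wads,
		 * and the sbrk(-LONG_MAX) above backs out the
		 * previously grabbed wad so the break is not left
		 * partially advanced on failure.
		 */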
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		next = CLRBUSY(blk->nextblk);
	(void) mutex_unlock(&mlock);
 * rstalloc - reset alloc routines
 *
 *	description - return allocated memory and reset
 *		      allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		  It will return all memory allocated after
 *		  the first call to malloc, even if some
 *		  of it was fetched by a user's sbrk().

	(void) mutex_lock(&mlock);
	numlblks = NUMLBLKS;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
	brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
	(void) mutex_unlock(&mlock);
#endif	/* RSTALLOC */
 * cfree is an undocumented, obsolete function

cfree(void *p, size_t num, size_t size)

	(void) mutex_lock(&mlock);
	(void) mutex_unlock(&mlock);
#pragma init(malloc_init)

	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
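	/*
	 * Rationale sketch (based on the handler names above; the
	 * handler bodies are assumptions): malloc_prepare presumably
	 * acquires mlock before fork() and malloc_release drops it in
	 * both parent and child, so a forked child never starts with
	 * mlock held by a thread that no longer exists in its image.
	 */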