/* usr/src/lib/libmalloc/common/malloc.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "assert.h"
#include "malloc.h"
#include "mallint.h"
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user-level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header.  (See description
 *		   of the free queue.)
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	next block = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
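 *
 *	Illustrative example (addresses assumed, not from the original
 *	text): if a block's nextblk field would hold the address
 *	0x10020, storing 0x10021 (low bit set) marks the block busy;
 *	the TESTBUSY/SETBUSY/CLRBUSY macros used throughout this file
 *	test, set, and clear that bit.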
 *
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free list.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block whose contiguous preceding block is free is
 *	freed and reallocated on the next call to malloc.  The
 *	time savings of this strategy is judged to be worth the
 *	occasional waste of memory.
 *
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and numlblks small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
 */

/*
 *	description of arena, free queue, holding blocks etc.
 *
 * The compiler and linker do not guarantee the order of initialized
 * data.  Define freeptr as arena[2-3] to guarantee it follows arena
 * in memory.  Later code depends on this order.
 */

static struct header arena[4] = {
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0},
	{0, 0, 0}
};
				/*
				 * the second word is a minimal block to
				 * start the arena.  The first is a busy
				 * block to be pointed to by the last block.
				 */

#define	freeptr	(arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;	/* pointer to array of head pointers */
				/* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */
static int grain = ALIGNSZ;

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif

/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
	return (ret);
}

/*
 * Use malloc_unlocked() to get the address to start with.  Given this
 * address, find the closest address that satisfies the requested
 * alignment and return that address after doing some housekeeping
 * (refer to the ascii art below).
 */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc;

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	/* round fr up to the next multiple of alignment */
	fr = (fr + alignment - 1) / alignment * alignment;
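	/*
	 * Illustrative example (values assumed, not from the original
	 * text): with alignment == 64 and alloc_buf == 0x10008,
	 * fr = (0x10008 + 63) / 64 * 64 == 0x10040, the first 64-byte
	 * boundary at or above alloc_buf.
	 */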

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * We hit an edge case: the space ahead of the aligned
		 * address is not sufficient to hold a header, and hence
		 * we can't free it.  So double the alignment padding.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) { /* overflow */
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ)
			fr = fr + alignment;
	}

	/*
	 *           +-------+                +-------+
	 *       +---|  <a>  |                |  <a>  |--+
	 *       |   +-------+<--alloc_buf--->+-------+  |
	 *       |   |       |                |       |  |
	 *       |   |       |                |       |  |
	 *       |   |       |                |       |  |
	 *       |   |       |        hd -->  +-------+  |
	 *       |   |       |            +---|  <b>  |<-+
	 *       |   |       |            |   +-------+<--- fr
	 *       |   |       |            |   |       |
	 *       |   |       |            |   |       |
	 *       |   |       |            |   |       |
	 *       |   |       |            |   |       |
	 *       |   |       |            |   |       |
	 *       |   |       |            |   |       |
	 *       |   +-------+            |   +-------+
	 *       +-->|  next |            +-->|  next |
	 *           +-------+                +-------+
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}

void *
valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}

/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
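		/*
		 * Resulting initial layout (sketch, inferred from the
		 * code above): arena[0] and arena[1] are busy sentinel
		 * blocks bracketing the still-empty arena, while
		 * freeptr[0]/freeptr[1] (i.e. arena[2]/arena[3]) serve
		 * as permanent head and tail of the doubly linked free
		 * queue, so the queue is never empty.
		 */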
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  Allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* changes to algorithm params */
					/* are no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain;
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
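		/*
		 * Illustrative example (values assumed): with grain == 8
		 * and MINHEAD == 8, a request for nbytes == 10 rounds up
		 * to 16, maps to holdhead[2], and occupies nb == 24
		 * bytes (16 payload + MINHEAD) inside the holding block.
		 */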
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if (newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
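		/*
		 * Illustrative example (values assumed): with
		 * ALIGNSZ == 8, minhead == 8, and MINBLKSZ == 32, a
		 * request for nbytes == 50 needs 58 bytes, aligns up
		 * to 64, and stays 64 since that exceeds MINBLKSZ.
		 */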
		/*
		 * see if there is a big enough block.
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */
		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}

		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free.
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
					(void) write(2, "Case 1 hit more than"
					    " once. brk or sbrk?\n", 40);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block;
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set.
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk	*lblk;		/* pointer to freed block */
		ssize_t		offset;		/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend)
				lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}

/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			the contents pointed to by ptr.  Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void	*retval;

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
	return (retval);
}

/*
 * realloc_unlocked(ptr) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;		/* block size as allocator sees it */
	char *newptr;		/* pointer to user's new block */
	size_t cpysize;		/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the
		 * contents of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;

			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minhead via mallopt, that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend)
				lastblk = blk;
		}
		/* get size we really need */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block;
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}

/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;
	size_t total;

	if (num == 0 || size == 0) {
		total = 0;
	} else {
		total = num * size;

		/* check for overflow */
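		/*
		 * Illustrative example (values assumed, 32-bit size_t):
		 * num == 0x10000 and size == 0x10001 give total ==
		 * 0x10000 after wrapping mod 2^32, and total / num ==
		 * 1 != size, so the overflow is caught below.
		 */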
		if ((total / num) != size) {
			errno = ENOMEM;
			return (NULL);
		}
	}

	mp = malloc(total);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, total);
	return (mp);
}

/*
 * mallopt - set options for allocation
 *
 *	mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
 *		 largest small, quickly allocated block.  Maxfast
 *		 may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
 *		 small blocks per holding block.  Value must be
 *		 greater than 1.
 *
 *	M_GRAIN  Set grain to value.  The sizes of all blocks
 *		 smaller than maxfast are considered to be rounded
 *		 up to the nearest multiple of grain.  The default
 *		 value of grain is the smallest number of bytes
 *		 which will allow alignment of any data type.  Grain
 *		 will be rounded up to a multiple of its default,
 *		 and maxfast will be rounded up to a multiple of
 *		 grain.  Value must be greater than 0.
 *
 *	M_KEEP   Retain data in freed block until the next malloc,
 *		 realloc, or calloc.  Value is ignored.
 *		 This option is provided only for compatibility with
 *		 the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		  1, if malloc has previously been called or
 *		     if value or cmd have illegal values
 */
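
/*
 * Example use (illustrative, not from the original text): a program
 * that wants fast small-block handling for requests up to 256 bytes
 * would call
 *	(void) mallopt(M_MXFAST, 256);
 * before its first allocation; once a small block has been handed
 * out, change != 0 and every subsequent mallopt call fails with 1.
 */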

int
mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
	if (change) {
		(void) mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain*fastct;
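		/*
		 * Illustrative example (grain == 8 assumed): value ==
		 * 100 gives fastct == 13 and maxfast == 104, i.e. the
		 * requested ceiling rounded up to a whole grain.
		 */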
		break;
	case M_NLBLKS:
		if (value <= 1) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		(void) mutex_unlock(&mlock);
		return (1);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

/*
 * mallinfo - provide information about space usage
 *
 *	output - a structure containing a description of
 *		 space usage, defined in malloc.h
 */

struct mallinfo
mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int	i;			/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated,
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}

/*
 * freespace - calc. how much space is used in the free
 *	small blocks in a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns space used in free small blocks of hblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;

		/*
		 * The request size is too big.  We need to do this in
		 * chunks, since sbrk only takes a signed long for an arg.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);
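
		/*
		 * Illustrative walk-through (assuming 32-bit long):
		 * bytes == 0x80000005 issues sbrk(0x7fffffff), then
		 * sbrk(6).  The second "bytes -= LONG_MAX" wraps the
		 * unsigned value around, so wad (signed) goes negative
		 * and the loop below terminates.
		 */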
		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}

#ifdef debug
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 *	description - return allocated memory and reset
 *		allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		It will return all memory allocated after
 *		the first call to malloc, even if some
 *		of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}
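
/*
 * fork(2) handlers (rationale inferred from the code below): the
 * prepare handler takes mlock before a fork and the release handler
 * drops it in both parent and child, so the child never starts with
 * mlock held by a thread that no longer exists.
 */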
static void
malloc_prepare()
{
	(void) mutex_lock(&mlock);
}

static void
malloc_release()
{
	(void) mutex_unlock(&mlock);
}

#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}