/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Memory management: malloc(), realloc(), free(), memalign().
 *
 * The following #-parameters may be redefined:
 *	GETCORE: a function to get more core memory.
 *		GETCORE(0) is assumed to return the next available
 *		address.  Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is ((char *)(-1)).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE.  Default is (1024*ALIGN).
 *
 * This algorithm is based on a best-fit strategy with lists of
 * free elts maintained in a self-adjusting binary tree.  Each list
 * contains all elts of the same size.  The tree is ordered by size.
 * For results on self-adjusting trees, see the paper:
 *	Self-Adjusting Binary Search Trees,
 *	D. D. Sleator and R. E. Tarjan, JACM, 1985.
 *
 * The header of a block contains the size of the data part in bytes.
 * Since the size of a block is 0%4 (a multiple of four bytes), the low
 * two bits of the header are free and are used as follows:
 *
 *	BIT0:	1 for busy (block is in use), 0 for free.
 *	BIT1:	if the block is busy, this bit is 1 if the
 *		preceding block in contiguous memory is free.
 *		Otherwise, it is always 0.
 */
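/*
 * Illustrative sketch only, not part of the original source: the flag
 * macros actually used below (ISBIT0(), SETBIT0(), SETOLD01(), ...) come
 * from the private allocator header and may differ in detail, but they
 * manipulate the header word roughly as follows.
 */
#if 0
#define	HDR_BIT0	((size_t)1)		/* block is busy */
#define	HDR_BIT1	((size_t)2)		/* preceding block is free */
#define	HDR_SIZE(h)	((h) & ~(HDR_BIT0 | HDR_BIT1))	/* payload size */
#define	HDR_ISBUSY(h)	(((h) & HDR_BIT0) != 0)
#define	HDR_PREVFREE(h)	(((h) & HDR_BIT1) != 0)
#endif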
static	mutex_t	__watch_malloc_lock = DEFAULTMUTEX;

static	TREE	*Root;		/* root of the free tree */
static	TREE	*Bottom;	/* the last free chunk in the arena */
static	char	*Baddr;		/* current high address of the arena */

static	void	t_delete(TREE *);
static	void	t_splay(TREE *);
static	void	realfree(void *);
static	void	*malloc_unlocked(size_t);
static	void	free_unlocked(void *);
static	TREE	*morecore(size_t);

static	void	protect(TREE *);
static	void	unprotect(TREE *);
/*
 * Patterns to be copied into freed blocks and allocated blocks.
 * 0xdeadbeef... and 0xbaddcafe... are invalid pointer values in all programs.
 */
static uint64_t patterns[2] = {
	0xdeadbeefdeadbeefULL,	/* pattern in a freed block */
	0xbaddcafebaddcafeULL	/* pattern in an allocated block */
};

static void
copy_pattern(int pat, TREE *tp)
{
	uint64_t pattern = patterns[pat];
	size_t sz = SIZE(tp) / sizeof (uint64_t);
	/* LINTED improper alignment */
	uint64_t *datap = (uint64_t *)DATA(tp);
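	/*
	 * Illustrative sketch only, not part of the original source: the
	 * elided remainder of copy_pattern() presumably fills the block's
	 * data area with the selected pattern, one 64-bit word at a time:
	 */
#if 0
	while (sz-- != 0)
		*datap++ = pattern;
#endif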
/*
 * Keep lists of small blocks, LIFO order.
 */
static	TREE	*List[MINSIZE/WORDSIZE-1];
static	TREE	*Last[MINSIZE/WORDSIZE-1];

/* number of blocks to get at one time */
#define	NPS	(WORDSIZE*8)
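/*
 * Illustrative note, not part of the original source: a small request of
 * k words (k*WORDSIZE < MINSIZE) is served from List[k-1], as the index
 * computation in the small-block allocator just below suggests, and each
 * list is replenished NPS blocks at a time from one malloc_unlocked() call.
 */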
	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */

	i = size / WORDSIZE - 1;
	if (List[i] == NULL) {
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((np = malloc_unlocked((size + WORDSIZE)*NPS)) == NULL)

		/* make them into a linked list */
		for (n = 0, List[i] = np; n < NPS; ++n) {
			copy_pattern(FREEPAT, tp);
			/* LINTED improper alignment */

	/* allocate from the head of the queue */
	if ((List[i] = AFTER(tp)) == NULL)
	copy_pattern(LIVEPAT, tp);
void *
malloc(size_t size)
{
	(void) mutex_lock(&__watch_malloc_lock);
	ret = malloc_unlocked(size);
	(void) mutex_unlock(&__watch_malloc_lock);
static void *
malloc_unlocked(size_t size)
{
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {

	/* make sure that size is 0 mod ALIGN */

		return (smalloc(size));

	/* search for an elt of the right size */
			if (SIZE(tp) >= size) {	/* branch left */
				if (n == 0 || n >= SIZE(tp)) {
				if ((tmp = LEFT(tp)) != NULL) {
			} else {		/* branch right */
				if ((tmp = RIGHT(tp)) != NULL) {
		} else if (tp != Root) {
			/* make the searched-to element the root */

	/* if found none fitted in the tree */
		if (size <= SIZE(Bottom)) {
			if ((sp = morecore(size)) == NULL)
		if ((sp = morecore(size)) == NULL)

	/* tell the forward neighbor that we're busy */
	/* LINTED improper alignment */
	ASSERT(ISBIT0(SIZE(tmp)));

	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		/* LINTED improper alignment */
	} else if (BOTTOM(sp))

	/* return the allocated space */
	copy_pattern(LIVEPAT, sp);
/*
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
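/*
 * Illustrative sketch only, not part of the original source: "forward
 * merging" means absorbing the physically next block when it is free,
 * so the existing data need not be copied.  Roughly:
 */
#if 0
	np = NEXT(tp);				/* physically adjacent block */
	if (!ISBIT0(SIZE(np))) {		/* neighbor is free */
		t_delete(np);			/* remove it from the free tree */
		SIZE(tp) += SIZE(np) + WORDSIZE;	/* absorb it and its header */
	}
#endif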
void *
realloc(void *old, size_t size)
{
	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {

	/* pointer to the block */
	(void) mutex_lock(&__watch_malloc_lock);
		new = malloc_unlocked(size);
		(void) mutex_unlock(&__watch_malloc_lock);

	/* make sure that size is 0 mod ALIGN */

	/* LINTED improper alignment */

	/* if the block was freed, data has been destroyed. */
		/* XXX: complain here! */
		(void) mutex_unlock(&__watch_malloc_lock);

	if (size == SIZE(tp)) {	/* nothing to do */
		(void) mutex_unlock(&__watch_malloc_lock);

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&__watch_malloc_lock);

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		/* LINTED improper alignment */
		if (ISBIT0(SIZE(np)))
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			/* LINTED improper alignment */

	/* not enough & at TRUE end of memory, try extending core */
	if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
		if ((tp = morecore(size)) == NULL) {

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			/* LINTED improper alignment */
		} else if (BOTTOM(tp))

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		(void) mutex_unlock(&__watch_malloc_lock);

call_malloc:	/* call malloc to get a new block */
	SETOLD01(SIZE(tp), ts);
	if ((new = malloc_unlocked(size)) != NULL) {
		(void) memcpy(new, old, ts);
		(void) mutex_unlock(&__watch_malloc_lock);

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block.
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks.  Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to a
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) /* case 1. */ {
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&__watch_malloc_lock);
		} else if (size < MINSIZE) /* case 2. */ {
	} else if (size < MINSIZE) /* case 3. */ {
	} else if (ISBIT1(ts)) {
		if ((SIZE(np) + SIZE(tp) + WORDSIZE) >= size) {
			ASSERT(!ISBIT0(SIZE(np)));
			SIZE(np) += SIZE(tp) + WORDSIZE;
			/*
			 * Since the copy may overlap, use memmove().
			 */
			(void) memmove(DATA(np), old, SIZE(tp));

	SETOLD01(SIZE(tp), ts);
	(void) mutex_unlock(&__watch_malloc_lock);
	/* malloc() sets errno */
/*
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying.  This strategy does not guarantee the amortized
 * O(n log n) behaviour for the insert/delete/find set of operations
 * on the tree.  In practice, however, free is much more infrequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np, *tmp;

	/* pointer to the block */
	/* LINTED improper alignment */
	if (!ISBIT0(ts)) {	/* block is not busy; previously freed? */
		protect(tp);	/* force a watchpoint trap */
	copy_pattern(FREEPAT, tp);

	/* small block, return it to the tail of its queue */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		if (List[ts] == NULL) {

	/* see if coalescing with next block is warranted */
	/* LINTED improper alignment */
	if (ISBIT0(SIZE(np)))
		SIZE(tp) += SIZE(np) + WORDSIZE;

	/* the same with the preceding block */
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		SIZE(np) += SIZE(tp) + WORDSIZE;

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* set bottom block, or insert in the free tree */

		/* search for the place to insert */
			if (SIZE(np) > size) {
				if ((tmp = LEFT(np)) != NULL) {
			} else if (SIZE(np) < size) {
				if ((tmp = RIGHT(np)) != NULL) {
				if ((sp = PARENT(np)) != NULL) {
				/* insert to head of list */
				if ((sp = LEFT(np)) != NULL) {
				if ((sp = RIGHT(np)) != NULL) {
				/* doubly link list */

	/*
	 * Tell next block that this one is free.
	 * The first WORD of the next block contains self's address.
	 */
	/* LINTED improper alignment */
	/* LINTED improper alignment */
	ASSERT(ISBIT0(SIZE(tmp)));
/*
 * Get more core.  Gaps in memory are noted as busy blocks.
 */
static TREE *
morecore(size_t size)
{
	size_t	n, offset, requestsize;

	/* compute new amount of memory to get */
	n = size + 2 * WORDSIZE;

	/* errno set by GETCORE sbrk */

	/* need to pad size out so that addr is aligned */
	if ((((size_t)addr) % ALIGN) != 0)
		offset = ALIGN - (size_t)addr % ALIGN;

	/* if not segmented memory, what we need may be smaller */

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	requestsize = n + offset;

	/* check if the request could overflow in GETCORE */
	if (requestsize > MAX_MALLOC - (size_t)addr) {

	if (requestsize > MAX_GETCORE) {
		/*
		 * The value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * The argument to GETCORE() must be a multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return the brk point
		 * to its previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
			if (GETCORE(delta) == ERRCORE) {
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
			requestsize -= MAX_GETCORE;
	} else if (GETCORE(requestsize) == ERRCORE) {

	/* contiguous memory */
		n += SIZE(tp) + 2 * WORDSIZE;
		addr = Baddr - WORDSIZE;

	/* new bottom address */

	/* new bottom block */
	/* LINTED improper alignment */
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	/* LINTED improper alignment */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
/*
 * Utility function to avoid protecting a tree node twice.
 * Return true if tp is in the NULL-terminated array of tree nodes.
 */
static int
in_list(TREE *tp, TREE **npp)
{
	TREE	*sp;

	while ((sp = *npp++) != NULL)
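	/*
	 * Illustrative sketch only, not part of the original source: given
	 * the comment above, the elided loop body presumably compares each
	 * array entry with tp, and the function reports whether a match
	 * was found, e.g.:
	 */
#if 0
		if (sp == tp)
			return (1);
	return (0);
#endif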
/*
 * Tree rotation functions (BU: bottom-up, TD: top-down).
 * All functions are entered with the arguments unprotected.
 * They must return in the same condition, with all other elements
 * that have been unprotected during the operation re-protected.
 */
static void
LEFT1(TREE *x, TREE *y)
{
	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(y)) = y;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))

static void
RIGHT1(TREE *x, TREE *y)
{
	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(y)) = y;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))

static void
BULEFT2(TREE *x, TREE *y, TREE *z)
{
	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(z)) = z;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))

static void
BURIGHT2(TREE *x, TREE *y, TREE *z)
{
	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(z)) = z;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))

static void
TDLEFT2(TREE *x, TREE *y, TREE *z)
{
	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(z)) = z;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))

#if 0	/* Not used, for now */
static void
TDRIGHT2(TREE *x, TREE *y, TREE *z)
{
	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			RIGHT(PARENT(z)) = z;

	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
/*
 * Delete a tree element.
 */
static void
t_delete(TREE *op)
{
	/* if this is a non-tree node */
	if ((sp = LINKFOR(op)) != NULL) {

	/* make op the root of the tree */

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		if ((sp = LEFT(op)) != NULL) {
		if ((sp = RIGHT(op)) != NULL) {

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		/* make the right-end of the left subtree its root */
		while ((sp = RIGHT(tp)) != NULL) {
			if ((gp = RIGHT(sp)) != NULL) {
				TDLEFT2(tp, sp, gp);

		/* hook the right subtree of op to the above elt */
		RIGHT(tp) = sp = RIGHT(op);
	} else if ((tp = RIGHT(op)) != NULL) {	/* no left subtree */
/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
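/*
 * Illustrative note, not part of the original source: in this simplified
 * splay, a double rotation (BULEFT2/BURIGHT2) is applied only when tp,
 * its parent and its grandparent all lean the same way (the "zig-zig"
 * case); otherwise a single rotation (LEFT1/RIGHT1) is used, as the case
 * analysis in t_splay() below shows.
 */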
static void
t_splay(TREE *tp)
{
	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */

		/* x is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);

	unprotect(tp);	/* just in case */

void
free(void *old)
{
	(void) mutex_lock(&__watch_malloc_lock);
	(void) mutex_unlock(&__watch_malloc_lock);

static void
free_unlocked(void *old)
/*
 * memalign(align, nbytes)
 *
 * Returns a block of specified size on a specified alignment boundary.
 *
 * Malloc enough to ensure that a block can be aligned correctly.
 * Find the alignment point and return the fragments
 * before and after the block.
 *
 * Returns NULL and sets errno as follows:
 *	EINVAL	if nbytes is 0,
 *		or if alignment is misaligned,
 *		or if the heap has been detectably corrupted;
 *	ENOMEM	if the requested memory could not be allocated.
 */
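/*
 * Illustrative usage sketch only, not part of the original source:
 * requesting a 64-byte-aligned buffer of 1000 bytes.
 */
#if 0
	void *buf = memalign(64, 1000);	/* 64 is word-aligned and nonzero */
	if (buf == NULL)
		perror("memalign");	/* errno is EINVAL or ENOMEM */
	else
		free(buf);
#endif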
#define	misaligned(p)		((unsigned)(p) & 3)
		/* 4-byte "word" alignment is considered ok in LP64 */
#define	nextblk(p, size)	((TREE *)((char *)(p) + (size)))
void *
memalign(size_t align, size_t nbytes)
{
	size_t	reqsize;	/* Num of bytes to get from malloc() */
	TREE	*p;		/* Ptr returned from malloc() */
	TREE	*blk;		/* For addressing fragment blocks */
	size_t	blksize;	/* Current (shrinking) block size */
	TREE	*alignedp;	/* Ptr to properly aligned boundary */
	TREE	*aligned_blk;	/* The block to be returned */
	size_t	frag_size;	/* size of fragments fore and aft */

	/*
	 * check for valid size and alignment parameters
	 * MAX_ALIGN check prevents overflow in later calculation.
	 */
	if (nbytes == 0 || misaligned(align) || align == 0 ||
	    align > MAX_ALIGN) {

	/*
	 * Malloc enough memory to guarantee that the result can be
	 * aligned correctly.  The worst case is when malloc returns
	 * a block so close to the next alignment boundary that a
	 * fragment of minimum size cannot be created.  In order to
	 * make sure we can handle this, we need to force the
	 * alignment to be at least as large as the minimum frag size
	 * (MINSIZE + WORDSIZE).
	 */

	/* check for size that could overflow ROUND calculation */
	if (nbytes > MAX_MALLOC) {

	if (nbytes < MINSIZE)

	while (align < MINSIZE + WORDSIZE)

	reqsize = nbytes + align + (MINSIZE + WORDSIZE);
	/* check for overflow */
	if (reqsize < nbytes) {

	p = (TREE *) malloc(reqsize);
	if (p == (TREE *) NULL) {
		/* malloc sets errno */

	(void) mutex_lock(&__watch_malloc_lock);

	/*
	 * get size of the entire block (overhead and all)
	 */
	/* LINTED improper alignment */
	blk = BLOCK(p);		/* back up to get length word */
	blksize = SIZE(blk);

	/*
	 * locate the proper alignment boundary within the block.
	 */
	x += align - (x % align);
	alignedp = (TREE *)x;
	/* LINTED improper alignment */
	aligned_blk = BLOCK(alignedp);

	/*
	 * Check out the space to the left of the alignment
	 * boundary, and split off a fragment if necessary.
	 */
	frag_size = (size_t)aligned_blk - (size_t)blk;
	if (frag_size != 0) {
		/*
		 * Create a fragment to the left of the aligned block.
		 */
		if (frag_size < MINSIZE + WORDSIZE) {
			/*
			 * Not enough space.  So make the split
			 * at the other end of the alignment unit.
			 * We know this yields enough space, because
			 * we forced align >= MINSIZE + WORDSIZE above.
			 */
			/* LINTED improper alignment */
			aligned_blk = nextblk(aligned_blk, align);

		blksize -= frag_size;
		SIZE(aligned_blk) = blksize | BIT0;
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0 | ISBIT1(SIZE(blk));
		free_unlocked(DATA(blk));
		/*
		 * free_unlocked(DATA(blk)) has the side-effect of calling
		 * protect() on the block following blk, that is, aligned_blk.
		 * We recover from this by unprotect()ing it here.
		 */
		unprotect(aligned_blk);

	/*
	 * Is there a (sufficiently large) fragment to the
	 * right of the aligned block?
	 */
	frag_size = blksize - nbytes;
	if (frag_size >= MINSIZE + WORDSIZE) {
		/*
		 * split and free a fragment on the right
		 */
		blksize = SIZE(aligned_blk);
		SIZE(aligned_blk) = nbytes;
		/* LINTED improper alignment */
		blk = NEXT(aligned_blk);
		SETOLD01(SIZE(aligned_blk), blksize);
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0;
		free_unlocked(DATA(blk));

	copy_pattern(LIVEPAT, aligned_blk);
	protect(aligned_blk);
	(void) mutex_unlock(&__watch_malloc_lock);
	return (DATA(aligned_blk));
void *
valloc(size_t size)
{
	static unsigned pagesize;

	pagesize = _sysconf(_SC_PAGESIZE);
	return (memalign(pagesize, size));
void *
calloc(size_t num, size_t size)
{
	/* check for overflow */
	if (num != 0 && total / num != size) {

	if ((mp = malloc(total)) != NULL)
		(void) memset(mp, 0, total);
cfree(void *p, size_t num, size_t size)
static pid_t my_pid = 0;	/* to check for whether we fork()d */
static int dont_watch = 0;
static int do_stop = 0;
static int ctlfd = -1;
struct stat ctlstatb;
static int wflags = WA_WRITE;
	if ((s = getenv("MALLOC_DEBUG")) == NULL)

	s = strncpy(str, s, sizeof (str));

		char *e = strchr(s, ',');
		if (strcmp(s, "STOP") == 0)
		else if (strcmp(s, "WATCH") == 0)
		else if (strcmp(s, "RW") == 0) {
			wflags = WA_READ|WA_WRITE;
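	/*
	 * Illustrative note only, not part of the original source: the
	 * settings parsed above are comma-separated keywords in the
	 * MALLOC_DEBUG environment variable, e.g.
	 *	MALLOC_DEBUG=WATCH	watch the heap for stray writes
	 *	MALLOC_DEBUG=RW,STOP	watch reads and writes, stop on fault
	 */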
	if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
	    fstat(ctlfd, &ctlstatb) != 0) {
		(void) close(ctlfd);

	(void) fcntl(ctlfd, F_SETFD, 1);

		/*
		 * Play together with some /proc controller
		 * that has set other stop-on-fault flags.
		 */
		premptyset(&ctl.fltset);
		if ((pfd = open("/proc/self/status", O_RDONLY)) >= 0) {
			if (read(pfd, &pstatus, sizeof (pstatus))
			    == sizeof (pstatus))
				ctl.fltset = pstatus.pr_flttrace;
		praddset(&ctl.fltset, FLTWATCH);
		(void) write(ctlfd, &ctl, sizeof (ctl));
	if (ctlfd < 0)	/* first time */
	else if (fstat(ctlfd, &statb) != 0 ||
	    statb.st_dev != ctlstatb.st_dev ||
	    statb.st_ino != ctlstatb.st_ino) {
		/*
		 * Someone closed our file descriptor.
		 * Just open another one.
		 */
		if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
		    fstat(ctlfd, &ctlstatb) != 0) {
			(void) close(ctlfd);

		(void) fcntl(ctlfd, F_SETFD, 1);

	if (my_pid != getpid()) {
		/*
		 * We fork()d since the last call to the allocator.
		 * watchpoints are not inherited across fork().
		 * XXX: how to recover from this ???
		 */
		(void) close(ctlfd);

	return (dont_watch);
static void
protect(TREE *tp)
{
	if (tp == NULL || DATA(tp) == Baddr)

	sz = size = SIZE(tp);

	if (ISBIT0(sz))	/* block is busy, protect only the head */

	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
	ctl.prwatch.pr_size = size + WORDSIZE;
	ctl.prwatch.pr_wflags = wflags;
	(void) write(ctlfd, &ctl, sizeof (ctl));

static void
unprotect(TREE *tp)
{
	if (tp == NULL || DATA(tp) == Baddr)

	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
	ctl.prwatch.pr_size = WORDSIZE;		/* size is arbitrary */
	ctl.prwatch.pr_wflags = 0;		/* clear the watched area */
	(void) write(ctlfd, &ctl, sizeof (ctl));
	(void) mutex_lock(&__watch_malloc_lock);

	(void) mutex_unlock(&__watch_malloc_lock);

#pragma init(malloc_init)

	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
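/*
 * Illustrative note only, not part of the original source: the two mutex
 * operations above presumably belong to the (elided) malloc_prepare() and
 * malloc_release() fork handlers registered here.  Acquiring the allocator
 * lock before fork() and releasing it again in both the parent and the
 * child ensures a child process never starts with __watch_malloc_lock held.
 */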