/* malloc.c - dynamic memory allocation for bush. */

/* Copyright (C) 1985-2020 Free Software Foundation, Inc.

   This file is part of GNU Bush, the Bourne-Again SHell.

   Bush is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bush is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bush.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 *	U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 *	Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * [VERY] old explanation:
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */
/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 */
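/* Illustration (not part of the original source): under the 2^(i+3) rule,
   nextf[0] chains 8-byte blocks, nextf[2] 32-byte blocks, and nextf[4]
   128-byte blocks; a request is served from the smallest bucket whose
   block size can hold the request plus its bookkeeping overhead. */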
/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */
#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bushtypes.h"
#  include "stdc.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is. */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif

#include <errno.h>
#include <stdio.h>

#if defined (HAVE_MMAP)
#  include <sys/mman.h>
#endif

/* Define getpagesize () if the system does not. */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
#  include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
#ifdef MALLOC_WATCH
#  include "watch.h"
#endif
/* Could also use (((x) & -(x)) == (x)) */
#define powerof2(x)	((((x) - 1) & (x)) == 0)

/* System-specific omissions. */
#ifdef HPUX
#  define NO_VALLOC
#endif

/* SIZEOF_LONG * 4 - 2, usable bins from 1..NBUCKETS-1 */
#define NBUCKETS	30
#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block. */
/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
#if SIZEOF_CHAR_P == 8
  bits64_t mh_align[2];					/* 16 */
#else
  bits64_t mh_align;					/* 8 */
#endif
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
#if SIZEOF_CHAR_P == 8
    char mi_magic8[8];		/* MAGIC1 guard bytes */	/* 8 */
#endif
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define mh_magic8	minfo.mi_magic8

#define MOVERHEAD	sizeof(union mhead)
#if SIZEOF_CHAR_P == 8
#define MALIGN_MASK	15
#else
#define MALIGN_MASK	7	/* one less than desired alignment */
#endif

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;
/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that. */

/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
   the rest of the struct. This may need adjusting. */
#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
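/* Illustration (not part of the original source): a free-list walk using
   CHAIN; malloc_free_blocks() near the bottom of this file performs the
   same traversal to count the blocks chained into one bucket.

     register union mhead *mp;
     for (mp = nextf[bucket]; mp; mp = CHAIN (mp))
       nfree++;
*/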
/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
#define MAGIC1 0x55
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */
/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 2*SIZEOF_CHAR_P after accounting for
   malloc overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
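/* Worked example (not part of the original source): on a 64-bit build,
   MOVERHEAD is 16, MSLOP is 4, and MALIGN_MASK is 15, so a 100-byte
   request needs (100 + 16 + 4 + 15) & ~15 == 128 bytes, which lands
   exactly in the 128-byte bucket. */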
#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)
/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */
#define SPLIT_MID	11
#define SPLIT_MAX	14

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1
/* Should we use mmap for large allocations? */
#if defined (HAVE_MMAP)
#  if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
#    define MAP_ANONYMOUS MAP_ANON
#  endif
#endif

#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
#  define USE_MMAP
#endif

#if defined (USE_MMAP)
#  define MMAP_THRESHOLD	14	/* must be >= SPLIT_MAX, COMBINE_MAX */
#else
#  define MMAP_THRESHOLD	(8 * SIZEOF_LONG)
#endif
/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08
/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
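/* Illustration (not part of the original source): for an adjusted size of
   nb == 128, RIGHT_BUCKET(128, 4) is true because binsizes[3] == 64 and
   binsizes[4] == 128, while IN_BUCKET(128, 5) is also true -- the looser
   check admits any bucket at least as large as the recorded size. */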
/* nextf[i] is free list of blocks of size 2**(i + 3) */
static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */
static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 3)), except the last bucket, which is
   capped at 2^32 - 1. */
#define binsize(x)	binsizes[(x)]
/* Declarations for internal functions */
static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
static void internal_free PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
static void internal_cfree PARAMS((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
#endif

#if defined (botch)
extern void botch ();
#else
static void botch PARAMS((const char *, const char *, int));
#endif
static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */
#ifdef SHELL
extern int running_trap;
extern int signal_is_trapped PARAMS((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */
/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

/* Use a variable in case we want to dynamically adapt it in the future */
int malloc_mmap_threshold = MMAP_THRESHOLD;

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
#endif
#if !defined (botch)
static void
botch (s, file, line)
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
  (void)fflush (stderr);
  abort ();
}
#endif
/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
		file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif /* MALLOC_REGISTER */
  (void)fflush (stderr);
  botch(s, file, line);
}
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU -1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif /* MALLOC_STATS */

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
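/* Illustration (not part of the original source): if two 64-byte blocks
   on nextf[3] happen to sit back-to-back in memory, bcoalesce(4) unlinks
   the pair and pushes the combined 128 bytes onto nextf[4]. */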
/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif /* MALLOC_STATS */

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
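/* Illustration (not part of the original source): with nextf[2] empty,
   bsplit(2) can take one 128-byte block off nextf[4] and carve it into
   four 32-byte blocks, all chained onto nextf[2]. */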
/* Take the memory block MP and add it to a chain < NU.  NU is the right bucket,
   but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tsplit++;
  _mstats.nsplit[nu]++;
#endif /* MALLOC_STATS */

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}
void
_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

void
_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding  */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif /* MALLOC_STATS */
}
/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of  */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
#  if defined (SIGCHLD)
  if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#  else
  if (running_trap || signal_is_trapped (SIGINT))
#  endif
#endif
    {
      _malloc_block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }
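  /* Worked example (not part of the original source): with pagesz == 4096
     and siz == 6000, the remainder is 6000 & 4095 == 1904, so sbrk_amt
     becomes 6000 + 4096 - 1904 == 8192 -- the smallest page multiple that
     holds the request. */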
#if defined (USE_MMAP)
  if (nu > malloc_mmap_threshold)
    {
      mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if ((void *)mp == MAP_FAILED)
	goto morecore_done;
      nextf[nu] = mp;
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      CHAIN (mp) = 0;
#ifdef MALLOC_STATS
      _mstats.nmmap++;
      _mstats.tmmap += sbrk_amt;
#endif
      goto morecore_done;
    }
#endif /* USE_MMAP */

#ifdef MALLOC_STATS
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8- or 16-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    _malloc_unblock_signals (&set, &oset);
}
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}
#if SIZEOF_CHAR_P == 8
#define PREPOP_BIN	3
#define PREPOP_SIZE	64
#else
#define PREPOP_BIN	2
#define PREPOP_SIZE	32
#endif

static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (3 == 64 bytes) with it.  Add whatever we need to curbrk
	 to make things 64-byte aligned, compute how many 64-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;

      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;
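  /* Worked example (not part of the original source): a 100-byte request
     on a 64-bit build gives nbytes == ALLOCATED_BYTES(100) == 128, so the
     scan starts at STARTBUCK and stops at nunits == 4, where
     binsize(4) == 128. */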
  /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

#if SIZEOF_CHAR_P == 8
  /* Set the leading guard bytes. */
  MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, 8);
#endif

  /* Write the request size after the block as the trailing guard. */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

#if defined (MALLOC_DEBUG)
  z = (char *) (p + 1);
  /* Check alignment of returned pointer */
  if ((unsigned long)z & MALIGN_MASK)
    fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
	file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
#endif

  return (PTR_T) (p + 1);
}
static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
  if (malloc_trace || malloc_register || _malloc_nwatch > 0)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("free: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if defined (USE_MMAP)
  if (nunits > malloc_mmap_threshold)
    {
      munmap (p, binsize (nunits));
#if defined (MALLOC_STATS)
      _mstats.nlesscore[nunits]++;
#endif
      goto free_return;
    }
#endif /* USE_MMAP */

#if 0
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == sbrk (0)))
#else
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;	/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}
static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("realloc: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* If we're reallocating to the same size as previously, return now */
  if (n == p->mh_nbytes)
    return mem;

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed. */
  if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
    {
      /* Compensate for increment above. */
      m -= 4;

      *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }
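  /* Illustration (not part of the original source): growing a 100-byte
     block to 105 bytes keeps ALLOCATED_BYTES(n) at 128, so the block stays
     in its bucket and only mh_nbytes and the trailing size guard are
     rewritten; growing it to 200 bytes (224 after rounding) falls through
     to the malloc/copy/free path below. */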
#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  /* If we are using mmap and have mremap, we could use it here. */

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
int
posix_memalign (memptr, alignment, size)
     void **memptr;
     size_t alignment, size;
{
  void *mem;

  /* Perform posix-mandated error checking here */
  if ((alignment % sizeof (void *) != 0) || alignment == 0)
    return EINVAL;
  else if (powerof2 (alignment) == 0)
    return EINVAL;

  mem = internal_memalign (alignment, size, (char *)0, 0, 0);
  if (mem != 0)
    {
      *memptr = mem;
      return 0;
    }
  return ENOMEM;
}
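/* Usage sketch (not part of the original source):

     void *buf;
     if (posix_memalign (&buf, 64, 1024) == 0)
       ... buf holds 1024 bytes aligned to a 64-byte boundary ...

   An alignment of zero, one that is not a power of two, or one that is
   not a multiple of sizeof (void *) makes the call return EINVAL. */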
size_t
malloc_usable_size (mem)
     void *mem;
{
  register union mhead *p;
  register char *ap;
  register int maxbytes;

  if ((ap = (char *)mem) == 0)
    return 0;

  /* Find the true start of the memory block to discover which bin */
  p = (union mhead *) ap - 1;
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  /* XXX - should we return 0 if ISFREE? */
  maxbytes = binsize(p->mh_index);

  /* So the usable size is the maximum number of bytes in the bin less the
     malloc overhead */
  maxbytes -= MOVERHEAD + MSLOP;
  return (maxbytes);
}
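/* Worked example (not part of the original source): a 100-byte allocation
   lives in the 128-byte bin on a 64-bit build, so malloc_usable_size()
   reports 128 - MOVERHEAD - MSLOP == 128 - 16 - 4 == 108 bytes, slightly
   more than was requested. */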
#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */
#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */
#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif /* MALLOC_STATS */
#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_CALLOC */

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */
/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif /* !NO_CALLOC */
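#if 0
/* Example driver (not part of the original source), kept inside #if 0 so
   it never compiles into the library.  It exercises the public entry
   points defined above; with MALLOC_WRAPFUNCS, the sh_* wrappers would
   additionally record the file and line of each call site. */
int
main ()
{
  char *s;

  s = malloc (100);		/* served from the 128-byte bucket */
  s = realloc (s, 105);		/* same bucket: size field updated in place */
  s = realloc (s, 200);		/* new bucket: malloc + copy + free */
  free (s);
  return 0;
}
#endif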