/* malloc.c - dynamic memory allocation for bush. */

/* Copyright (C) 1985-2020 Free Software Foundation, Inc.

   This file is part of GNU Bush, the Bourne-Again SHell.

   Bush is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   Bush is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with Bush.  If not, see <http://www.gnu.org/licenses/>.
*/
/*
 * @(#)nmalloc.c 1 (Caltech) 2/21/82
 *
 * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
 *
 * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
 *
 * [VERY] old explanation:
 *
 * This is a very fast storage allocator.  It allocates blocks of a small
 * number of different sizes, and keeps free lists of each size.  Blocks
 * that don't exactly fit are passed up to the next larger size.  In this
 * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
 * This is designed for use in a program that uses vast quantities of
 * memory, but bombs when it runs out.  To make it a little better, it
 * warns the user when he starts to get near the end.
 *
 * June 84, ACT: modified rcheck code to check the range given to malloc,
 * rather than the range determined by the 2-power used.
 *
 * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
 * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
 * You should call malloc_init to reinitialize after loading dumped Emacs.
 * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
 * realloc knows how to return same block given, just changing its size,
 * if the power of 2 is correct.
 */

/*
 * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
 * smallest allocatable block is 8 bytes.  The overhead information will
 * go in the first int of the block, and the returned pointer will point
 * to the second.
 */
/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
   uncover callers that refer to freed memory, and to have malloc() write 0xdf
   into memory as it's allocated to avoid referring to previous contents. */

/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
   handled by configure. */
#if defined (HAVE_CONFIG_H)
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#if defined (SHELL)
#  include "bushtypes.h"
#  include "stdc.h"
#else
#  include <sys/types.h>
#endif

#if defined (HAVE_UNISTD_H)
#  include <unistd.h>
#endif

/* Determine which kind of system this is. */
#include <signal.h>

#if defined (HAVE_STRING_H)
#  include <string.h>
#else
#  include <strings.h>
#endif
#include <errno.h>
#include <stdio.h>

#if !defined (botch)
#include <stdlib.h>
#endif

#if defined (HAVE_MMAP)
#include <sys/mman.h>
#endif

/* Define getpagesize () if the system does not. */
#ifndef HAVE_GETPAGESIZE
#  include "getpagesize.h"
#endif

#include "imalloc.h"
#ifdef MALLOC_STATS
#  include "mstats.h"
#endif
#ifdef MALLOC_REGISTER
#  include "table.h"
#endif
#ifdef MALLOC_WATCH
#  include "watch.h"
#endif
#ifdef powerof2
#  undef powerof2
#endif
/* Could also use (((x) & -(x)) == (x)) */
#define powerof2(x)	((((x) - 1) & (x)) == 0)
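/* For example: powerof2(64) == 1, since (63 & 64) == 0, while
   powerof2(48) == 0, since (47 & 48) == 32.  Note that powerof2(0)
   also evaluates to 1, so callers that care (e.g., posix_memalign
   below) must reject 0 separately. */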
/* System-specific omissions. */
#ifdef HPUX
#  define NO_VALLOC
#endif

/* SIZEOF_LONG * 4 - 2, usable bins from 1..NBUCKETS-1 */
#define NBUCKETS	30

#define ISALLOC ((char) 0xf7)	/* magic byte that implies allocation */
#define ISFREE ((char) 0x54)	/* magic byte that implies free block */
				/* this is for error checking only */
#define ISMEMALIGN ((char) 0xd6)  /* Stored before the value returned by
				     memalign, with the rest of the word
				     being the distance to the true
				     beginning of the block. */
/* We have a flag indicating whether memory is allocated, an index in
   nextf[], a size field, and a sentinel value to determine whether or
   not a caller wrote before the start of allocated memory; to realloc()
   memory we either copy mh_nbytes or just change mh_nbytes if there is
   enough room in the block for the new size.  Range checking is always
   done. */
union mhead {
#if SIZEOF_CHAR_P == 8
  bits64_t mh_align[2];						/* 16 */
#else
  bits64_t mh_align;						/* 8 */
#endif
  struct {
    char mi_alloc;		/* ISALLOC or ISFREE */		/* 1 */
    char mi_index;		/* index in nextf[] */		/* 1 */
    /* Remainder are valid only when block is allocated */
    u_bits16_t mi_magic2;	/* should be == MAGIC2 */	/* 2 */
    u_bits32_t mi_nbytes;	/* # of bytes allocated */	/* 4 */
#if SIZEOF_CHAR_P == 8
    char mi_magic8[8];		/* MAGIC1 guard bytes */	/* 8 */
#endif
  } minfo;
};
#define mh_alloc	minfo.mi_alloc
#define mh_index	minfo.mi_index
#define mh_nbytes	minfo.mi_nbytes
#define mh_magic2	minfo.mi_magic2
#define mh_magic8	minfo.mi_magic8
#define MOVERHEAD	sizeof(union mhead)

#if SIZEOF_CHAR_P == 8
#define MALIGN_MASK	15
#else
#define MALIGN_MASK	7	/* one less than desired alignment */
#endif

typedef union _malloc_guard {
  char s[4];
  u_bits32_t i;
} mguard_t;
/* Access free-list pointer of a block.
   It is stored at block + sizeof (char *).
   This is not a field in the minfo structure member of union mhead
   because we want sizeof (union mhead)
   to describe the overhead for when the block is in use,
   and we do not want the free-list pointer to count in that. */

/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
   the rest of the struct.  This may need adjusting. */
#define CHAIN(a) \
  (*(union mhead **) (sizeof (char *) + (char *) (a)))
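/* Illustration: on a 64-bit system the header fields sit at these offsets
   from the start of the block: mi_alloc at 0, mi_index at 1, mi_magic2 at
   2, mi_nbytes at 4, mi_magic8 at 8.  CHAIN(a) reads a pointer at offset
   sizeof (char *) == 8, so a free block's chain pointer overlays the
   mi_magic8 guard bytes, which only matter while the block is allocated. */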
/* To implement range checking, we write magic values in at the beginning
   and end of each allocated block, and make sure they are undisturbed
   whenever a free or a realloc occurs. */

/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
#define MAGIC1 0x55
#define MAGIC2 0x5555
#define MSLOP  4		/* 4 bytes extra for u_bits32_t size */
/* How many bytes are actually allocated for a request of size N --
   rounded up to nearest multiple of 2*SIZEOF_CHAR_P after accounting for
   malloc overhead. */
#define ALLOCATED_BYTES(n) \
	(((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
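/* Worked example, assuming a 64-bit system (MOVERHEAD == 16, MSLOP == 4,
   MALIGN_MASK == 15): a request for n == 100 bytes gives
     ALLOCATED_BYTES(100) == (100 + 16 + 4 + 15) & ~15 == 128
   i.e., a 16-byte header, 100 user bytes, a 4-byte trailing copy of the
   size, and 8 bytes of padding, which fits exactly in the 128-byte bin. */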
#define ASSERT(p) \
  do \
    { \
      if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
    } \
  while (0)
/* Minimum and maximum bucket indices for block splitting (and to bound
   the search for a block to split). */
#define SPLIT_MIN	2	/* XXX - was 3 */
#define SPLIT_MID	11
#define SPLIT_MAX	14

/* Minimum and maximum bucket indices for block coalescing. */
#define COMBINE_MIN	2
#define COMBINE_MAX	(pagebucket - 1)	/* XXX */

#define LESSCORE_MIN	10
#define LESSCORE_FRC	13

#define STARTBUCK	1
/* Should we use mmap for large allocations? */
#if defined (HAVE_MMAP)
#  if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
#    define MAP_ANONYMOUS MAP_ANON
#  endif
#endif

#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
#  define USE_MMAP
#endif

#if defined (USE_MMAP)
#  define MMAP_THRESHOLD	14	/* must be >= SPLIT_MAX, COMBINE_MAX */
#else
#  define MMAP_THRESHOLD	(8 * SIZEOF_LONG)
#endif
/* Flags for the internal functions. */
#define MALLOC_WRAPPER	0x01	/* wrapper function */
#define MALLOC_INTERNAL	0x02	/* internal function calling another */
#define MALLOC_NOTRACE	0x04	/* don't trace this allocation or free */
#define MALLOC_NOREG	0x08	/* don't register this allocation or free */

/* Future use. */
#define ERR_DUPFREE		0x01
#define ERR_UNALLOC		0x02
#define ERR_UNDERFLOW		0x04
#define ERR_ASSERT_FAILED	0x08
/* Evaluates to true if NB is appropriate for bucket NU.  NB is adjusted
   appropriately by the caller to account for malloc overhead.  This only
   checks that the recorded size is not too big for the bucket.  We
   can't check whether or not it's in between NU and NU-1 because we
   might have encountered a busy bucket when allocating and moved up to
   the next size. */
#define IN_BUCKET(nb, nu)	((nb) <= binsizes[(nu)])

/* Use this when we want to be sure that NB is in bucket NU. */
#define RIGHT_BUCKET(nb, nu) \
	(((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
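/* For example, with nb == 128: IN_BUCKET(128, nu) holds for every bucket
   of size 128 or larger (nu >= 4), while RIGHT_BUCKET(128, nu) holds only
   for nu == 4, since 128 > binsizes[3] == 64 and 128 <= binsizes[4] == 128. */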
/* nextf[i] is free list of blocks of size 2**(i + 3) */

static union mhead *nextf[NBUCKETS];

/* busy[i] is nonzero while allocation or free of block size i is in progress. */

static char busy[NBUCKETS];

static int pagesz;	/* system page size. */
static int pagebucket;	/* bucket for requests a page in size */
static int maxbuck;	/* highest bucket receiving allocation request. */

static char *memtop;	/* top of heap */

static const unsigned long binsizes[NBUCKETS] = {
	8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
	8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
	1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
	67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
	2147483648UL, 4294967295UL
};

/* binsizes[x] == (1 << ((x) + 3)), except the last bin, which is capped
   at 4294967295UL so it still fits in a 32-bit unsigned long. */
#define binsize(x)	binsizes[(x)]
#if !defined (errno)
extern int errno;
#endif

/* Declarations for internal functions */
static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
static void internal_free PARAMS((PTR_T, const char *, int, int));
static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
#ifndef NO_CALLOC
static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
static void internal_cfree PARAMS((PTR_T, const char *, int, int));
#endif
#ifndef NO_VALLOC
static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
#endif

#if defined (botch)
extern void botch ();
#else
static void botch PARAMS((const char *, const char *, int));
#endif
static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));

#if !HAVE_DECL_SBRK
extern char *sbrk ();
#endif /* !HAVE_DECL_SBRK */

#ifdef SHELL
extern int running_trap;
extern int signal_is_trapped PARAMS((int));
#endif

#ifdef MALLOC_STATS
struct _malstats _mstats;
#endif /* MALLOC_STATS */

/* Debugging variables available to applications. */
int malloc_flags = 0;	/* future use */
int malloc_trace = 0;	/* trace allocations and frees to stderr */
int malloc_register = 0;	/* future use */

/* Use a variable in case we want to dynamically adapt it in the future */
int malloc_mmap_threshold = MMAP_THRESHOLD;

#ifdef MALLOC_TRACE
char _malloc_trace_buckets[NBUCKETS];

/* These should really go into a header file. */
extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
#endif
346 #if !defined (botch)
347 static void
348 botch (s, file, line)
349 const char *s;
350 const char *file;
351 int line;
353 fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
354 (void)fflush (stderr);
355 abort ();
357 #endif
/* print the file and line number that caused the assertion failure and
   call botch() to do whatever the application wants with the information */
static void
xbotch (mem, e, s, file, line)
     PTR_T mem;
     int e;
     const char *s;
     const char *file;
     int line;
{
  fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
	   file ? file : _("unknown"), line);
#ifdef MALLOC_REGISTER
  if (mem != NULL && malloc_register)
    mregister_describe_mem (mem, stderr);
#endif
  (void)fflush (stderr);
  botch(s, file, line);
}
/* Coalesce two adjacent free blocks off the free list for size NU - 1,
   as long as we can find two adjacent free blocks.  nextf[NU - 1] is
   assumed to not be busy; the caller (morecore()) checks for this.
   BUSY[NU] must be set to 1. */
static void
bcoalesce (nu)
     register int nu;
{
  register union mhead *mp, *mp1, *mp2;
  register int nbuck;
  unsigned long siz;

  nbuck = nu - 1;
  if (nextf[nbuck] == 0 || busy[nbuck])
    return;

  busy[nbuck] = 1;
  siz = binsize (nbuck);

  mp2 = mp1 = nextf[nbuck];
  mp = CHAIN (mp1);
  while (mp && mp != (union mhead *)((char *)mp1 + siz))
    {
      mp2 = mp1;
      mp1 = mp;
      mp = CHAIN (mp);
    }

  if (mp == 0)
    {
      busy[nbuck] = 0;
      return;
    }

  /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
     CHAIN(mp2) must equal mp1.  Check that mp1 and mp are adjacent. */
  if (mp2 != mp1 && CHAIN(mp2) != mp1)
    {
      busy[nbuck] = 0;
      xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
    }

#ifdef MALLOC_DEBUG
  if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
    {
      busy[nbuck] = 0;
      return;	/* not adjacent */
    }
#endif

  /* Since they are adjacent, remove them from the free list */
  if (mp1 == nextf[nbuck])
    nextf[nbuck] = CHAIN (mp);
  else
    CHAIN (mp2) = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbcoalesce++;
  _mstats.ncoalesce[nbuck]++;
#endif

  /* And add the combined two blocks to nextf[NU]. */
  mp1->mh_alloc = ISFREE;
  mp1->mh_index = nu;
  CHAIN (mp1) = nextf[nu];
  nextf[nu] = mp1;
}
/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
   blocks of the correct size, and attach them to nextf[NU].  nextf[NU]
   is assumed to be empty.  Must be called with signals blocked (e.g.,
   by morecore()).  BUSY[NU] must be set to 1. */
static void
bsplit (nu)
     register int nu;
{
  register union mhead *mp;
  int nbuck, nblks, split_max;
  unsigned long siz;

  split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;

  if (nu >= SPLIT_MID)
    {
      for (nbuck = split_max; nbuck > nu; nbuck--)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }
  else
    {
      for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
	{
	  if (busy[nbuck] || nextf[nbuck] == 0)
	    continue;
	  break;
	}
    }

  if (nbuck > split_max || nbuck <= nu)
    return;

  /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
     and nbuck is below some threshold. */

  /* Remove the block from the chain of larger blocks. */
  busy[nbuck] = 1;
  mp = nextf[nbuck];
  nextf[nbuck] = CHAIN (mp);
  busy[nbuck] = 0;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nbuck]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);
  nblks = binsize (nbuck) / siz;

  /* Split the block and put it on the requested chain. */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;
}
/* Take the memory block MP and add it to a chain < NU.  NU is the right
   bucket, but is busy.  This avoids memory orphaning. */
static void
xsplit (mp, nu)
     union mhead *mp;
     int nu;
{
  union mhead *nh;
  int nbuck, nblks, split_max;
  unsigned long siz;

  nbuck = nu - 1;
  while (nbuck >= SPLIT_MIN && busy[nbuck])
    nbuck--;
  if (nbuck < SPLIT_MIN)
    return;

#ifdef MALLOC_STATS
  _mstats.tbsplit++;
  _mstats.nsplit[nu]++;
#endif

  /* Figure out how many blocks we'll get. */
  siz = binsize (nu);			/* original block size */
  nblks = siz / binsize (nbuck);	/* should be 2 most of the time */

  /* And add it to nextf[nbuck] */
  siz = binsize (nbuck);		/* XXX - resetting here */
  nh = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nbuck;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  busy[nbuck] = 1;
  CHAIN (mp) = nextf[nbuck];
  nextf[nbuck] = nh;
  busy[nbuck] = 0;
}
void
_malloc_block_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigfillset (setp);
  sigemptyset (osetp);
  sigprocmask (SIG_BLOCK, setp, osetp);
#else
#  if defined (HAVE_BSD_SIGNALS)
  *osetp = sigsetmask (-1);
#  endif
#endif
}

void
_malloc_unblock_signals (setp, osetp)
     sigset_t *setp, *osetp;
{
#ifdef HAVE_POSIX_SIGNALS
  sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
#else
#  if defined (HAVE_BSD_SIGNALS)
  sigsetmask (*osetp);
#  endif
#endif
}
/* Return some memory to the system by reducing the break.  This is only
   called with NU > pagebucket, so we're always assured of giving back
   more than one page of memory. */
static void
lesscore (nu)			/* give system back some memory */
     register int nu;		/* size index we're discarding */
{
  long siz;

  siz = binsize (nu);
  /* Should check for errors here, I guess. */
  sbrk (-siz);
  memtop -= siz;

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk -= siz;
  _mstats.nlesscore[nu]++;
#endif
}
/* Ask system for more memory; add to NEXTF[NU].  BUSY[NU] must be set to 1. */
static void
morecore (nu)
     register int nu;		/* size index to get more of */
{
  register union mhead *mp;
  register int nblks;
  register long siz;
  long sbrk_amt;		/* amount to get via sbrk() */
  sigset_t set, oset;
  int blocked_sigs;

  /* Block all signals in case we are executed from a signal handler. */
  blocked_sigs = 0;
#ifdef SHELL
#  if defined (SIGCHLD)
  if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
#  else
  if (running_trap || signal_is_trapped (SIGINT))
#  endif
#endif
    {
      _malloc_block_signals (&set, &oset);
      blocked_sigs = 1;
    }

  siz = binsize (nu);		/* size of desired block for nextf[nu] */

  if (siz < 0)
    goto morecore_done;		/* oops */

#ifdef MALLOC_STATS
  _mstats.nmorecore[nu]++;
#endif

  /* Try to split a larger block here, if we're within the range of sizes
     to split. */
  if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
    {
      bsplit (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
     if we can, and we're within the range of the block coalescing limits. */
  if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
    {
      bcoalesce (nu);
      if (nextf[nu] != 0)
	goto morecore_done;
    }

  /* Take at least a page, and figure out how many blocks of the requested
     size we're getting. */
  if (siz <= pagesz)
    {
      sbrk_amt = pagesz;
      nblks = sbrk_amt / siz;
    }
  else
    {
      /* We always want to request an integral multiple of the page size
	 from the kernel, so let's compute whether or not `siz' is such
	 an amount.  If it is, we can just request it.  If not, we want
	 the smallest integral multiple of pagesize that is larger than
	 `siz' and will satisfy the request. */
      sbrk_amt = siz & (pagesz - 1);
      if (sbrk_amt == 0)
	sbrk_amt = siz;
      else
	sbrk_amt = siz + pagesz - sbrk_amt;
      nblks = 1;
    }
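  /* e.g., with pagesz == 4096 and a hypothetical siz == 10240 (not an
     actual bin size, purely illustrative): 10240 & 4095 == 2048, so
     sbrk_amt becomes 10240 + 4096 - 2048 == 12288, three full pages. */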
#if defined (USE_MMAP)
  if (nu > malloc_mmap_threshold)
    {
      mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
      if ((void *)mp == MAP_FAILED)
	goto morecore_done;
      nextf[nu] = mp;
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      CHAIN (mp) = 0;
#ifdef MALLOC_STATS
      _mstats.nmmap++;
      _mstats.tmmap += sbrk_amt;
#endif
      goto morecore_done;
    }
#endif

#ifdef MALLOC_STATS
  _mstats.nsbrk++;
  _mstats.tsbrk += sbrk_amt;
#endif

  mp = (union mhead *) sbrk (sbrk_amt);

  /* Totally out of memory. */
  if ((long)mp == -1)
    goto morecore_done;

  memtop += sbrk_amt;

  /* shouldn't happen, but just in case -- require 8- or 16-byte alignment */
  if ((long)mp & MALIGN_MASK)
    {
      mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
      nblks--;
    }

  /* save new header and link the nblks blocks together */
  nextf[nu] = mp;
  while (1)
    {
      mp->mh_alloc = ISFREE;
      mp->mh_index = nu;
      if (--nblks <= 0) break;
      CHAIN (mp) = (union mhead *)((char *)mp + siz);
      mp = (union mhead *)((char *)mp + siz);
    }
  CHAIN (mp) = 0;

morecore_done:
  if (blocked_sigs)
    _malloc_unblock_signals (&set, &oset);
}
static void
malloc_debug_dummy ()
{
  write (1, "malloc_debug_dummy\n", 19);
}

#if SIZEOF_CHAR_P == 8
#define PREPOP_BIN	3
#define PREPOP_SIZE	64
#else
#define PREPOP_BIN	2
#define PREPOP_SIZE	32
#endif
static int
pagealign ()
{
  register int nunits;
  register union mhead *mp;
  long sbrk_needed;
  char *curbrk;

  pagesz = getpagesize ();
  if (pagesz < 1024)
    pagesz = 1024;

  /* OK, how much do we need to allocate to make things page-aligned?
     Some of this partial page will be wasted space, but we'll use as
     much as we can.  Once we figure out how much to advance the break
     pointer, go ahead and do it. */
  memtop = curbrk = sbrk (0);
  sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1));	/* sbrk(0) % pagesz */
  if (sbrk_needed < 0)
    sbrk_needed += pagesz;

  /* Now allocate the wasted space. */
  if (sbrk_needed)
    {
#ifdef MALLOC_STATS
      _mstats.nsbrk++;
      _mstats.tsbrk += sbrk_needed;
#endif
      curbrk = sbrk (sbrk_needed);
      if ((long)curbrk == -1)
	return -1;
      memtop += sbrk_needed;

      /* Take the memory which would otherwise be wasted and populate the most
	 popular bin (3 == 64 bytes) with it.  Add whatever we need to curbrk
	 to make things 64-byte aligned, compute how many 64-byte chunks we're
	 going to get, and set up the bin. */
      curbrk += sbrk_needed & (PREPOP_SIZE - 1);
      sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
      nunits = sbrk_needed / PREPOP_SIZE;
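      /* Illustrative numbers: if the old break ended in 0x234 with
	 pagesz == 4096, then sbrk_needed == 4096 - 0x234 == 3532; of
	 that, 3532 & 63 == 12 bytes are skipped for 64-byte alignment,
	 leaving 3520 bytes, i.e. 55 chunks for bin PREPOP_BIN. */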
      if (nunits > 0)
	{
	  mp = (union mhead *)curbrk;

	  nextf[PREPOP_BIN] = mp;
	  while (1)
	    {
	      mp->mh_alloc = ISFREE;
	      mp->mh_index = PREPOP_BIN;
	      if (--nunits <= 0) break;
	      CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
	      mp = (union mhead *)((char *)mp + PREPOP_SIZE);
	    }
	  CHAIN(mp) = 0;
	}
    }

  /* compute which bin corresponds to the page size. */
  for (nunits = 7; nunits < NBUCKETS; nunits++)
    if (pagesz <= binsize(nunits))
      break;
  pagebucket = nunits;

  return 0;
}
static PTR_T
internal_malloc (n, file, line, flags)		/* get a block */
     size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register int nunits;
  register char *m, *z;
  long nbytes;
  mguard_t mg;

  /* Get the system page size and align break pointer so future sbrks will
     be page-aligned.  The page size must be at least 1K -- anything
     smaller is increased. */
  if (pagesz == 0)
    if (pagealign () < 0)
      return ((PTR_T)NULL);

  /* Figure out how many bytes are required, rounding up to the nearest
     multiple of 8, then figure out which nextf[] area to use.  Try to
     be smart about where to start searching -- if the number of bytes
     needed is greater than the page size, we can start at pagebucket. */
  nbytes = ALLOCATED_BYTES(n);
  nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
  for ( ; nunits < NBUCKETS; nunits++)
    if (nbytes <= binsize(nunits))
      break;

  /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */
  if (nunits >= NBUCKETS)
    return ((PTR_T) NULL);

  /* In case this is reentrant use of malloc from signal handler,
     pick a block size that no other malloc level is currently
     trying to allocate.  That's the easiest harmless way not to
     interfere with the other level of execution. */
#ifdef MALLOC_STATS
  if (busy[nunits]) _mstats.nrecurse++;
#endif
  while (busy[nunits]) nunits++;
  busy[nunits] = 1;

  if (nunits > maxbuck)
    maxbuck = nunits;

  /* If there are no blocks of the appropriate size, go get some */
  if (nextf[nunits] == 0)
    morecore (nunits);

  /* Get one block off the list, and set the new list head */
  if ((p = nextf[nunits]) == NULL)
    {
      busy[nunits] = 0;
      return NULL;
    }
  nextf[nunits] = CHAIN (p);
  busy[nunits] = 0;

  /* Check for free block clobbered */
  /* If not for this check, we would gobble a clobbered free chain ptr
     and bomb out on the NEXT allocate of this size block */
  if (p->mh_alloc != ISFREE || p->mh_index != nunits)
    xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);

  /* Fill in the info, and set up the magic numbers for range checking. */
  p->mh_alloc = ISALLOC;
  p->mh_magic2 = MAGIC2;
  p->mh_nbytes = n;

#if SIZEOF_CHAR_P == 8
  /* Begin guard */
  MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, 8);
#endif

  /* End guard */
  mg.i = n;
  z = mg.s;
  m = (char *) (p + 1) + n;
  *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

#ifdef MEMSCRAMBLE
  if (n)
    MALLOC_MEMSET ((char *)(p + 1), 0xdf, n);	/* scramble previous contents */
#endif
#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]++;
  _mstats.tmalloc[nunits]++;
  _mstats.nmal++;
  _mstats.bytesreq += n;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("malloc", p + 1, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("malloc", p + 1, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
#endif

#if defined (MALLOC_DEBUG)
  z = (char *) (p + 1);
  /* Check alignment of returned pointer */
  if ((unsigned long)z & MALIGN_MASK)
    fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
	     file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
#endif

  return (PTR_T) (p + 1);
}
static void
internal_free (mem, file, line, flags)
     PTR_T mem;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register char *ap, *z;
  register int nunits;
  register unsigned int nbytes;
  int ubytes;		/* caller-requested size */
  mguard_t mg;

  if ((ap = (char *)mem) == 0)
    return;

  p = (union mhead *) ap - 1;

  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
  if (malloc_trace || malloc_register || _malloc_nwatch > 0)
    ubytes = p->mh_nbytes;
#endif

  if (p->mh_alloc != ISALLOC)
    {
      if (p->mh_alloc == ISFREE)
	xbotch (mem, ERR_DUPFREE,
		_("free: called with already freed block argument"), file, line);
      else
	xbotch (mem, ERR_UNALLOC,
		_("free: called with unallocated block argument"), file, line);
    }

  ASSERT (p->mh_magic2 == MAGIC2);

  nunits = p->mh_index;
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */

  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("free: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("free: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  ap += p->mh_nbytes;
  z = mg.s;
  *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);

#if defined (USE_MMAP)
  if (nunits > malloc_mmap_threshold)
    {
      munmap (p, binsize (nunits));
#if defined (MALLOC_STATS)
      _mstats.nlesscore[nunits]++;
#endif
      goto free_return;
    }
#endif

#if GLIBC21
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == sbrk (0)))
#else
  if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
#endif
    {
      /* If above LESSCORE_FRC, give back unconditionally.  This should be set
	 high enough to be infrequently encountered.  If between LESSCORE_MIN
	 and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
	 there's already a block on the free list. */
      if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
	{
	  lesscore (nunits);
	  /* keeps the tracing and registering code in one place */
	  goto free_return;
	}
    }

#ifdef MEMSCRAMBLE
  if (p->mh_nbytes)
    MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
#endif

  ASSERT (nunits < NBUCKETS);

  if (busy[nunits] == 1)
    {
      xsplit (p, nunits);	/* split block and add to different chain */
      goto free_return;
    }

  p->mh_alloc = ISFREE;
  /* Protect against signal handlers calling malloc. */
  busy[nunits] = 1;
  /* Put this block on the free list. */
  CHAIN (p) = nextf[nunits];
  nextf[nunits] = p;
  busy[nunits] = 0;

free_return:
  ;		/* Empty statement in case this is the end of the function */

#ifdef MALLOC_STATS
  _mstats.nmalloc[nunits]--;
  _mstats.nfre++;
#endif /* MALLOC_STATS */

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_free (mem, ubytes, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_free (mem, ubytes, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
#endif
}
static PTR_T
internal_realloc (mem, n, file, line, flags)
     PTR_T mem;
     register size_t n;
     const char *file;
     int line, flags;
{
  register union mhead *p;
  register u_bits32_t tocopy;
  register unsigned int nbytes;
  register int nunits;
  register char *m, *z;
  mguard_t mg;

#ifdef MALLOC_STATS
  _mstats.nrealloc++;
#endif

  if (n == 0)
    {
      internal_free (mem, file, line, MALLOC_INTERNAL);
      return (NULL);
    }
  if ((p = (union mhead *) mem) == 0)
    return internal_malloc (n, file, line, MALLOC_INTERNAL);

  p--;
  nunits = p->mh_index;
  ASSERT (nunits < NBUCKETS);

  if (p->mh_alloc != ISALLOC)
    xbotch (mem, ERR_UNALLOC,
	    _("realloc: called with unallocated block argument"), file, line);

  ASSERT (p->mh_magic2 == MAGIC2);
  nbytes = ALLOCATED_BYTES(p->mh_nbytes);
  /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
     are now used for the number of bytes allocated, a simple check of
     mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
     We sanity-check the value of mh_nbytes against the size of the blocks
     in the appropriate bucket before we use it.  This can still cause problems
     and obscure errors if mh_nbytes is wrong but still within range; the
     checks against the size recorded at the end of the chunk will probably
     fail then.  Using MALLOC_REGISTER will help here, since it saves the
     original number of bytes requested. */
  if (IN_BUCKET(nbytes, nunits) == 0)
    xbotch (mem, ERR_UNDERFLOW,
	    _("realloc: underflow detected; mh_nbytes out of range"), file, line);
#if SIZEOF_CHAR_P == 8
  {
    int i;
    for (i = 0, z = p->mh_magic8; i < 8; i++)
      if (*z++ != MAGIC1)
	xbotch (mem, ERR_UNDERFLOW,
		_("realloc: underflow detected; magic8 corrupted"), file, line);
  }
#endif

  m = (char *)mem + (tocopy = p->mh_nbytes);
  z = mg.s;
  *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
  if (mg.i != p->mh_nbytes)
    xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
#endif
#ifdef MALLOC_STATS
  _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
#endif

  /* If we're reallocating to the same size as previously, return now */
  if (n == p->mh_nbytes)
    return mem;

  /* See if desired size rounds to same power of 2 as actual size. */
  nbytes = ALLOCATED_BYTES(n);

  /* If ok, use the same block, just marking its size as changed. */
  if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
    {
      /* Compensate for increment above. */
      m -= 4;

      *m++ = 0;  *m++ = 0;  *m++ = 0;  *m++ = 0;
      m = (char *)mem + (p->mh_nbytes = n);

      mg.i = n;
      z = mg.s;
      *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;

      return mem;
    }

  if (n < tocopy)
    tocopy = n;

#ifdef MALLOC_STATS
  _mstats.nrcopy++;
#endif

  /* If we are using mmap and have mremap, we could use it here. */

  if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
    return 0;
  FASTCOPY (mem, m, tocopy);
  internal_free (mem, file, line, MALLOC_INTERNAL);

#ifdef MALLOC_TRACE
  if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
    mtrace_alloc ("realloc", m, n, file, line);
  else if (_malloc_trace_buckets[nunits])
    mtrace_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_REGISTER
  if (malloc_register && (flags & MALLOC_NOREG) == 0)
    mregister_alloc ("realloc", m, n, file, line);
#endif

#ifdef MALLOC_WATCH
  if (_malloc_nwatch > 0)
    _malloc_ckwatch (m, file, line, W_RESIZED, n);
#endif

  return m;
}
static PTR_T
internal_memalign (alignment, size, file, line, flags)
     size_t alignment;
     size_t size;
     const char *file;
     int line, flags;
{
  register char *ptr;
  register char *aligned;
  register union mhead *p;

  ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);

  if (ptr == 0)
    return 0;
  /* If entire block has the desired alignment, just accept it. */
  if (((long) ptr & (alignment - 1)) == 0)
    return ptr;
  /* Otherwise, get address of byte in the block that has that alignment. */
  aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));

  /* Store a suitable indication of how to free the block,
     so that free can find the true beginning of it. */
  p = (union mhead *) aligned - 1;
  p->mh_nbytes = aligned - ptr;
  p->mh_alloc = ISMEMALIGN;

  return aligned;
}
int
posix_memalign (memptr, alignment, size)
     void **memptr;
     size_t alignment, size;
{
  void *mem;

  /* Perform posix-mandated error checking here */
  if ((alignment % sizeof (void *) != 0) || alignment == 0)
    return EINVAL;
  else if (powerof2 (alignment) == 0)
    return EINVAL;

  mem = internal_memalign (alignment, size, (char *)0, 0, 0);
  if (mem != 0)
    {
      *memptr = mem;
      return 0;
    }
  return ENOMEM;
}
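/* Typical use (illustrative):

     void *p;
     if (posix_memalign (&p, 64, 1000) == 0)
       ... p is 64-byte aligned; release it with free (p) ...

   64 is accepted because it is a nonzero power of two and a multiple of
   sizeof (void *); an alignment of 48, say, would return EINVAL. */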
size_t
malloc_usable_size (mem)
     void *mem;
{
  register union mhead *p;
  register char *ap;
  register int maxbytes;

  if ((ap = (char *)mem) == 0)
    return 0;

  /* Find the true start of the memory block to discover which bin */
  p = (union mhead *) ap - 1;
  if (p->mh_alloc == ISMEMALIGN)
    {
      ap -= p->mh_nbytes;
      p = (union mhead *) ap - 1;
    }

  /* XXX - should we return 0 if ISFREE? */
  maxbytes = binsize(p->mh_index);

  /* So the usable size is the maximum number of bytes in the bin less the
     malloc overhead */
  maxbytes -= MOVERHEAD + MSLOP;
  return (maxbytes);
}
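/* Continuing the earlier worked example: a 100-byte request on a 64-bit
   system lands in the 128-byte bin, so malloc_usable_size reports
   128 - MOVERHEAD - MSLOP == 128 - 16 - 4 == 108 usable bytes. */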
#if !defined (NO_VALLOC)
/* This runs into trouble with getpagesize on HPUX, and Multimax machines.
   Patching out seems cleaner than the ugly fix needed. */
static PTR_T
internal_valloc (size, file, line, flags)
     size_t size;
     const char *file;
     int line, flags;
{
  return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_VALLOC */

#ifndef NO_CALLOC
static PTR_T
internal_calloc (n, s, file, line, flags)
     size_t n, s;
     const char *file;
     int line, flags;
{
  size_t total;
  PTR_T result;

  total = n * s;
  result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
  if (result)
    memset (result, 0, total);
  return result;
}

static void
internal_cfree (p, file, line, flags)
     PTR_T p;
     const char *file;
     int line, flags;
{
  internal_free (p, file, line, flags|MALLOC_INTERNAL);
}
#endif /* !NO_CALLOC */
#ifdef MALLOC_STATS
int
malloc_free_blocks (size)
     int size;
{
  int nfree;
  register union mhead *p;

  nfree = 0;
  for (p = nextf[size]; p; p = CHAIN (p))
    nfree++;

  return nfree;
}
#endif
#if defined (MALLOC_WRAPFUNCS)
PTR_T
sh_malloc (bytes, file, line)
     size_t bytes;
     const char *file;
     int line;
{
  return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_realloc (ptr, size, file, line)
     PTR_T ptr;
     size_t size;
     const char *file;
     int line;
{
  return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
}

void
sh_free (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_free (mem, file, line, MALLOC_WRAPPER);
}

PTR_T
sh_memalign (alignment, size, file, line)
     size_t alignment;
     size_t size;
     const char *file;
     int line;
{
  return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
}

#ifndef NO_CALLOC
PTR_T
sh_calloc (n, s, file, line)
     size_t n, s;
     const char *file;
     int line;
{
  return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
}

void
sh_cfree (mem, file, line)
     PTR_T mem;
     const char *file;
     int line;
{
  internal_cfree (mem, file, line, MALLOC_WRAPPER);
}
#endif

#ifndef NO_VALLOC
PTR_T
sh_valloc (size, file, line)
     size_t size;
     const char *file;
     int line;
{
  return internal_valloc (size, file, line, MALLOC_WRAPPER);
}
#endif /* !NO_VALLOC */

#endif /* MALLOC_WRAPFUNCS */
/* Externally-available functions that call their internal counterparts. */

PTR_T
malloc (size)
     size_t size;
{
  return internal_malloc (size, (char *)NULL, 0, 0);
}

PTR_T
realloc (mem, nbytes)
     PTR_T mem;
     size_t nbytes;
{
  return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
}

void
free (mem)
     PTR_T mem;
{
  internal_free (mem, (char *)NULL, 0, 0);
}

PTR_T
memalign (alignment, size)
     size_t alignment;
     size_t size;
{
  return internal_memalign (alignment, size, (char *)NULL, 0, 0);
}

#ifndef NO_VALLOC
PTR_T
valloc (size)
     size_t size;
{
  return internal_valloc (size, (char *)NULL, 0, 0);
}
#endif

#ifndef NO_CALLOC
PTR_T
calloc (n, s)
     size_t n, s;
{
  return internal_calloc (n, s, (char *)NULL, 0, 0);
}

void
cfree (mem)
     PTR_T mem;
{
  internal_cfree (mem, (char *)NULL, 0, 0);
}
#endif