1 /*
2 * mem.c - memory management
4 * This file is part of zsh, the Z shell.
6 * Copyright (c) 1992-1997 Paul Falstad
7 * All rights reserved.
9 * Permission is hereby granted, without written agreement and without
10 * license or royalty fees, to use, copy, modify, and distribute this
11 * software and to distribute modified versions of this software for any
12 * purpose, provided that the above copyright notice and the following
13 * two paragraphs appear in all copies of this software.
15 * In no event shall Paul Falstad or the Zsh Development Group be liable
16 * to any party for direct, indirect, special, incidental, or consequential
17 * damages arising out of the use of this software and its documentation,
18 * even if Paul Falstad and the Zsh Development Group have been advised of
19 * the possibility of such damage.
21 * Paul Falstad and the Zsh Development Group specifically disclaim any
22 * warranties, including, but not limited to, the implied warranties of
23 * merchantability and fitness for a particular purpose. The software
24 * provided hereunder is on an "as is" basis, and Paul Falstad and the
25 * Zsh Development Group have no obligation to provide maintenance,
26 * support, updates, enhancements, or modifications.
30 #include "zsh.mdh"
31 #include "mem.pro"
34 There are two ways to allocate memory in zsh. The first way is
35 to call zalloc/zshcalloc, which call malloc/calloc directly. It
36 is legal to call realloc() or free() on memory allocated this way.
37 The second way is to call zhalloc/hcalloc, which allocates memory
38 from one of the memory pools on the heap stack. Such memory pools
39 will automatically be created when the heap allocation routines are
40 called. To be sure that they are freed at appropriate times
41 one should call pushheap() before one starts using heaps and
42 popheap() after that (when the memory allocated on the heaps since
43 the last pushheap() isn't needed anymore).
44 pushheap() saves the states of all currently allocated heaps and
45 popheap() resets them to the last state saved and destroys the
46 information about that state. If you call pushheap(),
47 allocate some memory on the heaps, and then reach a point where
48 you don't need that memory anymore but still want
49 to allocate memory on the heap, you should call freeheap(). This
50 works like popheap(), except that it doesn't free the information
51 about the heap states (i.e. the heaps are as they were just after
52 the call to pushheap(), and you still have to call popheap() later).
54 Memory allocated in this way does not have to be freed explicitly;
55 it will all be freed when the pool is destroyed. In fact,
56 attempting to free this memory may result in a core dump.
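As an illustrative sketch (not code taken from elsewhere in the shell), a
typical caller of these routines therefore looks roughly like this:

    pushheap();              save the state of all heaps
    p = zhalloc(n);          scratch allocations on the current pool
    ...
    freeheap();              discard them, but keep the saved state
    q = zhalloc(m);          more scratch allocations
    ...
    popheap();               restore the state saved by pushheap()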
58 If possible, the heaps are allocated using mmap() so that the
59 (*real*) heap isn't filled up with empty zsh heaps. If mmap()
60 is not available and zsh's own allocator is used, we use a simple trick
61 to avoid that: we allocate a large block of memory before allocating
62 a heap pool; this memory is freed again immediately after the pool
63 is allocated. If there are only small blocks on the free list this
64 guarantees that the memory for the pool is at the end of the memory
65 which means that we can give it back to the system when the pool is
66 freed.
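On such systems the trick looks roughly like this (an illustrative sketch
of what the ZSH_MEM code in zhalloc() below does, not extra code to add):

    pad = malloc(HEAPFREE);      large dummy block allocated first
    h = zalloc(n);               the pool ends up above the dummy block
    zfree(pad, HEAPFREE);        dummy freed; the pool now sits at the top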
68 hrealloc(char *p, size_t old, size_t new) is an optimisation
69 with a similar interface to realloc(). Typically the new size
70 will be larger than the old one, since there is no gain in
71 shrinking the allocation (indeed, that will confuse hrealloc()
72 since it will forget that the unused space once belonged to this
73 pointer). However, new == 0 is a special case; then if we
74 had to allocate a special heap for this memory it is freed at
75 that point.
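A minimal usage sketch of hrealloc() (the caller and the sizes here are
hypothetical, invented for illustration):

    p = zhalloc(old);               initial allocation on the heap
    ...
    p = hrealloc(p, old, new);      grow it, copying if necessary
    ...
    hrealloc(p, new, 0);            new == 0: release any dedicated heap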
78 #if defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP) && defined(HAVE_MUNMAP)
80 #include <sys/mman.h>
82 #if defined(MAP_ANONYMOUS) && defined(MAP_PRIVATE)
84 #define USE_MMAP 1
85 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE)
87 #endif
88 #endif
90 #ifdef ZSH_MEM_WARNING
91 # ifndef DEBUG
92 # define DEBUG 1
93 # endif
94 #endif
96 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
98 static int h_m[1025], h_push, h_pop, h_free;
100 #endif
102 /* Make sure we align to the longest fundamental type. */
103 union mem_align {
104 zlong l;
105 double d;
108 #define H_ISIZE sizeof(union mem_align)
109 #define HEAPSIZE (16384 - H_ISIZE)
110 /* Memory available for user data in a default-size heap */
111 #define HEAP_ARENA_SIZE (HEAPSIZE - sizeof(struct heap))
112 #define HEAPFREE (16384 - H_ISIZE)
114 /* Memory available for user data in heap h */
115 #define ARENA_SIZEOF(h) ((h)->size - sizeof(struct heap))
117 /* list of zsh heaps */
119 static Heap heaps;
121 /* a heap with free space, not always correct (it will be the last heap
122 * if that was newly allocated but it may also be another one) */
124 static Heap fheap;
126 /* Use new heaps from now on. This returns the old heap-list. */
128 /**/
129 mod_export Heap
130 new_heaps(void)
132 Heap h;
134 queue_signals();
135 h = heaps;
137 fheap = heaps = NULL;
138 unqueue_signals();
140 return h;
143 /* Reinstall the old heaps, freeing the new ones. */
145 /**/
146 mod_export void
147 old_heaps(Heap old)
149 Heap h, n;
151 queue_signals();
152 for (h = heaps; h; h = n) {
153 n = h->next;
154 DPUTS(h->sp, "BUG: old_heaps() with pushed heaps");
155 #ifdef USE_MMAP
156 munmap((void *) h, h->size);
157 #else
158 zfree(h, HEAPSIZE);
159 #endif
161 heaps = old;
162 fheap = NULL;
163 unqueue_signals();
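/*
 * A sketch of how the two functions above are typically used together
 * (illustrative only, not code from this file): run some work on a fresh,
 * private heap list and then restore the previous one.
 *
 *     Heap old = new_heaps();       heaps and fheap now start out empty
 *     ... zhalloc()/pushheap()/popheap() as usual ...
 *     old_heaps(old);               free the private heaps, reinstall old
 */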
166 /* Temporarily switch to other heaps (or back again). */
168 /**/
169 mod_export Heap
170 switch_heaps(Heap new)
172 Heap h;
174 queue_signals();
175 h = heaps;
177 heaps = new;
178 fheap = NULL;
179 unqueue_signals();
181 return h;
184 /* save states of zsh heaps */
186 /**/
187 mod_export void
188 pushheap(void)
190 Heap h;
191 Heapstack hs;
193 queue_signals();
195 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
196 h_push++;
197 #endif
199 for (h = heaps; h; h = h->next) {
200 DPUTS(!h->used, "BUG: empty heap");
201 hs = (Heapstack) zalloc(sizeof(*hs));
202 hs->next = h->sp;
203 h->sp = hs;
204 hs->used = h->used;
206 unqueue_signals();
209 /* reset heaps to previous state */
211 /**/
212 mod_export void
213 freeheap(void)
215 Heap h, hn, hl = NULL;
217 queue_signals();
219 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
220 h_free++;
221 #endif
223 fheap = NULL;
224 for (h = heaps; h; h = hn) {
225 hn = h->next;
226 if (h->sp) {
227 #ifdef ZSH_MEM_DEBUG
228 memset(arena(h) + h->sp->used, 0xff, h->used - h->sp->used);
229 #endif
230 h->used = h->sp->used;
231 if (!fheap && h->used < ARENA_SIZEOF(h))
232 fheap = h;
233 hl = h;
234 } else {
235 #ifdef USE_MMAP
236 munmap((void *) h, h->size);
237 #else
238 zfree(h, HEAPSIZE);
239 #endif
242 if (hl)
243 hl->next = NULL;
244 else
245 heaps = NULL;
247 unqueue_signals();
250 /* reset heap to previous state and destroy state information */
252 /**/
253 mod_export void
254 popheap(void)
256 Heap h, hn, hl = NULL;
257 Heapstack hs;
259 queue_signals();
261 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
262 h_pop++;
263 #endif
265 fheap = NULL;
266 for (h = heaps; h; h = hn) {
267 hn = h->next;
268 if ((hs = h->sp)) {
269 h->sp = hs->next;
270 #ifdef ZSH_MEM_DEBUG
271 memset(arena(h) + hs->used, 0xff, h->used - hs->used);
272 #endif
273 h->used = hs->used;
274 if (!fheap && h->used < ARENA_SIZEOF(h))
275 fheap = h;
276 zfree(hs, sizeof(*hs));
278 hl = h;
279 } else {
280 #ifdef USE_MMAP
281 munmap((void *) h, h->size);
282 #else
283 zfree(h, HEAPSIZE);
284 #endif
287 if (hl)
288 hl->next = NULL;
289 else
290 heaps = NULL;
292 unqueue_signals();
295 #ifdef USE_MMAP
297 * Utility function to allocate a heap area of at least *n bytes.
298 * *n will be rounded up to the next page boundary.
300 static Heap
301 mmap_heap_alloc(size_t *n)
303 Heap h;
304 static size_t pgsz = 0;
306 if (!pgsz) {
308 #ifdef _SC_PAGESIZE
309 pgsz = sysconf(_SC_PAGESIZE); /* SVR4 */
310 #else
311 # ifdef _SC_PAGE_SIZE
312 pgsz = sysconf(_SC_PAGE_SIZE); /* HPUX */
313 # else
314 pgsz = getpagesize();
315 # endif
316 #endif
318 pgsz--;
320 *n = (*n + pgsz) & ~pgsz;
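/* e.g. with 4096-byte pages, a request for 5000 bytes is rounded up to 8192 */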
321 h = (Heap) mmap(NULL, *n, PROT_READ | PROT_WRITE,
322 MMAP_FLAGS, -1, 0);
323 if (h == ((Heap) -1)) {
324 zerr("fatal error: out of heap memory");
325 exit(1);
328 return h;
330 #endif
332 /* check whether a pointer is within a memory pool */
334 /**/
335 mod_export void *
336 zheapptr(void *p)
338 Heap h;
339 queue_signals();
340 for (h = heaps; h; h = h->next)
341 if ((char *)p >= arena(h) &&
342 (char *)p + H_ISIZE < arena(h) + ARENA_SIZEOF(h))
343 break;
344 unqueue_signals();
345 return (h ? p : 0);
348 /* allocate memory from the current memory pool */
350 /**/
351 mod_export void *
352 zhalloc(size_t size)
354 Heap h;
355 size_t n;
357 size = (size + H_ISIZE - 1) & ~(H_ISIZE - 1);
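/* e.g. if H_ISIZE is 8, a request for 13 bytes is rounded up to 16 */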
359 queue_signals();
361 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
362 h_m[size < (1024 * H_ISIZE) ? (size / H_ISIZE) : 1024]++;
363 #endif
365 /* find a heap with enough free space */
367 for (h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
368 ? fheap : heaps);
369 h; h = h->next) {
370 if (ARENA_SIZEOF(h) >= (n = size + h->used)) {
371 void *ret;
373 h->used = n;
374 ret = arena(h) + n - size;
375 unqueue_signals();
376 return ret;
380 Heap hp;
381 /* not found, allocate new heap */
382 #if defined(ZSH_MEM) && !defined(USE_MMAP)
383 static int called = 0;
384 void *foo = called ? (void *)malloc(HEAPFREE) : NULL;
385 /* tricky, see above */
386 #endif
388 n = HEAP_ARENA_SIZE > size ? HEAPSIZE : size + sizeof(*h);
389 for (hp = NULL, h = heaps; h; hp = h, h = h->next);
391 #ifdef USE_MMAP
392 h = mmap_heap_alloc(&n);
393 #else
394 h = (Heap) zalloc(n);
395 #endif
397 #if defined(ZSH_MEM) && !defined(USE_MMAP)
398 if (called)
399 zfree(foo, HEAPFREE);
400 called = 1;
401 #endif
403 h->size = n;
404 h->used = size;
405 h->next = NULL;
406 h->sp = NULL;
408 if (hp)
409 hp->next = h;
410 else
411 heaps = h;
412 fheap = h;
414 unqueue_signals();
415 return arena(h);
419 /**/
420 mod_export void *
421 hrealloc(char *p, size_t old, size_t new)
423 Heap h, ph;
425 old = (old + H_ISIZE - 1) & ~(H_ISIZE - 1);
426 new = (new + H_ISIZE - 1) & ~(H_ISIZE - 1);
428 if (old == new)
429 return p;
430 if (!old && !p)
431 return zhalloc(new);
433 /* find the heap with p */
435 queue_signals();
436 for (h = heaps, ph = NULL; h; ph = h, h = h->next)
437 if (p >= arena(h) && p < arena(h) + ARENA_SIZEOF(h))
438 break;
440 DPUTS(!h, "BUG: hrealloc() called for non-heap memory.");
441 DPUTS(h->sp && arena(h) + h->sp->used > p,
442 "BUG: hrealloc() wants to realloc pushed memory");
445 * If the end of the old chunk is before the used pointer,
446 * more memory has been zhalloc'ed afterwards.
447 * We can't tell if that's still in use, obviously, since
448 * that's the whole point of heap memory.
449 * We have no choice other than to grab some more memory
450 * somewhere else and copy in the old stuff.
452 if (p + old < arena(h) + h->used) {
453 if (new > old) {
454 char *ptr = (char *) zhalloc(new);
455 memcpy(ptr, p, old);
456 #ifdef ZSH_MEM_DEBUG
457 memset(p, 0xff, old);
458 #endif
459 unqueue_signals();
460 return ptr;
461 } else {
462 unqueue_signals();
463 return new ? p : NULL;
467 DPUTS(p + old != arena(h) + h->used, "BUG: hrealloc more than allocated");
470 * We now know there's nothing afterwards in the heap; now see if
471 * there's nothing before. Then we can reallocate the whole thing.
472 * Otherwise, we need to keep the stuff at the start of the heap,
473 * then allocate a new one too; this is handled below. (This will
474 * guarantee we occupy a full heap next time round, provided we
475 * don't use the heap for anything else.)
477 if (p == arena(h)) {
479 * A new size of zero seems to be a special case meaning we've finished
480 * with the specially reallocated memory; see scanner() in glob.c.
482 if (!new) {
483 if (ph)
484 ph->next = h->next;
485 else
486 heaps = h->next;
487 fheap = NULL;
488 #ifdef USE_MMAP
489 munmap((void *) h, h->size);
490 #else
491 zfree(h, HEAPSIZE);
492 #endif
493 unqueue_signals();
494 return NULL;
496 if (new > ARENA_SIZEOF(h)) {
498 * Not enough memory in this heap. Allocate a new
499 * one of sufficient size.
501 * To avoid this happening too often, allocate
502 * chunks in multiples of HEAPSIZE.
503 * (Historical note: there didn't use to be any
504 * point in this since we didn't consistently record
505 * the allocated size of the heap, but now we do.)
507 size_t n = (new + sizeof(*h) + HEAPSIZE);
508 n -= n % HEAPSIZE;
509 fheap = NULL;
511 #ifdef USE_MMAP
514 * I don't know any easy portable way of requesting
515 * a mmap'd segment be extended, so simply allocate
516 * a new one and copy.
518 Heap hnew;
520 hnew = mmap_heap_alloc(&n);
521 /* Copy the entire heap, header (with next pointer) included */
522 memcpy(hnew, h, h->size);
523 munmap((void *)h, h->size);
524 h = hnew;
526 #else
527 h = (Heap) realloc(h, n);
528 #endif
530 h->size = n;
531 if (ph)
532 ph->next = h;
533 else
534 heaps = h;
536 h->used = new;
537 unqueue_signals();
538 return arena(h);
540 #ifndef USE_MMAP
541 DPUTS(h->used > ARENA_SIZEOF(h), "BUG: hrealloc at invalid address");
542 #endif
543 if (h->used + (new - old) <= ARENA_SIZEOF(h)) {
544 h->used += new - old;
545 unqueue_signals();
546 return p;
547 } else {
548 char *t = zhalloc(new);
549 memcpy(t, p, old > new ? new : old);
550 h->used -= old;
551 #ifdef ZSH_MEM_DEBUG
552 memset(p, 0xff, old);
553 #endif
554 unqueue_signals();
555 return t;
559 /* allocate memory from the current memory pool and clear it */
561 /**/
562 mod_export void *
563 hcalloc(size_t size)
565 void *ptr;
567 ptr = zhalloc(size);
568 memset(ptr, 0, size);
569 return ptr;
572 /* allocate permanent memory */
574 /**/
575 mod_export void *
576 zalloc(size_t size)
578 void *ptr;
580 if (!size)
581 size = 1;
582 queue_signals();
583 if (!(ptr = (void *) malloc(size))) {
584 zerr("fatal error: out of memory");
585 exit(1);
587 unqueue_signals();
589 return ptr;
592 /**/
593 mod_export void *
594 zshcalloc(size_t size)
596 void *ptr;
598 if (!size)
599 size = 1;
600 queue_signals();
601 if (!(ptr = (void *) malloc(size))) {
602 zerr("fatal error: out of memory");
603 exit(1);
605 unqueue_signals();
606 memset(ptr, 0, size);
608 return ptr;
611 /* This front-end to realloc is used to make sure we have a realloc *
612 * that conforms to POSIX realloc. Older realloc's can fail if *
613 * passed a NULL pointer, but POSIX realloc should handle this. A *
614 * better solution would be for configure to check if realloc is *
615 * POSIX compliant, but I'm not sure how to do that. */
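/* The cases handled below, summarised for reference (this simply mirrors
   the code):
       zrealloc(NULL, n)   behaves like malloc(n)
       zrealloc(p, 0)      frees p and returns NULL
       zrealloc(p, n)      resizes p, aborting the shell if memory runs out */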
617 /**/
618 mod_export void *
619 zrealloc(void *ptr, size_t size)
621 queue_signals();
622 if (ptr) {
623 if (size) {
624 /* Do normal realloc */
625 if (!(ptr = (void *) realloc(ptr, size))) {
626 zerr("fatal error: out of memory");
627 exit(1);
629 unqueue_signals();
630 return ptr;
632 else
633 /* If ptr is not NULL, but size is zero, *
634 * then object pointed to is freed. */
635 free(ptr);
637 ptr = NULL;
638 } else {
639 /* If ptr is NULL, then behave like malloc */
640 ptr = malloc(size);
642 unqueue_signals();
644 return ptr;
647 /**/
648 #ifdef ZSH_MEM
651 Below is a simple segment-oriented memory allocator for systems on
652 which it is better than the system's one. Memory is given in blocks
653 aligned to an integer multiple of sizeof(union mem_align), which will
654 probably be 64-bit as it is the longer of zlong or double. Each block is
655 preceded by a header which contains the length of the data part (in
656 bytes). In allocated blocks only this field of the structure m_hdr is
657 meaningful. In free blocks the second field (next) is a pointer to the next
658 free segment on the free list.
660 On top of this simple allocator there is a second allocator for small
661 chunks of data. It should be both faster and less space-consuming than
662 using the normal segment mechanism for such blocks.
663 For the first M_NSMALL-1 possible sizes memory is allocated in arrays
664 that can hold M_SNUM blocks. Each array is stored in one segment of the
665 main allocator. In these segments the third field of the header structure
666 (free) contains a pointer to the first free block in the array. The
667 last field (used) gives the number of already used blocks in the array.
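For example, on a typical 64-bit build where M_ISIZE works out to 8 bytes
and M_NSMALL is 8 (see the definitions below), rounded requests of 8 to 56
bytes are treated as small blocks: a 24-byte request goes to the size-index
3 list, whose arrays each hold M_SNUM (128) blocks of 24 bytes.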
669 If the macro name ZSH_MEM_DEBUG is defined, some information about the memory
670 usage is stored. This information can then be viewed by calling the
671 builtin `mem' (which is only available if ZSH_MEM_DEBUG is set).
673 If ZSH_MEM_WARNING is defined, error messages are printed in case of errors.
675 If ZSH_SECURE_FREE is defined, free() checks if the given address is really
676 one that was returned by malloc(); the address is ignored if it wasn't
677 (with an error message printed if ZSH_MEM_WARNING is also defined).
679 #if !defined(__hpux) && !defined(DGUX) && !defined(__osf__)
680 # if defined(_BSD)
681 # ifndef HAVE_BRK_PROTO
682 extern int brk _((caddr_t));
683 # endif
684 # ifndef HAVE_SBRK_PROTO
685 extern caddr_t sbrk _((int));
686 # endif
687 # else
688 # ifndef HAVE_BRK_PROTO
689 extern int brk _((void *));
690 # endif
691 # ifndef HAVE_SBRK_PROTO
692 extern void *sbrk _((int));
693 # endif
694 # endif
695 #endif
697 #if defined(_BSD) && !defined(STDC_HEADERS)
698 # define FREE_RET_T int
699 # define FREE_ARG_T char *
700 # define FREE_DO_RET
701 # define MALLOC_RET_T char *
702 # define MALLOC_ARG_T size_t
703 #else
704 # define FREE_RET_T void
705 # define FREE_ARG_T void *
706 # define MALLOC_RET_T void *
707 # define MALLOC_ARG_T size_t
708 #endif
710 /* structure for building free list in blocks holding small blocks */
712 struct m_shdr {
713 struct m_shdr *next; /* next one on free list */
714 #ifdef PAD_64_BIT
715 /* dummy to make this 64-bit aligned */
716 struct m_shdr *dummy;
717 #endif
720 struct m_hdr {
721 zlong len; /* length of memory block */
722 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
723 /* either 1 or 2 zlong's, whichever makes up 64 bits. */
724 zlong dummy1;
725 #endif
726 struct m_hdr *next; /* if free: next on free list
727 if block of small blocks: next one with
728 small blocks of same size*/
729 struct m_shdr *free; /* if block of small blocks: free list */
730 zlong used; /* if block of small blocks: number of used
731 blocks */
732 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
733 zlong dummy2;
734 #endif
738 /* alignment for memory blocks */
740 #define M_ALIGN (sizeof(union mem_align))
742 /* length of memory header, length of first field of memory header and
743 minimal size of a block left free (if we allocate memory and take a
744 block from the free list that is larger than needed, it must have at
745 least M_MIN extra bytes to be split; if it has, the rest is put on
746 the free list) */
748 #define M_HSIZE (sizeof(struct m_hdr))
749 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
750 # define M_ISIZE (2*sizeof(zlong))
751 #else
752 # define M_ISIZE (sizeof(zlong))
753 #endif
754 #define M_MIN (2 * M_ISIZE)
756 /* M_FREE is the number of bytes that have to be free before memory is
757 * given back to the system
758 * M_KEEP is the number of bytes that will be kept when memory is given
759 * back; note that this has to be less than M_FREE
760 * M_ALLOC is the number of extra bytes to request from the system */
762 #define M_FREE 32768
763 #define M_KEEP 16384
764 #define M_ALLOC M_KEEP
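/* For instance (illustrative numbers only, assuming 4096-byte pages and an
   8-byte zlong): if the last free block sits at the top of the break and is
   40000 bytes long, zfree() below returns (40000 - M_MIN - M_KEEP) rounded
   down to a page multiple, i.e. 20480 bytes, to the system and keeps the
   rest on the free list. */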
766 /* a pointer to the last free block, a pointer to the free list (the blocks
767 on this list are kept in order - lowest address first) */
769 static struct m_hdr *m_lfree, *m_free;
771 /* system's pagesize */
773 static long m_pgsz = 0;
775 /* the highest and the lowest valid memory addresses, kept for fast validity
776 checks in free() and to find out if and when we can give memory back to
777 the system */
779 static char *m_high, *m_low;
781 /* Management of blocks for small blocks:
782 Such blocks are kept in lists (one list for each of the sizes that are
783 allocated in such blocks). The lists are stored in the m_small array.
784 M_SIDX() calculates the index into this array for a given size. M_SNUM
785 is the size (in small blocks) of such blocks. M_SLEN() calculates the
786 size of the small blocks held in a memory block, given a pointer to the
787 header of it. M_SBLEN() gives the size of a memory block that can hold
788 an array of small blocks, given the size of these small blocks. M_BSLEN()
789 calculates the size of the small blocks held in a memory block, given the
790 length of that block (including the header of the memory block). M_NSMALL
791 is the number of possible block sizes that small blocks should be used
792 for. */
795 #define M_SIDX(S) ((S) / M_ISIZE)
796 #define M_SNUM 128
797 #define M_SLEN(M) ((M)->len / M_SNUM)
798 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
799 /* Include the dummy in the alignment */
800 #define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \
801 2*sizeof(zlong) + sizeof(struct m_hdr *))
802 #define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \
803 2*sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
804 #else
805 #define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \
806 sizeof(zlong) + sizeof(struct m_hdr *))
807 #define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \
808 sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM)
809 #endif
810 #define M_NSMALL 8
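/* A worked example (assuming 8-byte pointers and zlong, without PAD_64_BIT):
   for small blocks of size S = 24, M_SBLEN(24) = 24*128 + 24 = 3096 bytes are
   needed for one array, and M_BSLEN(3096) = (3096 - 24)/128 = 24 recovers the
   small-block size from the segment length. */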
812 static struct m_hdr *m_small[M_NSMALL];
814 #ifdef ZSH_MEM_DEBUG
816 static int m_s = 0, m_b = 0;
817 static int m_m[1025], m_f[1025];
819 static struct m_hdr *m_l;
821 #endif /* ZSH_MEM_DEBUG */
823 MALLOC_RET_T
824 malloc(MALLOC_ARG_T size)
826 struct m_hdr *m, *mp, *mt;
827 long n, s, os = 0;
828 #ifndef USE_MMAP
829 struct heap *h, *hp, *hf = NULL, *hfp = NULL;
830 #endif
832 /* some systems want malloc to return the highest valid address plus one
833 if it is called with an argument of zero.
835 TODO: really? Suppose we allocate more memory, so
836 that this address is now in bounds, and then an application
837 that reasonably thinks it can free() anything it malloc'ed, even
838 of zero length, calls free() on it? Aren't we in big
839 trouble? Wouldn't it be safer just to allocate some
840 memory anyway?
842 If the above comment is really correct, then at least
843 we need to check in free() if we're freeing memory
844 at m_high.
847 if (!size)
848 #if 1
849 size = 1;
850 #else
851 return (MALLOC_RET_T) m_high;
852 #endif
854 queue_signals(); /* just queue signals rather than handling them */
856 /* first call, get page size */
858 if (!m_pgsz) {
860 #ifdef _SC_PAGESIZE
861 m_pgsz = sysconf(_SC_PAGESIZE); /* SVR4 */
862 #else
863 # ifdef _SC_PAGE_SIZE
864 m_pgsz = sysconf(_SC_PAGE_SIZE); /* HPUX */
865 # else
866 m_pgsz = getpagesize();
867 # endif
868 #endif
870 m_free = m_lfree = NULL;
872 size = (size + M_ALIGN - 1) & ~(M_ALIGN - 1);
874 /* Do we need a small block? */
876 if ((s = M_SIDX(size)) && s < M_NSMALL) {
877 /* yep, find a memory block with free small blocks of the
878 appropriate size (if we find it in this list, this means that
879 it has room for at least one more small block) */
880 for (mp = NULL, m = m_small[s]; m && !m->free; mp = m, m = m->next);
882 if (m) {
883 /* we found one */
884 struct m_shdr *sh = m->free;
886 m->free = sh->next;
887 m->used++;
889 /* if all small blocks in this block are allocated, the block is
890 put at the end of the list of blocks with small blocks of this
891 size (i.e., we try to keep blocks with free blocks at the
892 beginning of the list, to make the search faster) */
894 if (m->used == M_SNUM && m->next) {
895 for (mt = m; mt->next; mt = mt->next);
897 mt->next = m;
898 if (mp)
899 mp->next = m->next;
900 else
901 m_small[s] = m->next;
902 m->next = NULL;
904 #ifdef ZSH_MEM_DEBUG
905 m_m[size / M_ISIZE]++;
906 #endif
908 unqueue_signals();
909 return (MALLOC_RET_T) sh;
911 /* we still want a small block but there was no block with a free
912 small block of the requested size; so we use the real allocation
913 routine to allocate a block for small blocks of this size */
914 os = size;
915 size = M_SBLEN(size);
916 } else
917 s = 0;
919 /* search the free list for a block of at least the requested size */
920 for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
922 #ifndef USE_MMAP
924 /* if there is an empty zsh heap at a lower address we steal it and take
925 the memory from it, putting the rest on the free list (remember
926 that the blocks on the free list are ordered) */
928 for (hp = NULL, h = heaps; h; hp = h, h = h->next)
929 if (!h->used &&
930 (!hf || h < hf) &&
931 (!m || ((char *)m) > ((char *)h)))
932 hf = h, hfp = hp;
934 if (hf) {
935 /* we found such a heap */
936 Heapstack hso, hsn;
938 /* delete structures on the list holding the heap states */
939 for (hso = hf->sp; hso; hso = hsn) {
940 hsn = hso->next;
941 zfree(hso, sizeof(*hso));
943 /* take it from the list of heaps */
944 if (hfp)
945 hfp->next = hf->next;
946 else
947 heaps = hf->next;
948 /* now we simply free it and then search the free list again */
949 zfree(hf, HEAPSIZE);
951 for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
953 #endif
954 if (!m) {
955 long nal;
956 /* no matching free block was found, we have to request new
957 memory from the system */
958 n = (size + M_HSIZE + M_ALLOC + m_pgsz - 1) & ~(m_pgsz - 1);
960 if (((char *)(m = (struct m_hdr *)sbrk(n))) == ((char *)-1)) {
961 DPUTS1(1, "MEM: allocation error at sbrk, size %L.", n);
962 unqueue_signals();
963 return NULL;
965 if ((nal = ((long)(char *)m) & (M_ALIGN-1))) {
966 if ((char *)sbrk(M_ALIGN - nal) == (char *)-1) {
967 DPUTS(1, "MEM: allocation error at sbrk.");
968 unqueue_signals();
969 return NULL;
971 m = (struct m_hdr *) ((char *)m + (M_ALIGN - nal));
973 /* set m_low, for the check in free() */
974 if (!m_low)
975 m_low = (char *)m;
977 #ifdef ZSH_MEM_DEBUG
978 m_s += n;
980 if (!m_l)
981 m_l = m;
982 #endif
984 /* save new highest address */
985 m_high = ((char *)m) + n;
987 /* initialize header */
988 m->len = n - M_ISIZE;
989 m->next = NULL;
991 /* put it on the free list and set m_lfree pointing to it */
992 if ((mp = m_lfree))
993 m_lfree->next = m;
994 m_lfree = m;
996 if ((n = m->len - size) > M_MIN) {
997 /* the block we want to use has more than M_MIN bytes plus the
998 number of bytes that were requested; we split it in two and
999 leave the rest on the free list */
1000 struct m_hdr *mtt = (struct m_hdr *)(((char *)m) + M_ISIZE + size);
1002 mtt->len = n - M_ISIZE;
1003 mtt->next = m->next;
1005 m->len = size;
1007 /* put the rest on the list */
1008 if (m_lfree == m)
1009 m_lfree = mtt;
1011 if (mp)
1012 mp->next = mtt;
1013 else
1014 m_free = mtt;
1015 } else if (mp) {
1016 /* the block we found wasn't the first one on the free list */
1017 if (m == m_lfree)
1018 m_lfree = mp;
1019 mp->next = m->next;
1020 } else {
1021 /* it was the first one */
1022 m_free = m->next;
1023 if (m == m_lfree)
1024 m_lfree = m_free;
1027 if (s) {
1028 /* we are allocating a block that should hold small blocks */
1029 struct m_shdr *sh, *shn;
1031 /* build the free list in this block and set the `used' field */
1032 m->free = sh = (struct m_shdr *)(((char *)m) +
1033 sizeof(struct m_hdr) + os);
1035 for (n = M_SNUM - 2; n--; sh = shn)
1036 shn = sh->next = sh + s;
1037 sh->next = NULL;
1039 m->used = 1;
1041 /* put the block on the list of blocks holding small blocks of
1042 this size */
1043 m->next = m_small[s];
1044 m_small[s] = m;
1046 #ifdef ZSH_MEM_DEBUG
1047 m_m[os / M_ISIZE]++;
1048 #endif
1050 unqueue_signals();
1051 return (MALLOC_RET_T) (((char *)m) + sizeof(struct m_hdr));
1053 #ifdef ZSH_MEM_DEBUG
1054 m_m[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
1055 #endif
1057 unqueue_signals();
1058 return (MALLOC_RET_T) & m->next;
1061 /* this is an internal free(); the second argument may, but need not, hold
1062 the size of the block the first argument points to; if it is the
1063 right size of that block, freeing it will be faster; the value
1064 0 for this parameter means `don't know' */
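/* For example, popheap() above frees each Heapstack with its size known,
   while free() below has to pass 0 because the size is unknown there:

       zfree(hs, sizeof(*hs));
       zfree(p, 0);
 */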
1066 /**/
1067 mod_export void
1068 zfree(void *p, int sz)
1070 struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt = NULL;
1071 int i;
1072 # ifdef DEBUG
1073 int osz = sz;
1074 # endif
1076 #ifdef ZSH_SECURE_FREE
1077 sz = 0;
1078 #else
1079 sz = (sz + M_ALIGN - 1) & ~(M_ALIGN - 1);
1080 #endif
1082 if (!p)
1083 return;
1085 /* first a simple check if the given address is valid */
1086 if (((char *)p) < m_low || ((char *)p) > m_high ||
1087 ((long)p) & (M_ALIGN - 1)) {
1088 DPUTS(1, "BUG: attempt to free storage at invalid address");
1089 return;
1092 queue_signals();
1094 fr_rec:
1096 if ((i = sz / M_ISIZE) < M_NSMALL || !sz)
1097 /* if the given size says that it is a small block, find the
1098 memory block holding it; we search all blocks with blocks
1099 of at least the given size; if the size parameter is zero,
1100 this means that all blocks are searched */
1101 for (; i < M_NSMALL; i++) {
1102 for (mp = NULL, mt = m_small[i];
1103 mt && (((char *)mt) > ((char *)p) ||
1104 (((char *)mt) + mt->len) < ((char *)p));
1105 mp = mt, mt = mt->next);
1107 if (mt) {
1108 /* we found the block holding the small block */
1109 struct m_shdr *sh = (struct m_shdr *)p;
1111 #ifdef ZSH_SECURE_FREE
1112 struct m_shdr *sh2;
1114 /* check if the given address is equal to the address of
1115 the first small block plus an integer multiple of the
1116 block size */
1117 if ((((char *)p) - (((char *)mt) + sizeof(struct m_hdr))) %
1118 M_BSLEN(mt->len)) {
1120 DPUTS(1, "BUG: attempt to free storage at invalid address");
1121 unqueue_signals();
1122 return;
1124 /* check if the address is on the (block-internal) free list */
1125 for (sh2 = mt->free; sh2; sh2 = sh2->next)
1126 if (((char *)p) == ((char *)sh2)) {
1128 DPUTS(1, "BUG: attempt to free already free storage");
1129 unqueue_signals();
1130 return;
1132 #endif
1133 DPUTS(M_BSLEN(mt->len) < osz,
1134 "BUG: attempt to free more than allocated.");
1136 #ifdef ZSH_MEM_DEBUG
1137 m_f[M_BSLEN(mt->len) / M_ISIZE]++;
1138 memset(sh, 0xff, M_BSLEN(mt->len));
1139 #endif
1141 /* put the block onto the free list */
1142 sh->next = mt->free;
1143 mt->free = sh;
1145 if (--mt->used) {
1146 /* if there are still used blocks in this block, we
1147 put it at the beginning of the list of blocks
1148 holding small blocks of the same size (since we
1149 know that there is at least one free block in it,
1150 this will make allocation of small blocks faster;
1151 it also guarantees that long-lived memory blocks
1152 are preferred over younger ones) */
1153 if (mp) {
1154 mp->next = mt->next;
1155 mt->next = m_small[i];
1156 m_small[i] = mt;
1158 unqueue_signals();
1159 return;
1161 /* if there are no more used small blocks in this
1162 block, we free the whole block */
1163 if (mp)
1164 mp->next = mt->next;
1165 else
1166 m_small[i] = mt->next;
1168 m = mt;
1169 p = (void *) & m->next;
1171 break;
1172 } else if (sz) {
1173 /* if we didn't find a block and a size was given, try it
1174 again as if no size were given */
1175 sz = 0;
1176 goto fr_rec;
1179 #ifdef ZSH_MEM_DEBUG
1180 if (!mt)
1181 m_f[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
1182 #endif
1184 #ifdef ZSH_SECURE_FREE
1185 /* search all memory blocks to see if one of them is at the given address */
1186 for (mt = (struct m_hdr *)m_low;
1187 ((char *)mt) < m_high;
1188 mt = (struct m_hdr *)(((char *)mt) + M_ISIZE + mt->len))
1189 if (((char *)p) == ((char *)&mt->next))
1190 break;
1192 /* no block was found at the given address */
1193 if (((char *)mt) >= m_high) {
1194 DPUTS(1, "BUG: attempt to free storage at invalid address");
1195 unqueue_signals();
1196 return;
1198 #endif
1200 /* see if the block is on the free list */
1201 for (mp = NULL, mt = m_free; mt && mt < m; mp = mt, mt = mt->next);
1203 if (m == mt) {
1204 /* it is, ouch! */
1205 DPUTS(1, "BUG: attempt to free already free storage");
1206 unqueue_signals();
1207 return;
1209 DPUTS(m->len < osz, "BUG: attempt to free more than allocated");
1210 #ifdef ZSH_MEM_DEBUG
1211 memset(p, 0xff, m->len);
1212 #endif
1213 if (mt && ((char *)mt) == (((char *)m) + M_ISIZE + m->len)) {
1214 /* the block after the one we are freeing is free; we put them
1215 together */
1216 m->len += mt->len + M_ISIZE;
1217 m->next = mt->next;
1219 if (mt == m_lfree)
1220 m_lfree = m;
1221 } else
1222 m->next = mt;
1224 if (mp && ((char *)m) == (((char *)mp) + M_ISIZE + mp->len)) {
1225 /* the block before the one we are freeing is free; we put them
1226 together */
1227 mp->len += m->len + M_ISIZE;
1228 mp->next = m->next;
1230 if (m == m_lfree)
1231 m_lfree = mp;
1232 } else if (mp)
1233 /* otherwise, we just put it on the free list */
1234 mp->next = m;
1235 else {
1236 m_free = m;
1237 if (!m_lfree)
1238 m_lfree = m_free;
1241 /* if the block we have just freed was at the end of the process heap
1242 and now there is more than one page size of memory, we can give
1243 it back to the system (and we do it ;-) */
1244 if ((((char *)m_lfree) + M_ISIZE + m_lfree->len) == m_high &&
1245 m_lfree->len >= m_pgsz + M_MIN + M_FREE) {
1246 long n = (m_lfree->len - M_MIN - M_KEEP) & ~(m_pgsz - 1);
1248 m_lfree->len -= n;
1249 #ifdef HAVE_BRK
1250 if (brk(m_high -= n) == -1) {
1251 #else
1252 m_high -= n;
1253 if (sbrk(-n) == (void *)-1) {
1254 #endif /* HAVE_BRK */
1255 DPUTS(1, "MEM: allocation error at brk.");
1258 #ifdef ZSH_MEM_DEBUG
1259 m_b += n;
1260 #endif
1262 unqueue_signals();
1265 FREE_RET_T
1266 free(FREE_ARG_T p)
1268 zfree(p, 0); /* 0 means: size is unknown */
1270 #ifdef FREE_DO_RET
1271 return 0;
1272 #endif
1275 /* this one is for strings (and only strings, real strings, real C strings,
1276 those that have a zero byte at the end) */
1278 /**/
1279 mod_export void
1280 zsfree(char *p)
1282 if (p)
1283 zfree(p, strlen(p) + 1);
1286 MALLOC_RET_T
1287 realloc(MALLOC_RET_T p, MALLOC_ARG_T size)
1289 struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt;
1290 char *r;
1291 int i, l = 0;
1293 /* some systems..., see above */
1294 if (!p && size)
1295 return (MALLOC_RET_T) malloc(size);
1296 /* and some systems even do this... */
1297 if (!p || !size)
1298 return (MALLOC_RET_T) p;
1300 queue_signals(); /* just queue signals caught rather than handling them */
1302 /* check if we are reallocating a small block; if we are, we have
1303 to compute the size of the block from the sort of block it is in */
1304 for (i = 0; i < M_NSMALL; i++) {
1305 for (mp = NULL, mt = m_small[i];
1306 mt && (((char *)mt) > ((char *)p) ||
1307 (((char *)mt) + mt->len) < ((char *)p));
1308 mp = mt, mt = mt->next);
1310 if (mt) {
1311 l = M_BSLEN(mt->len);
1312 break;
1315 if (!l)
1316 /* otherwise the size of the block is in the memory just before
1317 the given address */
1318 l = m->len;
1320 /* now allocate the new block, copy the old contents, and free the
1321 old block */
1322 r = malloc(size);
1323 memcpy(r, (char *)p, (size > l) ? l : size);
1324 free(p);
1326 unqueue_signals();
1327 return (MALLOC_RET_T) r;
1330 MALLOC_RET_T
1331 calloc(MALLOC_ARG_T n, MALLOC_ARG_T size)
1333 long l;
1334 char *r;
1336 if (!(l = n * size))
1337 return (MALLOC_RET_T) m_high;
1339 r = malloc(l);
1341 memset(r, 0, l);
1343 return (MALLOC_RET_T) r;
1346 #ifdef ZSH_MEM_DEBUG
1348 /**/
1350 bin_mem(char *name, char **argv, Options ops, int func)
1352 int i, ii, fi, ui, j;
1353 struct m_hdr *m, *mf, *ms;
1354 char *b, *c, buf[40];
1355 long u = 0, f = 0, to, cu;
1357 queue_signals();
1358 if (OPT_ISSET(ops,'v')) {
1359 printf("The lower and the upper addresses of the heap. Diff gives\n");
1360 printf("the difference between them, i.e. the size of the heap.\n\n");
1362 printf("low mem %ld\t high mem %ld\t diff %ld\n",
1363 (long)m_l, (long)m_high, (long)(m_high - ((char *)m_l)));
1365 if (OPT_ISSET(ops,'v')) {
1366 printf("\nThe number of bytes that were allocated using sbrk() and\n");
1367 printf("the number of bytes that were given back to the system\n");
1368 printf("via brk().\n");
1370 printf("\nsbrk %d\tbrk %d\n", m_s, m_b);
1372 if (OPT_ISSET(ops,'v')) {
1373 printf("\nInformation about the sizes that were allocated or freed.\n");
1374 printf("For each size that was used the number of mallocs and\n");
1375 printf("frees is shown. Diff gives the difference between these\n");
1376 printf("values, i.e. the number of blocks of that size that are\n");
1377 printf("currently allocated. Total is the product of size and diff,\n");
1378 printf("i.e. the number of bytes that are allocated for blocks of\n");
1379 printf("this size. The last field gives the accumulated number of\n");
1380 printf("bytes for all sizes.\n");
1382 printf("\nsize\tmalloc\tfree\tdiff\ttotal\tcum\n");
1383 for (i = 0, cu = 0; i < 1024; i++)
1384 if (m_m[i] || m_f[i]) {
1385 to = (long) i * M_ISIZE * (m_m[i] - m_f[i]);
1386 printf("%ld\t%d\t%d\t%d\t%ld\t%ld\n",
1387 (long)i * M_ISIZE, m_m[i], m_f[i], m_m[i] - m_f[i],
1388 to, (cu += to));
1391 if (m_m[i] || m_f[i])
1392 printf("big\t%d\t%d\t%d\n", m_m[i], m_f[i], m_m[i] - m_f[i]);
1394 if (OPT_ISSET(ops,'v')) {
1395 printf("\nThe list of memory blocks. For each block the following\n");
1396 printf("information is shown:\n\n");
1397 printf("num\tthe number of this block\n");
1398 printf("tnum\tlike num but counted separately for used and free\n");
1399 printf("\tblocks\n");
1400 printf("addr\tthe address of this block\n");
1401 printf("len\tthe length of the block\n");
1402 printf("state\tthe state of this block, this can be:\n");
1403 printf("\t used\tthis block is used for one big block\n");
1404 printf("\t free\tthis block is free\n");
1405 printf("\t small\tthis block is used for an array of small blocks\n");
1406 printf("cum\tthe accumulated sizes of the blocks, counted\n");
1407 printf("\tseparately for used and free blocks\n");
1408 printf("\nFor blocks holding small blocks the number of free\n");
1409 printf("blocks, the number of used blocks and the size of the\n");
1410 printf("blocks is shown. For otherwise used blocks the first few\n");
1411 printf("bytes are shown as an ASCII dump.\n");
1413 printf("\nblock list:\nnum\ttnum\taddr\t\tlen\tstate\tcum\n");
1414 for (m = m_l, mf = m_free, ii = fi = ui = 1; ((char *)m) < m_high;
1415 m = (struct m_hdr *)(((char *)m) + M_ISIZE + m->len), ii++) {
1416 for (j = 0, ms = NULL; j < M_NSMALL && !ms; j++)
1417 for (ms = m_small[j]; ms; ms = ms->next)
1418 if (ms == m)
1419 break;
1421 if (m == mf)
1422 buf[0] = '\0';
1423 else if (m == ms)
1424 sprintf(buf, "%ld %ld %ld", (long)(M_SNUM - ms->used),
1425 (long)ms->used,
1426 (long)(m->len - sizeof(struct m_hdr)) / M_SNUM + 1);
1428 else {
1429 for (i = 0, b = buf, c = (char *)&m->next; i < 20 && i < m->len;
1430 i++, c++)
1431 *b++ = (*c >= ' ' && *c < 127) ? *c : '.';
1432 *b = '\0';
1435 printf("%d\t%d\t%ld\t%ld\t%s\t%ld\t%s\n", ii,
1436 (m == mf) ? fi++ : ui++,
1437 (long)m, (long)m->len,
1438 (m == mf) ? "free" : ((m == ms) ? "small" : "used"),
1439 (m == mf) ? (f += m->len) : (u += m->len),
1440 buf);
1442 if (m == mf)
1443 mf = mf->next;
1446 if (OPT_ISSET(ops,'v')) {
1447 printf("\nHere is some information about the small blocks used.\n");
1448 printf("For each size the arrays with the number of free and the\n");
1449 printf("number of used blocks are shown.\n");
1451 printf("\nsmall blocks:\nsize\tblocks (free/used)\n");
1453 for (i = 0; i < M_NSMALL; i++)
1454 if (m_small[i]) {
1455 printf("%ld\t", (long)i * M_ISIZE);
1457 for (ii = 0, m = m_small[i]; m; m = m->next) {
1458 printf("(%ld/%ld) ", (long)(M_SNUM - m->used),
1459 (long)m->used);
1460 if (!((++ii) & 7))
1461 printf("\n\t");
1463 putchar('\n');
1465 if (OPT_ISSET(ops,'v')) {
1466 printf("\n\nBelow is some information about the allocation\n");
1467 printf("behaviour of the zsh heaps. First the number of times\n");
1468 printf("pushheap(), popheap(), and freeheap() were called.\n");
1470 printf("\nzsh heaps:\n\n");
1472 printf("push %d\tpop %d\tfree %d\n\n", h_push, h_pop, h_free);
1474 if (OPT_ISSET(ops,'v')) {
1475 printf("\nThe next list shows for several sizes the number of times\n");
1476 printf("memory of this size was taken from heaps.\n\n");
1478 printf("size\tmalloc\ttotal\n");
1479 for (i = 0; i < 1024; i++)
1480 if (h_m[i])
1481 printf("%ld\t%d\t%ld\n", (long)i * H_ISIZE, h_m[i],
1482 (long)i * H_ISIZE * h_m[i]);
1483 if (h_m[1024])
1484 printf("big\t%d\n", h_m[1024]);
1486 unqueue_signals();
1487 return 0;
1490 #endif
1492 /**/
1493 #else /* not ZSH_MEM */
1495 /**/
1496 mod_export void
1497 zfree(void *p, UNUSED(int sz))
1499 if (p)
1500 free(p);
1503 /**/
1504 mod_export void
1505 zsfree(char *p)
1507 if (p)
1508 free(p);
1511 /**/
1512 #endif