/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992 Free Software Foundation
   Written May 1989 by Mike Haertel.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc., 675 Mass Ave,
   Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Debugging hook for free.  */
void (*__free_hook) __P ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
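      /* A type of 0 marks a "large" object occupying one or more whole
         BLOCKSIZE blocks; a positive type T (handled in the default case
         below) marks a block split into fragments of 1 << T bytes.  */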
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
         Start searching at the last block referenced; this may benefit
         programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
        while (i > block)
          i = _heapinfo[i].free.prev;
      else
        {
          do
            i = _heapinfo[i].free.next;
          while (i > 0 && i < block);
          i = _heapinfo[i].free.prev;
        }

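      /* The free list is kept sorted by block number and is circular
         through block 0, so the search above leaves I at the last free
         cluster that lies below BLOCK.  */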
      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
        {
          /* Coalesce this block with its predecessor.  */
          _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
          block = i;
        }
      else
        {
          /* Really link this block back into the free list.  */
          _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
          _heapinfo[block].free.next = _heapinfo[i].free.next;
          _heapinfo[block].free.prev = i;
          _heapinfo[i].free.next = block;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          ++_chunks_free;
        }

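      /* Free-list links are block numbers, so the successor cluster is
         physically adjacent exactly when this cluster's block number plus
         its size equals the successor's block number.  */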
      /* Now that the block is linked in, see if we can coalesce it
         with its successor (by deleting its successor from the list
         and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
        {
          _heapinfo[block].free.size
            += _heapinfo[_heapinfo[block].free.next].free.size;
          _heapinfo[block].free.next
            = _heapinfo[_heapinfo[block].free.next].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev = block;
          --_chunks_free;
        }

      /* Now see if we can return stuff to the system.  */
      blocks = _heapinfo[block].free.size;
      if (blocks >= FINAL_FREE_BLOCKS && block + blocks == _heaplimit
          && (*__morecore) (0) == ADDRESS (block + blocks))
        {
          register size_t bytes = blocks * BLOCKSIZE;
          _heaplimit -= blocks;
          (*__morecore) (-bytes);
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[block].free.next;
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          block = _heapinfo[block].free.prev;
          --_chunks_free;
          _bytes_free -= bytes;
        }

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
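      /* The freed pointer is a fragment of 1 << type bytes inside a
         block that malloc split up to satisfy small requests.  */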
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
                              (_heapinfo[block].busy.info.frag.first << type));

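      /* frag.first holds the index of the first free fragment within the
         block, so shifting it left by TYPE converts it back into a byte
         offset from the block's start address.  */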
      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
        {
          /* If all fragments of this block are free, remove them
             from the fragment list and free the whole block.  */
          next = prev;
          for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
            next = next->next;
          prev->prev->next = next;
          if (next != NULL)
            next->prev = prev->prev;
          _heapinfo[block].busy.type = 0;
          _heapinfo[block].busy.info.size = 1;

          /* Keep the statistics accurate.  */
          ++_chunks_used;
          _bytes_used += BLOCKSIZE;
          _chunks_free -= BLOCKSIZE >> type;
          _bytes_free -= BLOCKSIZE;

          free (ADDRESS (block));
        }
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
        {
          /* If some fragments of this block are free, link this
             fragment into the fragment list after the first free
             fragment of this block.  */
          next = (struct list *) ptr;
          next->next = prev->next;
          next->prev = prev;
          prev->next = next;
          if (next->next != NULL)
            next->next->prev = next;
          ++_heapinfo[block].busy.info.frag.nfree;
        }
      else
        {
          /* No fragments of this block are free, so link this
             fragment into the fragment list and announce that
             it is the first free fragment of this block.  */
          prev = (struct list *) ptr;
          _heapinfo[block].busy.info.frag.nfree = 1;
          _heapinfo[block].busy.info.frag.first = (unsigned long int)
            ((unsigned long int) ((char *) ptr - (char *) NULL)
             % BLOCKSIZE >> type);
          prev->next = _fraghead[type].next;
          prev->prev = &_fraghead[type];
          prev->prev->next = prev;
          if (prev->next != NULL)
            prev->next->prev = prev;
        }
      break;
    }
}

/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  register struct alignlist *l;

  if (ptr == NULL)
    return;

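  /* Blocks handed out by memalign are recorded in _aligned_blocks; if PTR
     is such an aligned address, translate it back to the exact address
     that malloc originally returned before freeing it.  */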
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
        l->aligned = NULL;      /* Mark the slot in the list as free.  */
        ptr = l->exact;
        break;
      }

  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}
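
/* Illustrative sketch, not part of the original library: one way a program
   might use the `__free_hook' declared above to trace calls to `free'.
   Since `free' calls the hook *instead of* `_free_internal', a tracing
   hook must temporarily clear itself (or call `_free_internal' directly)
   so the memory is actually released.  The name `trace_free_hook' is made
   up for this example; the code is disabled so it does not affect the
   library build.  */
#if 0
#include <stdio.h>

static void trace_free_hook __P ((__ptr_t));

static void
trace_free_hook (ptr)
     __ptr_t ptr;
{
  fprintf (stderr, "free (%p)\n", ptr);
  __free_hook = NULL;           /* Avoid recursing into this hook.  */
  free (ptr);                   /* Now really releases the memory.  */
  __free_hook = trace_free_hook;
}
#endif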