/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993 Free Software Foundation
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */
#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
/* How to really get more memory.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;
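/* NOTE: __default_morecore is defined outside this file (conventionally in
   morecore.c).  As a rough sketch of the contract assumed here, it behaves
   like:

     __ptr_t __default_morecore (ptrdiff_t increment)
     {
       __ptr_t result = (__ptr_t) __sbrk (increment);
       return result == (__ptr_t) -1 ? NULL : result;
     }

   i.e. it extends the break by INCREMENT bytes and returns the old break,
   or NULL on failure.  Nothing in this file relies on more than that.  */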
/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) __P ((size_t __size));
/* Pointer to the base of the first block.  */
char *_heapbase;
/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;
/* Number of info entries.  */
static size_t heapsize;
/* Search index in the info table.  */
size_t _heapindex;
/* Limit of valid info table indices.  */
size_t _heaplimit;
/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];
/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;
/* Are you experienced?  */
int __malloc_initialized;
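/* Called, when non-null, each time align successfully gets more core from
   __morecore (see align below); useful for debugging wrappers that want to
   track heap growth.  */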
void (*__after_morecore_hook) __P ((void));
/* Aligned allocation.  */
static __ptr_t align __P ((size_t));
static __ptr_t
align (size)
     size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
			     (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (void) (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
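/* Illustrative example (assuming BLOCKSIZE is a power of two such as 4096):
   if __morecore returns core at address 0x10000100, adj is 0x100, so align
   asks for another 0xf00 bytes and returns 0x10001000, the next block
   boundary; the 0xf00 bytes in between are simply skipped.  */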
/* Set everything up and remember that we have.  */
static int initialize __P ((void));
static int
initialize ()
{
  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  __malloc_initialized = 1;
  return 1;
}
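/* Sizing note (illustrative, with the actual HEAP and BLOCKSIZE values coming
   from malloc.h): with, say, a 1 MB HEAP and 4 KB blocks the initial table has
   256 malloc_info entries, one per potential block; entry 0 doubles as the
   anchor of the circular free list.  */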
/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore __P ((size_t));
static __ptr_t
morecore (size)
     size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      newsize = heapsize;
      while ((size_t) BLOCK ((char *) result + size) > newsize)
	newsize *= 2;
      newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));
      if (newinfo == NULL)
	{
	  (*__morecore) (-size);
	  return NULL;
	}
      memset (newinfo, 0, newsize * sizeof (malloc_info));
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      oldinfo = _heapinfo;
      newinfo[BLOCK (oldinfo)].busy.type = 0;
      newinfo[BLOCK (oldinfo)].busy.info.size
	= BLOCKIFY (heapsize * sizeof (malloc_info));
      _heapinfo = newinfo;
      _free_internal (oldinfo);
      heapsize = newsize;
    }

  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
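/* Growth example (illustrative): if the table currently describes 256 blocks
   and the new core ends in block 300, newsize doubles to 512; the old table
   is retyped as an ordinary busy region and handed to _free_internal so its
   blocks return to the free list.  */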
/* Allocate memory from the heap.  */
__ptr_t
malloc (size)
     size_t size;
{
  __ptr_t result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */
  if (__malloc_hook != NULL)
    return (*__malloc_hook) (size);

  if (!__malloc_initialized)
    if (!initialize ())
      return NULL;

  if (size < sizeof (struct list))
    size = sizeof (struct list);
  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size. */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
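      /* Worked example (illustrative): a request of 100 bytes becomes 99,
	 whose repeated halving leaves log = 7, so the caller gets a
	 1 << 7 = 128-byte fragment.  Decrementing SIZE first keeps exact
	 powers of two (e.g. 128) at their own size rather than the next
	 size up.  */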
      /* Look in the fragment lists for a
	 free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters. */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;
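	  /* The `first' field thus records which fragment of the block is the
	     next free one: the successor's offset within the block, divided
	     by the fragment size.  (The `- (char *) NULL' dance is just a
	     portable way to turn the pointer into an integer.)  */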
	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
	  result = malloc (BLOCKSIZE);
	  if (result == NULL)
	    return NULL;

	  /* Link all fragments but the first into the free list.  */
	  for (i = 1; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      if (next->next != NULL)
		next->next->prev = next;
	    }
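	  /* Illustrative: with a 4096-byte block and log = 7 there are 32
	     fragments; fragment 0 is returned to the caller and fragments
	     1..31 are pushed onto _fraghead[7], each free fragment doubling
	     as its own `struct list' node.  */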
	  /* Initialize the nfree and first counters for this block. */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;
	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
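	  /* Note: the recursive malloc (BLOCKSIZE) above already counted the
	     whole block as used; the three adjustments above re-classify
	     everything but the fragment being returned as free.  */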
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Check to see if
		 the new core will be contiguous with the final free
		 block; if so we don't need to get as much.  */
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  (*__morecore) (0) == ADDRESS (block + lastblocks) &&
		  (morecore ((blocks - lastblocks) * BLOCKSIZE)) != NULL)
		{
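		  /* Illustrative: if the final free run holds 3 blocks and we
		     need 5, and __morecore (0) confirms the break sits right
		     at the end of that run, asking for only 2 * BLOCKSIZE
		     extends the run in place instead of fetching 5 fresh
		     blocks.  */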
		  _heapinfo[block].free.size = blocks;
		  _bytes_free += (blocks - lastblocks) * BLOCKSIZE;
		  continue;
		}
	      result = morecore (blocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
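	      /* Fresh core obtained this way is already block-aligned and is
		 not on the free list, so it is simply marked busy below and
		 returned without disturbing the list.  */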
	      block = BLOCK (result);
	      _heapinfo[block].busy.type = 0;
	      _heapinfo[block].busy.info.size = blocks;
	      ++_chunks_used;
	      _bytes_used += blocks * BLOCKSIZE;
	      return result;
	    }
	}
      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list. */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
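	  /* Example (illustrative): if the entry at BLOCK described 10 free
	     blocks and BLOCKS is 4, the entry at block + 4 now describes the
	     remaining 6 and takes the old entry's place in the free list; the
	     search index moves there as well.  */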
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list. */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;
    }

  return result;
}