src/core/malloc.c (gpxe.git)

/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <io.h>
#include <gpxe/list.h>
#include <gpxe/init.h>
#include <gpxe/malloc.h>

/** @file
 *
 * Dynamic memory allocation
 *
 */

/** A free block of memory */
struct memory_block {
        /** List of free blocks */
        struct list_head list;
        /** Size of this block */
        size_t size;
};

#define MIN_MEMBLOCK_SIZE \
        ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
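
/* Worked example (added for exposition; not part of the original
 * source): on a 32-bit build, sizeof ( struct memory_block ) is
 * typically 12 bytes (two list pointers plus a size_t), so
 * fls ( 12 - 1 ) = fls ( 11 ) = 4 and MIN_MEMBLOCK_SIZE = 1 << 4 = 16.
 * The macro therefore rounds the free-block header size up to the
 * next power of two, which keeps block sizes and alignment masks
 * consistent.
 */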

/** A block of allocated memory complete with size information */
struct autosized_block {
        /** Size of this block */
        size_t size;
        /** Remaining data */
        char data[0];
};

/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
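
/* Illustrative sketch (added for exposition; not part of the original
 * source) of the zero-length semantics described above:
 *
 *      void *p = malloc ( 0 );         // returns NOWHERE, not NULL
 *      if ( ! p ) {
 *              // not reached; NULL still unambiguously means failure
 *      }
 *      free ( p );                     // freeing NOWHERE is a no-op
 */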

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/**
 * Heap size
 *
 * Currently fixed at 128kB.
 */
#define HEAP_SIZE ( 128 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));

/**
 * Allocate a memory block
 *
 * @v size              Requested size
 * @v align             Physical alignment
 * @ret ptr             Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested.  No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align ) {
        struct memory_block *block;
        size_t align_mask;
        size_t pre_size;
        ssize_t post_size;
        struct memory_block *pre;
        struct memory_block *post;

        /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
         * calculate alignment mask.
         */
        size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
        align_mask = ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 );
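
        /* Worked example of the rounding above (comment added for
         * exposition; not part of the original source): with
         * MIN_MEMBLOCK_SIZE = 16, a request of size = 25 becomes
         * ( 25 + 15 ) & ~15 = 32, and align = 64 gives
         * align_mask = 63 | 15 = 0x3f, so candidate blocks are tested
         * against 64-byte physical boundaries.
         */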

        DBG ( "Allocating %#zx (aligned %#zx)\n", size, align );

        /* Search through blocks for the first one with enough space */
        list_for_each_entry ( block, &free_blocks, list ) {
                pre_size = ( - virt_to_phys ( block ) ) & align_mask;
                post_size = block->size - pre_size - size;
                if ( post_size >= 0 ) {
                        /* Split block into pre-block, block, and
                         * post-block.  After this split, the "pre"
                         * block is the one currently linked into the
                         * free list.
                         */
                        pre = block;
                        block = ( ( ( void * ) pre ) + pre_size );
                        post = ( ( ( void * ) block ) + size );
                        DBG ( "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
                              ( ( ( void * ) pre ) + pre->size ), pre, block,
                              post, ( ( ( void * ) pre ) + pre->size ) );
                        /* If there is a "post" block, add it in to
                         * the free list.  Leak it if it is too small
                         * (which can happen only at the very end of
                         * the heap).
                         */
                        if ( ( size_t ) post_size >= MIN_MEMBLOCK_SIZE ) {
                                post->size = post_size;
                                list_add ( &post->list, &pre->list );
                        }
                        /* Shrink "pre" block, leaving the main block
                         * isolated and no longer part of the free
                         * list.
                         */
                        pre->size = pre_size;
                        /* If there is no "pre" block, remove it from
                         * the list.  Also remove it (i.e. leak it) if
                         * it is too small, which can happen only at
                         * the very start of the heap.
                         */
                        if ( pre_size < MIN_MEMBLOCK_SIZE )
                                list_del ( &pre->list );
                        /* Update total free memory */
                        freemem -= size;
                        /* Return allocated block */
                        DBG ( "Allocated [%p,%p)\n", block,
                              ( ( ( void * ) block ) + size ) );
                        return block;
                }
        }

        DBG ( "Failed to allocate %#zx (aligned %#zx)\n", size, align );
        return NULL;
}

/**
 * Free a memory block
 *
 * @v ptr               Memory allocated by alloc_memblock(), or NULL
 * @v size              Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
        struct memory_block *freeing;
        struct memory_block *block;
        ssize_t gap_before;
        ssize_t gap_after = -1;

        /* Allow for ptr==NULL */
        if ( ! ptr )
                return;

        /* Round up size to match actual size that alloc_memblock()
         * would have used.
         */
        size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
        freeing = ptr;
        freeing->size = size;
        DBG ( "Freeing [%p,%p)\n", freeing, ( ( ( void * ) freeing ) + size ));

        /* Insert/merge into free list */
        list_for_each_entry ( block, &free_blocks, list ) {
                /* Calculate gaps before and after the "freeing" block */
                gap_before = ( ( ( void * ) freeing ) -
                               ( ( ( void * ) block ) + block->size ) );
                gap_after = ( ( ( void * ) block ) -
                              ( ( ( void * ) freeing ) + freeing->size ) );
                /* Merge with immediately preceding block, if possible */
                if ( gap_before == 0 ) {
                        DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
                              ( ( ( void * ) block ) + block->size ), freeing,
                              ( ( ( void * ) freeing ) + freeing->size ),block,
                              ( ( ( void * ) freeing ) + freeing->size ) );
                        block->size += size;
                        list_del ( &block->list );
                        freeing = block;
                }
                /* Stop processing as soon as we reach a following block */
                if ( gap_after >= 0 )
                        break;
        }

        /* Insert before the immediately following block.  If
         * possible, merge the following block into the "freeing"
         * block.
         */
        DBG ( "[%p,%p)\n", freeing, ( ( ( void * ) freeing ) + freeing->size));
        list_add_tail ( &freeing->list, &block->list );
        if ( gap_after == 0 ) {
                DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
                      ( ( ( void * ) freeing ) + freeing->size ), block,
                      ( ( ( void * ) block ) + block->size ), freeing,
                      ( ( ( void * ) block ) + block->size ) );
                freeing->size += block->size;
                list_del ( &block->list );
        }

        /* Update free memory counter */
        freemem += size;
}
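
/* Illustrative usage of the block-level API above (added for
 * exposition; not part of the original source).  The caller is
 * responsible for remembering the size it requested, because
 * free_memblock() stores no per-allocation header:
 *
 *      void *buf = alloc_memblock ( 512, 2048 );  // physically 2kB-aligned
 *      if ( buf ) {
 *              // ... use the 512-byte buffer ...
 *              free_memblock ( buf, 512 );        // same size as requested
 *      }
 */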

/**
 * Reallocate memory
 *
 * @v old_ptr           Memory previously allocated by malloc(), or NULL
 * @v new_size          Requested size
 * @ret new_ptr         Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
        struct autosized_block *old_block;
        struct autosized_block *new_block;
        size_t old_total_size;
        size_t new_total_size;
        size_t old_size;
        void *new_ptr = NOWHERE;

        /* Allocate new memory if necessary.  If allocation fails,
         * return without touching the old block.
         */
        if ( new_size ) {
                new_total_size = ( new_size +
                                   offsetof ( struct autosized_block, data ) );
                new_block = alloc_memblock ( new_total_size, 1 );
                if ( ! new_block )
                        return NULL;
                new_block->size = new_total_size;
                new_ptr = &new_block->data;
        }

        /* Copy across relevant part of the old data region (if any),
         * then free it.  Note that at this point either (a) new_ptr
         * is valid, or (b) new_size is 0; either way, the memcpy() is
         * valid.
         */
        if ( old_ptr && ( old_ptr != NOWHERE ) ) {
                old_block = container_of ( old_ptr, struct autosized_block,
                                           data );
                old_total_size = old_block->size;
                old_size = ( old_total_size -
                             offsetof ( struct autosized_block, data ) );
                memcpy ( new_ptr, old_ptr,
                         ( ( old_size < new_size ) ? old_size : new_size ) );
                free_memblock ( old_block, old_total_size );
        }

        return new_ptr;
}
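
/* Illustrative realloc() usage (added for exposition; not part of the
 * original source): growing a buffer and then releasing it via a
 * zero-size request, as documented above.
 *
 *      char *buf = malloc ( 64 );
 *      char *bigger = realloc ( buf, 128 );    // copies old contents on success
 *      if ( bigger )
 *              buf = bigger;                   // on failure, buf is untouched
 *      buf = realloc ( buf, 0 );               // frees the block, returns NOWHERE
 */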

/**
 * Allocate memory
 *
 * @v size              Requested size
 * @ret ptr             Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
        return realloc ( NULL, size );
}

/**
 * Free memory
 *
 * @v ptr               Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {
        realloc ( ptr, 0 );
}

/**
 * Allocate cleared memory
 *
 * @v size              Requested size
 * @ret ptr             Allocated memory
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size)
 */
void * zalloc ( size_t size ) {
        void *data;

        data = malloc ( size );
        if ( data )
                memset ( data, 0, size );
        return data;
}
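
/* Illustrative zalloc() usage (added for exposition; not part of the
 * original source): allocating a zero-initialised structure, then
 * releasing it with free().  "struct foo" is a hypothetical type used
 * only for this example.
 *
 *      struct foo *foo = zalloc ( sizeof ( *foo ) );
 *      if ( ! foo )
 *              return -ENOMEM;
 *      // ... all fields of *foo start out as zero ...
 *      free ( foo );
 */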

/**
 * Add memory to allocation pool
 *
 * @v start             Start address
 * @v len               Length of memory to add
 *
 * Adds the block of memory [start,start+len) to the allocation pool.
 * This is a one-way operation; there is no way to reclaim this
 * memory.
 *
 * @c start must be aligned to at least a multiple of sizeof(void*).
 */
void mpopulate ( void *start, size_t len ) {
        /* Prevent free_memblock() from rounding up len beyond the end
         * of what we were actually given...
         */
        free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
}
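
/* Illustrative mpopulate() usage (added for exposition; not part of
 * the original source): donating an extra, suitably aligned region to
 * the pool at run time.  "extra_pool" is a hypothetical buffer.
 *
 *      static char extra_pool[4096]
 *              __attribute__ (( aligned ( __alignof__ ( void * ) ) ));
 *      ...
 *      mpopulate ( extra_pool, sizeof ( extra_pool ) );
 */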

/**
 * Initialise the heap
 *
 */
static void init_heap ( void ) {
        mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
        .initialise = init_heap,
};

#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 */
void mdumpfree ( void ) {
        struct memory_block *block;

        printf ( "Free block list:\n" );
        list_for_each_entry ( block, &free_blocks, list ) {
                printf ( "[%p,%p] (size %#zx)\n", block,
                         ( ( ( void * ) block ) + block->size ), block->size );
        }
}
#endif