/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
19 FILE_LICENCE ( GPL2_OR_LATER
);
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <gpxe/list.h>
#include <gpxe/init.h>
#include <gpxe/malloc.h>
/** @file
 *
 * Dynamic memory allocation
 *
 */
36 /** A free block of memory */
38 /** List of free blocks */
39 struct list_head list
;
40 /** Size of this block */
/** Minimum size of a free memory block
 *
 * Rounded up to the nearest power of two at least as large as the
 * free-block header, so that size/alignment arithmetic can use simple
 * bit masks (fls() returns the index of the highest set bit).
 */
#define MIN_MEMBLOCK_SIZE \
( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information
 *
 * malloc()/realloc() prepend this header so that free() can recover
 * the allocation size; callers receive a pointer to @c data.
 */
struct autosized_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Remaining data (returned to the caller) */
	char data[0];
};
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
70 #define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory
 *
 * Updated by alloc_memblock() and free_memblock(); declared extern in
 * the public header so callers can inspect it.
 */
size_t freemem;

/** Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself
 *
 * Aligned to the pointer size so that blocks handed out by malloc()
 * are suitably aligned for any pointer-sized object.
 */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
89 * Allocate a memory block
91 * @v size Requested size
92 * @v align Physical alignment
93 * @ret ptr Memory block, or NULL
95 * Allocates a memory block @b physically aligned as requested. No
96 * guarantees are provided for the alignment of the virtual address.
98 * @c align must be a power of two. @c size may not be zero.
100 void * alloc_memblock ( size_t size
, size_t align
) {
101 struct memory_block
*block
;
105 struct memory_block
*pre
;
106 struct memory_block
*post
;
108 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
109 * calculate alignment mask.
111 size
= ( size
+ MIN_MEMBLOCK_SIZE
- 1 ) & ~( MIN_MEMBLOCK_SIZE
- 1 );
112 align_mask
= ( align
- 1 ) | ( MIN_MEMBLOCK_SIZE
- 1 );
114 DBG ( "Allocating %#zx (aligned %#zx)\n", size
, align
);
116 /* Search through blocks for the first one with enough space */
117 list_for_each_entry ( block
, &free_blocks
, list
) {
118 pre_size
= ( - virt_to_phys ( block
) ) & align_mask
;
119 post_size
= block
->size
- pre_size
- size
;
120 if ( post_size
>= 0 ) {
121 /* Split block into pre-block, block, and
122 * post-block. After this split, the "pre"
123 * block is the one currently linked into the
127 block
= ( ( ( void * ) pre
) + pre_size
);
128 post
= ( ( ( void * ) block
) + size
);
129 DBG ( "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre
,
130 ( ( ( void * ) pre
) + pre
->size
), pre
, block
,
131 post
, ( ( ( void * ) pre
) + pre
->size
) );
132 /* If there is a "post" block, add it in to
133 * the free list. Leak it if it is too small
134 * (which can happen only at the very end of
137 if ( ( size_t ) post_size
>= MIN_MEMBLOCK_SIZE
) {
138 post
->size
= post_size
;
139 list_add ( &post
->list
, &pre
->list
);
141 /* Shrink "pre" block, leaving the main block
142 * isolated and no longer part of the free
145 pre
->size
= pre_size
;
146 /* If there is no "pre" block, remove it from
147 * the list. Also remove it (i.e. leak it) if
148 * it is too small, which can happen only at
149 * the very start of the heap.
151 if ( pre_size
< MIN_MEMBLOCK_SIZE
)
152 list_del ( &pre
->list
);
153 /* Update total free memory */
155 /* Return allocated block */
156 DBG ( "Allocated [%p,%p)\n", block
,
157 ( ( ( void * ) block
) + size
) );
162 DBG ( "Failed to allocate %#zx (aligned %#zx)\n", size
, align
);
167 * Free a memory block
169 * @v ptr Memory allocated by alloc_memblock(), or NULL
170 * @v size Size of the memory
172 * If @c ptr is NULL, no action is taken.
174 void free_memblock ( void *ptr
, size_t size
) {
175 struct memory_block
*freeing
;
176 struct memory_block
*block
;
178 ssize_t gap_after
= -1;
180 /* Allow for ptr==NULL */
184 /* Round up size to match actual size that alloc_memblock()
187 size
= ( size
+ MIN_MEMBLOCK_SIZE
- 1 ) & ~( MIN_MEMBLOCK_SIZE
- 1 );
189 freeing
->size
= size
;
190 DBG ( "Freeing [%p,%p)\n", freeing
, ( ( ( void * ) freeing
) + size
));
192 /* Insert/merge into free list */
193 list_for_each_entry ( block
, &free_blocks
, list
) {
194 /* Calculate gaps before and after the "freeing" block */
195 gap_before
= ( ( ( void * ) freeing
) -
196 ( ( ( void * ) block
) + block
->size
) );
197 gap_after
= ( ( ( void * ) block
) -
198 ( ( ( void * ) freeing
) + freeing
->size
) );
199 /* Merge with immediately preceding block, if possible */
200 if ( gap_before
== 0 ) {
201 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", block
,
202 ( ( ( void * ) block
) + block
->size
), freeing
,
203 ( ( ( void * ) freeing
) + freeing
->size
),block
,
204 ( ( ( void * ) freeing
) + freeing
->size
) );
206 list_del ( &block
->list
);
209 /* Stop processing as soon as we reach a following block */
210 if ( gap_after
>= 0 )
214 /* Insert before the immediately following block. If
215 * possible, merge the following block into the "freeing"
218 DBG ( "[%p,%p)\n", freeing
, ( ( ( void * ) freeing
) + freeing
->size
));
219 list_add_tail ( &freeing
->list
, &block
->list
);
220 if ( gap_after
== 0 ) {
221 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing
,
222 ( ( ( void * ) freeing
) + freeing
->size
), block
,
223 ( ( ( void * ) block
) + block
->size
), freeing
,
224 ( ( ( void * ) block
) + block
->size
) );
225 freeing
->size
+= block
->size
;
226 list_del ( &block
->list
);
229 /* Update free memory counter */
236 * @v old_ptr Memory previously allocated by malloc(), or NULL
237 * @v new_size Requested size
238 * @ret new_ptr Allocated memory, or NULL
240 * Allocates memory with no particular alignment requirement. @c
241 * new_ptr will be aligned to at least a multiple of sizeof(void*).
242 * If @c old_ptr is non-NULL, then the contents of the newly allocated
243 * memory will be the same as the contents of the previously allocated
244 * memory, up to the minimum of the old and new sizes. The old memory
247 * If allocation fails the previously allocated block is left
248 * untouched and NULL is returned.
250 * Calling realloc() with a new size of zero is a valid way to free a
253 void * realloc ( void *old_ptr
, size_t new_size
) {
254 struct autosized_block
*old_block
;
255 struct autosized_block
*new_block
;
256 size_t old_total_size
;
257 size_t new_total_size
;
259 void *new_ptr
= NOWHERE
;
261 /* Allocate new memory if necessary. If allocation fails,
262 * return without touching the old block.
265 new_total_size
= ( new_size
+
266 offsetof ( struct autosized_block
, data
) );
267 new_block
= alloc_memblock ( new_total_size
, 1 );
270 new_block
->size
= new_total_size
;
271 new_ptr
= &new_block
->data
;
274 /* Copy across relevant part of the old data region (if any),
275 * then free it. Note that at this point either (a) new_ptr
276 * is valid, or (b) new_size is 0; either way, the memcpy() is
279 if ( old_ptr
&& ( old_ptr
!= NOWHERE
) ) {
280 old_block
= container_of ( old_ptr
, struct autosized_block
,
282 old_total_size
= old_block
->size
;
283 old_size
= ( old_total_size
-
284 offsetof ( struct autosized_block
, data
) );
285 memcpy ( new_ptr
, old_ptr
,
286 ( ( old_size
< new_size
) ? old_size
: new_size
) );
287 free_memblock ( old_block
, old_total_size
);
/**
 * Allocate memory
 *
 * @v size		Requested size
 * @ret ptr		Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
	/* Implemented as a zero-copy special case of realloc() */
	return realloc ( NULL, size );
}
/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {
	/* realloc() to size zero releases the block (see realloc()) */
	realloc ( ptr, 0 );
}
/**
 * Allocate cleared memory
 *
 * @v size		Requested size
 * @ret ptr		Allocated memory, or NULL
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size)
 */
void * zalloc ( size_t size ) {
	void *data;

	data = malloc ( size );
	if ( data )
		memset ( data, 0, size );
	return data;
}
341 * Add memory to allocation pool
343 * @v start Start address
346 * Adds a block of memory [start,end) to the allocation pool. This is
347 * a one-way operation; there is no way to reclaim this memory.
349 * @c start must be aligned to at least a multiple of sizeof(void*).
351 void mpopulate ( void *start
, size_t len
) {
352 /* Prevent free_memblock() from rounding up len beyond the end
353 * of what we were actually given...
355 free_memblock ( start
, ( len
& ~( MIN_MEMBLOCK_SIZE
- 1 ) ) );
359 * Initialise the heap
362 static void init_heap ( void ) {
363 mpopulate ( heap
, sizeof ( heap
) );
366 /** Memory allocator initialisation function */
367 struct init_fn heap_init_fn
__init_fn ( INIT_EARLY
) = {
368 .initialise
= init_heap
,
374 * Dump free block list
377 void mdumpfree ( void ) {
378 struct memory_block
*block
;
380 printf ( "Free block list:\n" );
381 list_for_each_entry ( block
, &free_blocks
, list
) {
382 printf ( "[%p,%p] (size %#zx)\n", block
,
383 ( ( ( void * ) block
) + block
->size
), block
->size
);