/*
 * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
FILE_LICENCE ( GPL2_OR_LATER );
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <gpxe/init.h>
#include <gpxe/list.h>
#include <gpxe/malloc.h>
/** @file
 *
 * Dynamic memory allocation
 *
 */
36 /** A free block of memory */
38 /** List of free blocks */
39 struct list_head list
;
40 /** Size of this block */
/** Physical address alignment maintained for free blocks of memory.
 *
 * Rounded up to the next power of two at least as large as
 * struct memory_block, so that every free block can hold the
 * free-list bookkeeping header.
 */
#define MIN_MEMBLOCK_SIZE \
	( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block */
	size_t size;
	/** Remaining data */
	char data[0];
};
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c do not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );
75 /** Total amount of free memory */
/**
 * Heap size
 *
 * Currently fixed at 128kB.
 */
#define HEAP_SIZE ( 128 * 1024 )
85 /** The heap itself */
86 static char heap
[HEAP_SIZE
] __attribute__ (( aligned ( __alignof__(void *) )));
89 * Discard some cached data
91 * @ret discarded Number of cached items discarded
93 static unsigned int discard_cache ( void ) {
94 struct cache_discarder
*discarder
;
95 unsigned int discarded
= 0;
97 for_each_table_entry ( discarder
, CACHE_DISCARDERS
) {
98 discarded
+= discarder
->discard();
104 * Allocate a memory block
106 * @v size Requested size
107 * @v align Physical alignment
108 * @ret ptr Memory block, or NULL
110 * Allocates a memory block @b physically aligned as requested. No
111 * guarantees are provided for the alignment of the virtual address.
113 * @c align must be a power of two. @c size may not be zero.
115 void * alloc_memblock ( size_t size
, size_t align
) {
116 struct memory_block
*block
;
120 struct memory_block
*pre
;
121 struct memory_block
*post
;
123 /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
124 * calculate alignment mask.
126 size
= ( size
+ MIN_MEMBLOCK_SIZE
- 1 ) & ~( MIN_MEMBLOCK_SIZE
- 1 );
127 align_mask
= ( align
- 1 ) | ( MIN_MEMBLOCK_SIZE
- 1 );
129 DBG ( "Allocating %#zx (aligned %#zx)\n", size
, align
);
131 /* Search through blocks for the first one with enough space */
132 list_for_each_entry ( block
, &free_blocks
, list
) {
133 pre_size
= ( - virt_to_phys ( block
) ) & align_mask
;
134 post_size
= block
->size
- pre_size
- size
;
135 if ( post_size
>= 0 ) {
136 /* Split block into pre-block, block, and
137 * post-block. After this split, the "pre"
138 * block is the one currently linked into the
142 block
= ( ( ( void * ) pre
) + pre_size
);
143 post
= ( ( ( void * ) block
) + size
);
144 DBG ( "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre
,
145 ( ( ( void * ) pre
) + pre
->size
),
147 ( ( ( void * ) pre
) + pre
->size
) );
148 /* If there is a "post" block, add it in to
149 * the free list. Leak it if it is too small
150 * (which can happen only at the very end of
153 if ( (size_t) post_size
>= MIN_MEMBLOCK_SIZE
) {
154 post
->size
= post_size
;
155 list_add ( &post
->list
, &pre
->list
);
157 /* Shrink "pre" block, leaving the main block
158 * isolated and no longer part of the free
161 pre
->size
= pre_size
;
162 /* If there is no "pre" block, remove it from
163 * the list. Also remove it (i.e. leak it) if
164 * it is too small, which can happen only at
165 * the very start of the heap.
167 if ( pre_size
< MIN_MEMBLOCK_SIZE
)
168 list_del ( &pre
->list
);
169 /* Update total free memory */
171 /* Return allocated block */
172 DBG ( "Allocated [%p,%p)\n", block
,
173 ( ( ( void * ) block
) + size
) );
178 /* Try discarding some cached data to free up memory */
179 if ( ! discard_cache() ) {
180 /* Nothing available to discard */
181 DBG ( "Failed to allocate %#zx (aligned %#zx)\n",
189 * Free a memory block
191 * @v ptr Memory allocated by alloc_memblock(), or NULL
192 * @v size Size of the memory
194 * If @c ptr is NULL, no action is taken.
196 void free_memblock ( void *ptr
, size_t size
) {
197 struct memory_block
*freeing
;
198 struct memory_block
*block
;
200 ssize_t gap_after
= -1;
202 /* Allow for ptr==NULL */
206 /* Round up size to match actual size that alloc_memblock()
209 size
= ( size
+ MIN_MEMBLOCK_SIZE
- 1 ) & ~( MIN_MEMBLOCK_SIZE
- 1 );
211 freeing
->size
= size
;
212 DBG ( "Freeing [%p,%p)\n", freeing
, ( ( ( void * ) freeing
) + size
));
214 /* Insert/merge into free list */
215 list_for_each_entry ( block
, &free_blocks
, list
) {
216 /* Calculate gaps before and after the "freeing" block */
217 gap_before
= ( ( ( void * ) freeing
) -
218 ( ( ( void * ) block
) + block
->size
) );
219 gap_after
= ( ( ( void * ) block
) -
220 ( ( ( void * ) freeing
) + freeing
->size
) );
221 /* Merge with immediately preceding block, if possible */
222 if ( gap_before
== 0 ) {
223 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", block
,
224 ( ( ( void * ) block
) + block
->size
), freeing
,
225 ( ( ( void * ) freeing
) + freeing
->size
),block
,
226 ( ( ( void * ) freeing
) + freeing
->size
) );
228 list_del ( &block
->list
);
231 /* Stop processing as soon as we reach a following block */
232 if ( gap_after
>= 0 )
236 /* Insert before the immediately following block. If
237 * possible, merge the following block into the "freeing"
240 DBG ( "[%p,%p)\n", freeing
, ( ( ( void * ) freeing
) + freeing
->size
));
241 list_add_tail ( &freeing
->list
, &block
->list
);
242 if ( gap_after
== 0 ) {
243 DBG ( "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing
,
244 ( ( ( void * ) freeing
) + freeing
->size
), block
,
245 ( ( ( void * ) block
) + block
->size
), freeing
,
246 ( ( ( void * ) block
) + block
->size
) );
247 freeing
->size
+= block
->size
;
248 list_del ( &block
->list
);
251 /* Update free memory counter */
258 * @v old_ptr Memory previously allocated by malloc(), or NULL
259 * @v new_size Requested size
260 * @ret new_ptr Allocated memory, or NULL
262 * Allocates memory with no particular alignment requirement. @c
263 * new_ptr will be aligned to at least a multiple of sizeof(void*).
264 * If @c old_ptr is non-NULL, then the contents of the newly allocated
265 * memory will be the same as the contents of the previously allocated
266 * memory, up to the minimum of the old and new sizes. The old memory
269 * If allocation fails the previously allocated block is left
270 * untouched and NULL is returned.
272 * Calling realloc() with a new size of zero is a valid way to free a
275 void * realloc ( void *old_ptr
, size_t new_size
) {
276 struct autosized_block
*old_block
;
277 struct autosized_block
*new_block
;
278 size_t old_total_size
;
279 size_t new_total_size
;
281 void *new_ptr
= NOWHERE
;
283 /* Allocate new memory if necessary. If allocation fails,
284 * return without touching the old block.
287 new_total_size
= ( new_size
+
288 offsetof ( struct autosized_block
, data
) );
289 new_block
= alloc_memblock ( new_total_size
, 1 );
292 new_block
->size
= new_total_size
;
293 new_ptr
= &new_block
->data
;
296 /* Copy across relevant part of the old data region (if any),
297 * then free it. Note that at this point either (a) new_ptr
298 * is valid, or (b) new_size is 0; either way, the memcpy() is
301 if ( old_ptr
&& ( old_ptr
!= NOWHERE
) ) {
302 old_block
= container_of ( old_ptr
, struct autosized_block
,
304 old_total_size
= old_block
->size
;
305 old_size
= ( old_total_size
-
306 offsetof ( struct autosized_block
, data
) );
307 memcpy ( new_ptr
, old_ptr
,
308 ( ( old_size
< new_size
) ? old_size
: new_size
) );
309 free_memblock ( old_block
, old_total_size
);
/**
 * Allocate memory
 *
 * @v size		Requested size
 * @ret ptr		Memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c ptr
 * will be aligned to at least a multiple of sizeof(void*).
 */
void * malloc ( size_t size ) {
	return realloc ( NULL, size );
}
/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {
	/* realloc() to zero size is the documented way to free */
	realloc ( ptr, 0 );
}
/**
 * Allocate cleared memory
 *
 * @v size		Requested size
 * @ret ptr		Allocated memory
 *
 * Allocate memory as per malloc(), and zero it.
 *
 * This function name is non-standard, but pretty intuitive.
 * zalloc(size) is always equivalent to calloc(1,size)
 */
void * zalloc ( size_t size ) {
	void *data;

	data = malloc ( size );
	if ( data )
		memset ( data, 0, size );
	return data;
}
363 * Add memory to allocation pool
365 * @v start Start address
368 * Adds a block of memory [start,end) to the allocation pool. This is
369 * a one-way operation; there is no way to reclaim this memory.
371 * @c start must be aligned to at least a multiple of sizeof(void*).
373 void mpopulate ( void *start
, size_t len
) {
374 /* Prevent free_memblock() from rounding up len beyond the end
375 * of what we were actually given...
377 free_memblock ( start
, ( len
& ~( MIN_MEMBLOCK_SIZE
- 1 ) ) );
381 * Initialise the heap
384 static void init_heap ( void ) {
385 mpopulate ( heap
, sizeof ( heap
) );
388 /** Memory allocator initialisation function */
389 struct init_fn heap_init_fn
__init_fn ( INIT_EARLY
) = {
390 .initialise
= init_heap
,
396 * Dump free block list
399 void mdumpfree ( void ) {
400 struct memory_block
*block
;
402 printf ( "Free block list:\n" );
403 list_for_each_entry ( block
, &free_blocks
, list
) {
404 printf ( "[%p,%p] (size %#zx)\n", block
,
405 ( ( ( void * ) block
) + block
->size
), block
->size
);