1 /* A replacement malloc with:
2 - Much reduced code size;
3 - Smaller RAM footprint;
4 - The ability to handle downward-growing heaps;
7 - Probably higher memory fragmentation;
8 - Doesn't support threads (but, if it did support threads,
9 it wouldn't need a global lock, only a compare-and-swap instruction);
10 - Assumes the maximum alignment required is the alignment of a pointer;
11 - Assumes that memory is already there and doesn't need to be allocated.
13 * Synopsis of public routines
16 Return a pointer to a newly allocated chunk of at least n bytes, or null
17 if no space is available.
19 Release the chunk of memory pointed to by p, with no effect if p is null.
20 realloc(void* p, size_t n);
21 Return a pointer to a chunk of size n that contains the same data
22 as does chunk p up to the minimum of (n, p's size) bytes, or null
23 if no space is available. The returned pointer may or may not be
24 the same as p. If p is null, equivalent to malloc. Unless the
25 #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
26 size argument of zero (re)allocates a minimum-sized chunk.
27 memalign(size_t alignment, size_t n);
28 Return a pointer to a newly allocated chunk of n bytes, aligned
29 in accord with the alignment argument, which must be a power of
30 two. Will fail if 'alignment' is too large.
31 calloc(size_t unit, size_t quantity);
32 Returns a pointer to quantity * unit bytes, with all locations
35 Equivalent to free(p).
36 malloc_trim(size_t pad);
37 Release all but pad bytes of freed top-most memory back
38 to the system. Return 1 if successful, else 0.
39 malloc_usable_size(void* p);
40 Report the number of usable allocated bytes associated with allocated
41 chunk p. This may or may not report more bytes than were requested,
42 due to alignment and minimum size constraints.
44 Prints brief summary statistics on stderr.
46 Returns (by copy) a struct containing various summary statistics.
47 mallopt(int parameter_number, int parameter_value)
48 Changes one of the tunable parameters described below. Returns
49 1 if successful in changing the parameter, else 0. Actually, returns 0
50 always, as no parameter can be changed.
54 #define MALLOC_DIRECTION -1
57 #ifndef MALLOC_DIRECTION
58 #define MALLOC_DIRECTION 1
65 void* realloc(void*, size_t);
66 void* memalign(size_t, size_t);
68 void* pvalloc(size_t);
69 void* calloc(size_t, size_t);
71 int malloc_trim(size_t);
72 size_t malloc_usable_size(void*);
73 void malloc_stats(void);
74 int mallopt(int, int);
75 struct mallinfo
mallinfo(void);
77 typedef struct freelist_entry
{
79 struct freelist_entry
*next
;
82 extern void * __malloc_end
;
83 extern fle __malloc_freelist
;
/* Return the number of bytes that need to be added to X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.
   (Relies on two's-complement arithmetic: -x mod align == the
   distance from x up to the next multiple of align.)  */
#define M_ALIGN(x, align) (-(size_t)(x) & ((align) - 1))
/* Return the number of bytes that need to be subtracted from X to make
   it aligned to an ALIGN boundary.  ALIGN must be a power of 2.
   (This is simply X modulo ALIGN, computed with a mask.)  */
#define M_ALIGN_SUB(x, align) ((size_t)(x) & ((align) - 1))
93 extern void __malloc_start
;
/* This is the minimum gap allowed between __malloc_end and the top of
   the stack.  This is only checked for when __malloc_end is
   decreased; if instead the stack grows into the heap, silent data
   corruption will result.  */
#define MALLOC_MINIMUM_GAP 32
102 register void * stack_pointer
asm ("r15");
103 #define MALLOC_LIMIT stack_pointer
105 #define MALLOC_LIMIT __builtin_frame_address (0)
108 #if MALLOC_DIRECTION < 0
109 #define CAN_ALLOC_P(required) \
110 (((size_t) __malloc_end - (size_t)MALLOC_LIMIT \
111 - MALLOC_MINIMUM_GAP) >= (required))
113 #define CAN_ALLOC_P(required) \
114 (((size_t)MALLOC_LIMIT - (size_t) __malloc_end \
115 - MALLOC_MINIMUM_GAP) >= (required))
/* real_size is the size we actually have to allocate, allowing for
   overhead (the size_t header) and alignment.  Requests too small to
   hold a full freelist entry are rounded up to one, so a freed block
   can always be threaded back onto the freelist.
   NOTE: SZ is evaluated more than once, so it must have no side
   effects.  */
#define REAL_SIZE(sz)						\
  ((sz) < sizeof (struct freelist_entry) - sizeof (size_t)	\
   ? sizeof (struct freelist_entry)				\
   : (sz) + sizeof (size_t) + M_ALIGN ((sz), sizeof (size_t)))
127 void * __malloc_end
= &__malloc_start
;
128 fle __malloc_freelist
;
136 /* real_size is the size we actually have to allocate, allowing for
137 overhead and alignment. */
138 size_t real_size
= REAL_SIZE (sz
);
140 /* Look for the first block on the freelist that is large enough. */
141 for (nextfree
= &__malloc_freelist
;
143 nextfree
= &(*nextfree
)->next
)
147 if (block
->size
>= real_size
)
149 /* If the block found is just the right size, remove it from
150 the free list. Otherwise, split it. */
151 if (block
->size
< real_size
+ sizeof (struct freelist_entry
))
153 *nextfree
= block
->next
;
154 return (void *)&block
->next
;
158 size_t newsize
= block
->size
- real_size
;
159 fle newnext
= block
->next
;
160 *nextfree
= (fle
)((size_t)block
+ real_size
);
161 (*nextfree
)->size
= newsize
;
162 (*nextfree
)->next
= newnext
;
167 /* If this is the last block on the freelist, and it was too small,
170 && __malloc_end
== (void *)((size_t)block
+ block
->size
))
172 size_t moresize
= real_size
- block
->size
;
173 if (! CAN_ALLOC_P (moresize
))
177 if (MALLOC_DIRECTION
< 0)
179 block
= __malloc_end
= (void *)((size_t)block
- moresize
);
183 __malloc_end
= (void *)((size_t)block
+ real_size
);
190 /* No free space at the end of the free list. Allocate new space
193 if (! CAN_ALLOC_P (real_size
))
196 if (MALLOC_DIRECTION
> 0)
198 block
= __malloc_end
;
199 __malloc_end
= (void *)((size_t)__malloc_end
+ real_size
);
203 block
= __malloc_end
= (void *)((size_t)__malloc_end
- real_size
);
206 block
->size
= real_size
;
207 return (void *)&block
->next
;
218 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
223 /* Look on the freelist to see if there's a free block just before
224 or just after this block. */
225 for (nextfree
= &__malloc_freelist
;
227 nextfree
= &(*nextfree
)->next
)
229 fle thisblock
= *nextfree
;
230 if ((size_t)thisblock
+ thisblock
->size
== (size_t) block
)
232 thisblock
->size
+= block
->size
;
233 if (MALLOC_DIRECTION
> 0
235 && (size_t) block
+ block
->size
== (size_t) thisblock
->next
)
237 thisblock
->size
+= thisblock
->next
->size
;
238 thisblock
->next
= thisblock
->next
->next
;
242 else if ((size_t) thisblock
== (size_t) block
+ block
->size
)
244 if (MALLOC_DIRECTION
< 0
246 && (size_t) block
== ((size_t) thisblock
->next
247 + thisblock
->next
->size
))
249 *nextfree
= thisblock
->next
;
250 thisblock
->next
->size
+= block
->size
+ thisblock
->size
;
254 block
->size
+= thisblock
->size
;
255 block
->next
= thisblock
->next
;
260 else if ((MALLOC_DIRECTION
> 0
261 && (size_t) thisblock
> (size_t) block
)
262 || (MALLOC_DIRECTION
< 0
263 && (size_t) thisblock
< (size_t) block
))
267 block
->next
= *nextfree
;
273 #ifdef DEFINE_REALLOC
275 realloc (void *block_p
, size_t sz
)
277 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
278 size_t real_size
= REAL_SIZE (sz
);
279 size_t old_real_size
;
284 old_real_size
= block
->size
;
286 /* Perhaps we need to allocate more space. */
287 if (old_real_size
< real_size
)
290 size_t old_size
= old_real_size
- sizeof (size_t);
292 /* Need to allocate, copy, and free. */
293 result
= malloc (sz
);
296 memcpy (result
, block_p
, old_size
< sz
? old_size
: sz
);
300 /* Perhaps we can free some space. */
301 if (old_real_size
- real_size
>= sizeof (struct freelist_entry
))
303 fle newblock
= (fle
)((size_t)block
+ real_size
);
304 block
->size
= real_size
;
305 newblock
->size
= old_real_size
- real_size
;
306 free (&newblock
->next
);
314 calloc (size_t n
, size_t elem_size
)
317 size_t sz
= n
* elem_size
;
318 result
= malloc (sz
);
320 memset (result
, 0, sz
);
333 #ifdef DEFINE_MEMALIGN
335 memalign (size_t align
, size_t sz
)
340 /* real_size is the size we actually have to allocate, allowing for
341 overhead and alignment. */
342 size_t real_size
= REAL_SIZE (sz
);
344 /* Some sanity checking on 'align'. */
345 if ((align
& (align
- 1)) != 0
349 /* Look for the first block on the freelist that is large enough. */
350 /* One tricky part is this: We want the result to be a valid pointer
351 to free. That means that there has to be room for a size_t
352 before the block. If there's additional space before the block,
353 it should go on the freelist, or it'll be lost---we could add it
354 to the size of the block before it in memory, but finding the
355 previous block is expensive. */
356 for (nextfree
= &__malloc_freelist
;
358 nextfree
= &(*nextfree
)->next
)
363 /* If we've run out of free blocks, allocate more space. */
366 old_size
= real_size
;
367 if (MALLOC_DIRECTION
< 0)
369 old_size
+= M_ALIGN_SUB (((size_t)__malloc_end
370 - old_size
+ sizeof (size_t)),
372 if (! CAN_ALLOC_P (old_size
))
374 block
= __malloc_end
= (void *)((size_t)__malloc_end
- old_size
);
378 block
= __malloc_end
;
379 old_size
+= M_ALIGN ((size_t)__malloc_end
+ sizeof (size_t),
381 if (! CAN_ALLOC_P (old_size
))
383 __malloc_end
= (void *)((size_t)__malloc_end
+ old_size
);
386 block
->size
= old_size
;
392 old_size
= block
->size
;
396 before_size
= M_ALIGN (&block
->next
, align
);
397 if (before_size
!= 0)
398 before_size
= sizeof (*block
) + M_ALIGN (&(block
+1)->next
, align
);
400 /* If this is the last block on the freelist, and it is too small,
403 && old_size
< real_size
+ before_size
404 && __malloc_end
== (void *)((size_t)block
+ block
->size
))
406 if (MALLOC_DIRECTION
< 0)
408 size_t moresize
= real_size
- block
->size
;
409 moresize
+= M_ALIGN_SUB ((size_t)&block
->next
- moresize
, align
);
410 if (! CAN_ALLOC_P (moresize
))
412 block
= __malloc_end
= (void *)((size_t)block
- moresize
);
414 block
->size
= old_size
= old_size
+ moresize
;
419 if (! CAN_ALLOC_P (before_size
+ real_size
- block
->size
))
421 __malloc_end
= (void *)((size_t)block
+ before_size
+ real_size
);
422 block
->size
= old_size
= before_size
+ real_size
;
425 /* Two out of the four cases below will now be possible; which
426 two depends on MALLOC_DIRECTION. */
429 if (old_size
>= real_size
+ before_size
)
431 /* This block will do. If there needs to be space before it,
433 if (before_size
!= 0)
435 fle old_block
= block
;
437 old_block
->size
= before_size
;
438 block
= (fle
)((size_t)block
+ before_size
);
440 /* If there's no space after the block, we're now nearly
441 done; just make a note of the size required.
442 Otherwise, we need to create a new free space block. */
443 if (old_size
- before_size
444 <= real_size
+ sizeof (struct freelist_entry
))
446 block
->size
= old_size
- before_size
;
447 return (void *)&block
->next
;
452 new_block
= (fle
)((size_t)block
+ real_size
);
453 new_block
->size
= old_size
- before_size
- real_size
;
454 if (MALLOC_DIRECTION
> 0)
456 new_block
->next
= old_block
->next
;
457 old_block
->next
= new_block
;
461 new_block
->next
= old_block
;
462 *nextfree
= new_block
;
469 /* If the block found is just the right size, remove it from
470 the free list. Otherwise, split it. */
471 if (old_size
<= real_size
+ sizeof (struct freelist_entry
))
473 *nextfree
= block
->next
;
474 return (void *)&block
->next
;
478 size_t newsize
= old_size
- real_size
;
479 fle newnext
= block
->next
;
480 *nextfree
= (fle
)((size_t)block
+ real_size
);
481 (*nextfree
)->size
= newsize
;
482 (*nextfree
)->next
= newnext
;
490 block
->size
= real_size
;
491 return (void *)&block
->next
;
499 return memalign (128, sz
);
502 #ifdef DEFINE_PVALLOC
506 return memalign (128, sz
+ M_ALIGN (sz
, 128));
510 #ifdef DEFINE_MALLINFO
522 memset (&r
, 0, sizeof (r
));
526 for (fr
= __malloc_freelist
; fr
; fr
= fr
->next
)
528 free_size
+= fr
->size
;
533 if (MALLOC_DIRECTION
> 0)
534 atend
= (void *)((size_t)fr
+ fr
->size
) == __malloc_end
;
536 atend
= (void *)fr
== __malloc_end
;
538 r
.keepcost
= fr
->size
;
542 if (MALLOC_DIRECTION
> 0)
543 total_size
= (char *)__malloc_end
- (char *)&__malloc_start
;
545 total_size
= (char *)&__malloc_start
- (char *)__malloc_end
;
548 /* Fixme: should walk through all the in-use blocks and see if
552 r
.arena
= total_size
;
553 r
.fordblks
= free_size
;
554 r
.uordblks
= total_size
- free_size
;
555 r
.ordblks
= free_blocks
;
560 #ifdef DEFINE_MALLOC_STATS
572 fprintf (fp
, "malloc has reserved %u bytes between %p and %p\n",
573 i
.arena
, &__malloc_start
, __malloc_end
);
574 fprintf (fp
, "there are %u bytes free in %u chunks\n",
575 i
.fordblks
, i
.ordblks
);
576 fprintf (fp
, "of which %u bytes are at the end of the reserved space\n",
578 fprintf (fp
, "and %u bytes are in use.\n", i
.uordblks
);
582 #ifdef DEFINE_MALLOC_USABLE_SIZE
584 malloc_usable_size (void *block_p
)
586 fle block
= (fle
)((size_t) block_p
- offsetof (struct freelist_entry
, next
));
587 return block
->size
- sizeof (size_t);
591 #ifdef DEFINE_MALLOPT
593 mallopt (int n
, int v
)