/* A replacement malloc with:
   - Much reduced code size;
   - Smaller RAM footprint;
   - The ability to handle downward-growing heaps;
   - Probably higher memory fragmentation;
   - Doesn't support threads (but, if it did support threads,
     it wouldn't need a global lock, only a compare-and-swap instruction);
   - Assumes the maximum alignment required is the alignment of a pointer;
   - Assumes that memory is already there and doesn't need to be allocated.

   * Synopsis of public routines

   malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
   free(void* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
   realloc(void* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
   memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two. Will fail if 'alignment' is too large.
   calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
   cfree(void* p);
     Equivalent to free(p).
   malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
   malloc_usable_size(void* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
   malloc_stats();
     Prints brief summary statistics on stderr.
   mallinfo()
     Returns (by copy) a struct containing various summary statistics.
   mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0. Actually, returns 0
     always, as no parameter can be changed.
*/
/* MALLOC_DIRECTION is -1 when the heap grows downward from
   __malloc_start (as on this target), or 1 when it grows upward.
   NOTE(review): a target-specific #ifdef guard around the -1 definition
   appears to have been lost in this copy — verify against upstream.  */
#define MALLOC_DIRECTION -1

#ifndef MALLOC_DIRECTION
#define MALLOC_DIRECTION 1
#endif
/* Public entry points; see the synopsis at the top of the file.
   NOTE(review): the malloc/free/valloc/cfree prototypes were dropped
   from this copy and have been restored — verify against upstream.  */
void* malloc(size_t);
void free(void*);
void* realloc(void*, size_t);
void* memalign(size_t, size_t);
void* valloc(size_t);
void* pvalloc(size_t);
void* calloc(size_t, size_t);
void cfree(void*);
int malloc_trim(size_t);
size_t malloc_usable_size(void*);
void malloc_stats(void);
int mallopt(int, int);
struct mallinfo mallinfo(void);
/* A free-list entry.  The 'size' member (which counts this header as
   well as the payload) must come first: an allocated chunk keeps only
   its size, and user data begins at 'next' — see the uses of
   offsetof (struct freelist_entry, next) and REAL_SIZE below.
   NOTE(review): the 'size' member line was dropped from this copy and
   has been restored — verify against upstream.  */
typedef struct freelist_entry
{
  size_t size;
  struct freelist_entry *next;
} *fle;

/* Current end of the heap; moves in the direction given by
   MALLOC_DIRECTION.  */
extern void * __malloc_end;
/* Head of the free list; free() keeps it address-ordered
   (direction-dependent).  */
extern fle __malloc_freelist;
/* Return the number of bytes that need to be added to X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN(x, align) (-(size_t)(x) & ((align) - 1))

/* Return the number of bytes that need to be subtracted from X to make it
   aligned to an ALIGN boundary.  ALIGN must be a power of 2.  */
#define M_ALIGN_SUB(x, align) ((size_t)(x) & ((align) - 1))
/* The heap runs from __malloc_start (supplied by the linker script)
   to __malloc_end.  */
extern void __malloc_start;

/* This is the minimum gap allowed between __malloc_end and the top of
   the stack.  This is only checked for when __malloc_end is
   decreased; if instead the stack grows into the heap, silent data
   corruption will result.  */
#define MALLOC_MINIMUM_GAP 32

#ifdef __xstormy16__
/* On xstormy16 r15 is the stack pointer; read it directly.
   NOTE(review): the #ifdef/#else/#endif lines were dropped from this
   copy and have been reconstructed — verify the guard name upstream.  */
register void * stack_pointer asm ("r15");
#define MALLOC_LIMIT stack_pointer
#else
#define MALLOC_LIMIT __builtin_frame_address (0)
#endif
/* Nonzero if REQUIRED more bytes can be taken from the heap while
   keeping at least MALLOC_MINIMUM_GAP bytes between __malloc_end and
   the stack.  The subtraction is done in whichever order keeps the
   unsigned arithmetic non-negative for the heap direction.  */
#if MALLOC_DIRECTION < 0
#define CAN_ALLOC_P(required)				\
  (((size_t) __malloc_end - (size_t)MALLOC_LIMIT	\
    - MALLOC_MINIMUM_GAP) >= (required))
#else
#define CAN_ALLOC_P(required)				\
  (((size_t)MALLOC_LIMIT - (size_t) __malloc_end	\
    - MALLOC_MINIMUM_GAP) >= (required))
#endif
/* real_size is the size we actually have to allocate, allowing for
   overhead and alignment: a size_t header plus SZ bytes rounded up to
   a multiple of sizeof (size_t), but never smaller than a whole
   freelist entry so the chunk can be threaded back onto the free list.
   (Fix: SZ is now parenthesized in the expansion so expression
   arguments are safe.)  */
#define REAL_SIZE(sz)						\
  ((sz) < sizeof (struct freelist_entry) - sizeof (size_t)	\
   ? sizeof (struct freelist_entry)				\
   : (sz) + sizeof (size_t) + M_ALIGN ((sz), sizeof (size_t)))
#ifdef DEFINE_MALLOC

/* The heap is initially empty: both ends coincide.  */
void * __malloc_end = &__malloc_start;
fle __malloc_freelist;

/* Return a pointer to a newly allocated chunk of at least SZ bytes,
   or NULL if no space is available.
   NOTE(review): reconstructed from a corrupted dump (signature, braces
   and the downward-heap arm of the end-of-heap test were dropped) —
   verify against the upstream file.  */
void *
malloc (size_t sz)
{
  fle *nextfree;
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Look for the first block on the freelist that is large enough.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      block = *nextfree;

      if (block->size >= real_size)
        {
          /* If the block found is just the right size, remove it from
             the free list.  Otherwise, split it.  */
          if (block->size < real_size + sizeof (struct freelist_entry))
            {
              *nextfree = block->next;
              return (void *)&block->next;
            }
          else
            {
              size_t newsize = block->size - real_size;
              fle newnext = block->next;
              *nextfree = (fle)((size_t)block + real_size);
              (*nextfree)->size = newsize;
              (*nextfree)->next = newnext;
              goto done;
            }
        }

      /* If this is the last block on the freelist, and it was too small,
         enlarge it in place.  The end-of-heap test depends on the heap
         direction: upward heaps end just past the block, downward heaps
         end at the block itself (cf. mallinfo's 'atend' test).  */
      if (! block->next
          && (MALLOC_DIRECTION > 0
              ? __malloc_end == (void *)((size_t)block + block->size)
              : (void *)block == __malloc_end))
        {
          size_t moresize = real_size - block->size;

          if (! CAN_ALLOC_P (moresize))
            return NULL;
          /* The block leaves the freelist; it becomes the result.  */
          *nextfree = NULL;
          if (MALLOC_DIRECTION < 0)
            block = __malloc_end = (void *)((size_t)block - moresize);
          else
            __malloc_end = (void *)((size_t)block + real_size);
          goto done;
        }
    }

  /* No free space at the end of the free list.  Allocate new space
     at the heap end and use that.  */

  if (! CAN_ALLOC_P (real_size))
    return NULL;

  if (MALLOC_DIRECTION > 0)
    {
      block = __malloc_end;
      __malloc_end = (void *)((size_t)__malloc_end + real_size);
    }
  else
    block = __malloc_end = (void *)((size_t)__malloc_end - real_size);

 done:
  block->size = real_size;
  return (void *)&block->next;
}
#endif
#ifdef DEFINE_FREE

/* Return the chunk BLOCK_P to the free list, coalescing with any
   adjacent free block.  No effect if BLOCK_P is NULL.
   NOTE(review): reconstructed from a corrupted dump (signature, braces
   and some '&& thisblock->next' guard lines were dropped) — verify
   against the upstream file.  */
void
free (void *block_p)
{
  fle *nextfree;
  fle block = (fle)((size_t) block_p
                    - offsetof (struct freelist_entry, next));

  if (block_p == NULL)
    return;

  /* Look on the freelist to see if there's a free block just before
     or just after this block.  */
  for (nextfree = &__malloc_freelist;
       *nextfree;
       nextfree = &(*nextfree)->next)
    {
      fle thisblock = *nextfree;

      if ((size_t)thisblock + thisblock->size == (size_t) block)
        {
          /* THISBLOCK ends where BLOCK begins: merge BLOCK into it,
             then see if the merged block now also touches the
             following freelist entry.  */
          thisblock->size += block->size;
          if (MALLOC_DIRECTION > 0
              && thisblock->next
              && (size_t) block + block->size == (size_t) thisblock->next)
            {
              thisblock->size += thisblock->next->size;
              thisblock->next = thisblock->next->next;
            }
          return;
        }
      else if ((size_t) thisblock == (size_t) block + block->size)
        {
          /* BLOCK ends where THISBLOCK begins: absorb THISBLOCK.  */
          if (MALLOC_DIRECTION < 0
              && thisblock->next
              && (size_t) block == ((size_t) thisblock->next
                                    + thisblock->next->size))
            {
              /* BLOCK bridges THISBLOCK and the entry after it:
                 fold all three into the lowest block.  */
              *nextfree = thisblock->next;
              thisblock->next->size += block->size + thisblock->size;
            }
          else
            {
              block->size += thisblock->size;
              block->next = thisblock->next;
              *nextfree = block;
            }
          return;
        }
      else if ((MALLOC_DIRECTION > 0
                && (size_t) thisblock > (size_t) block)
               || (MALLOC_DIRECTION < 0
                   && (size_t) thisblock < (size_t) block))
        /* Passed the insertion point — the list is kept address
           ordered (ascending for upward heaps, descending for
           downward ones).  */
        break;
    }

  /* No neighbour to merge with; link BLOCK in at the insertion
     point.  */
  block->next = *nextfree;
  *nextfree = block;
}
#endif
#ifdef DEFINE_REALLOC

/* Resize chunk BLOCK_P to hold at least SZ bytes, preserving the
   first min(SZ, old size) bytes.  NULL BLOCK_P behaves as malloc.
   NOTE(review): reconstructed from a corrupted dump (NULL check,
   result handling and the REALLOC_ZERO_BYTES_FREES guard were
   dropped; the guard is documented in the file header) — verify
   against the upstream file.  */
void *
realloc (void *block_p, size_t sz)
{
  fle block = (fle)((size_t) block_p
                    - offsetof (struct freelist_entry, next));
  size_t real_size = REAL_SIZE (sz);
  size_t old_real_size;

  if (block_p == NULL)
    return malloc (sz);

#ifdef REALLOC_ZERO_BYTES_FREES
  if (sz == 0)
    {
      free (block_p);
      return NULL;
    }
#endif

  old_real_size = block->size;

  /* Perhaps we need to allocate more space.  */
  if (old_real_size < real_size)
    {
      void *result;
      size_t old_size = old_real_size - sizeof (size_t);

      /* Need to allocate, copy, and free.  */
      result = malloc (sz);
      if (result == NULL)
        return NULL;
      memcpy (result, block_p, old_size < sz ? old_size : sz);
      free (block_p);
      return result;
    }

  /* Perhaps we can free some space.  */
  if (old_real_size - real_size >= sizeof (struct freelist_entry))
    {
      fle newblock = (fle)((size_t)block + real_size);
      block->size = real_size;
      newblock->size = old_real_size - real_size;
      /* Hand the tail back through free(); &newblock->next is the
         user pointer for that tail chunk.  */
      free (&newblock->next);
    }
  return block_p;
}
#endif
#ifdef DEFINE_CALLOC

/* Allocate zero-initialized space for N objects of ELEM_SIZE bytes
   each, or NULL on failure.  */
void *
calloc (size_t n, size_t elem_size)
{
  void *result;
  size_t sz = n * elem_size;

  /* Fix: reject a multiplication that wrapped around; the original
     code would silently return a too-small block.  */
  if (elem_size != 0 && sz / elem_size != n)
    return NULL;

  result = malloc (sz);
  if (result != NULL)
    memset (result, 0, sz);
  return result;
}

/* cfree is simply another name for free (see the file header).
   NOTE(review): this definition was dropped from this copy and has
   been restored — verify against upstream.  */
void
cfree (void *p)
{
  free (p);
}
#endif
#ifdef DEFINE_MEMALIGN

/* Return a newly allocated chunk of SZ bytes whose user pointer is a
   multiple of ALIGN (which must be a power of two), or NULL.
   NOTE(review): heavily reconstructed from a corrupted dump — control
   structure, several guard lines and the downward-heap arm of the
   end-of-heap test were dropped; verify carefully against the
   upstream file.  */
void *
memalign (size_t align, size_t sz)
{
  fle *nextfree;
  fle block;

  /* real_size is the size we actually have to allocate, allowing for
     overhead and alignment.  */
  size_t real_size = REAL_SIZE (sz);

  /* Some sanity checking on 'align'.  */
  if ((align & (align - 1)) != 0
      || align == 0)
    return NULL;

  /* Look for the first block on the freelist that is large enough.  */
  /* One tricky part is this: We want the result to be a valid pointer
     to free.  That means that there has to be room for a size_t
     before the block.  If there's additional space before the block,
     it should go on the freelist, or it'll be lost---we could add it
     to the size of the block before it in memory, but finding the
     previous block is expensive.  */
  for (nextfree = &__malloc_freelist;
       ;
       nextfree = &(*nextfree)->next)
    {
      size_t before_size;
      size_t old_size;

      /* If we've run out of free blocks, allocate more space.  */
      if (! *nextfree)
        {
          old_size = real_size;
          if (MALLOC_DIRECTION < 0)
            {
              old_size += M_ALIGN_SUB (((size_t)__malloc_end
                                        - old_size + sizeof (size_t)),
                                       align);
              if (! CAN_ALLOC_P (old_size))
                return NULL;
              block = __malloc_end
                = (void *)((size_t)__malloc_end - old_size);
            }
          else
            {
              block = __malloc_end;
              old_size += M_ALIGN ((size_t)__malloc_end + sizeof (size_t),
                                   align);
              if (! CAN_ALLOC_P (old_size))
                return NULL;
              __malloc_end = (void *)((size_t)__malloc_end + old_size);
            }
          block->size = old_size;
          block->next = NULL;
          *nextfree = block;
        }

      block = *nextfree;
      old_size = block->size;

      /* Bytes needed in front of the user pointer to reach an ALIGN
         boundary; a nonzero gap must hold a whole freelist entry so
         it can stay on the free list.  */
      before_size = M_ALIGN (&block->next, align);
      if (before_size != 0)
        before_size = sizeof (*block) + M_ALIGN (&(block+1)->next, align);

      /* If this is the last block on the freelist, and it is too small,
         enlarge it in place (direction-dependent end-of-heap test, as
         in malloc/mallinfo).  */
      if (! block->next
          && old_size < real_size + before_size
          && (MALLOC_DIRECTION > 0
              ? __malloc_end == (void *)((size_t)block + block->size)
              : (void *)block == __malloc_end))
        {
          if (MALLOC_DIRECTION < 0)
            {
              size_t moresize = real_size - block->size;
              moresize += M_ALIGN_SUB ((size_t)&block->next - moresize,
                                       align);
              if (! CAN_ALLOC_P (moresize))
                return NULL;
              block = __malloc_end = (void *)((size_t)block - moresize);
              block->next = NULL;
              block->size = old_size = old_size + moresize;
              *nextfree = block;
              /* The moved block now starts aligned; no gap needed.  */
              before_size = 0;
            }
          else
            {
              if (! CAN_ALLOC_P (before_size + real_size - block->size))
                return NULL;
              __malloc_end = (void *)((size_t)block + before_size
                                      + real_size);
              block->size = old_size = before_size + real_size;
            }
        }

      /* Two out of the four cases below will now be possible; which
         two depends on MALLOC_DIRECTION.  */
      if (old_size >= real_size + before_size)
        {
          /* This block will do.  If there needs to be space before it,
             split the block.  */
          if (before_size != 0)
            {
              fle old_block = block;

              old_block->size = before_size;
              block = (fle)((size_t)block + before_size);

              /* If there's no space after the block, we're now nearly
                 done; just make a note of the size required.
                 Otherwise, we need to create a new free space block.  */
              if (old_size - before_size
                  <= real_size + sizeof (struct freelist_entry))
                {
                  block->size = old_size - before_size;
                  return (void *)&block->next;
                }
              else
                {
                  fle new_block;
                  new_block = (fle)((size_t)block + real_size);
                  new_block->size = old_size - before_size - real_size;
                  if (MALLOC_DIRECTION > 0)
                    {
                      new_block->next = old_block->next;
                      old_block->next = new_block;
                    }
                  else
                    {
                      new_block->next = old_block;
                      *nextfree = new_block;
                    }
                }
            }
          else
            {
              /* If the block found is just the right size, remove it from
                 the free list.  Otherwise, split it.  */
              if (old_size <= real_size + sizeof (struct freelist_entry))
                {
                  *nextfree = block->next;
                  return (void *)&block->next;
                }
              else
                {
                  size_t newsize = old_size - real_size;
                  fle newnext = block->next;
                  *nextfree = (fle)((size_t)block + real_size);
                  (*nextfree)->size = newsize;
                  (*nextfree)->next = newnext;
                }
            }
          block->size = real_size;
          return (void *)&block->next;
        }
    }
}
#endif
#ifdef DEFINE_VALLOC
/* Allocate SZ bytes aligned to a 128-byte "page" (this allocator's
   stand-in for the system page size).  */
void *
valloc (size_t sz)
{
  return memalign (128, sz);
}
#endif

#ifdef DEFINE_PVALLOC
/* Allocate SZ bytes rounded up to a whole number of 128-byte
   "pages", page-aligned.  */
void *
pvalloc (size_t sz)
{
  return memalign (128, sz + M_ALIGN (sz, 128));
}
#endif
#ifdef DEFINE_MALLINFO

/* Return (by copy) a struct of summary statistics: total reserved
   bytes (arena), free bytes (fordblks), in-use bytes (uordblks),
   free-chunk count (ordblks) and releasable top-most bytes
   (keepcost).  */
struct mallinfo
mallinfo (void)
{
  struct mallinfo r;
  fle fr;
  size_t free_size;
  size_t total_size;
  size_t free_blocks;

  memset (&r, 0, sizeof (r));

  free_size = 0;
  free_blocks = 0;
  for (fr = __malloc_freelist; fr; fr = fr->next)
    {
      free_size += fr->size;
      free_blocks++;
      if (! fr->next)
        {
          int atend;
          /* Does the last freelist entry touch the heap end?  The
             test depends on the heap direction.  */
          if (MALLOC_DIRECTION > 0)
            atend = (void *)((size_t)fr + fr->size) == __malloc_end;
          else
            atend = (void *)fr == __malloc_end;
          if (atend)
            r.keepcost = fr->size;
        }
    }

  if (MALLOC_DIRECTION > 0)
    total_size = (char *)__malloc_end - (char *)&__malloc_start;
  else
    total_size = (char *)&__malloc_start - (char *)__malloc_end;

  /* Fixme: should walk through all the in-use blocks and see if
     they're valid.  */

  r.arena = total_size;
  r.fordblks = free_size;
  r.uordblks = total_size - free_size;
  r.ordblks = free_blocks;
  return r;
}
#endif
#ifdef DEFINE_MALLOC_STATS

/* Print brief summary statistics on stderr.
   NOTE(review): the function signature and the 'i.keepcost' argument
   line were dropped from this copy and have been reconstructed —
   verify against upstream.  */
void
malloc_stats (void)
{
  FILE *fp;
  struct mallinfo i;

  fp = stderr;
  i = mallinfo ();
  fprintf (fp, "malloc has reserved %u bytes between %p and %p\n",
	   i.arena, &__malloc_start, __malloc_end);
  fprintf (fp, "there are %u bytes free in %u chunks\n",
	   i.fordblks, i.ordblks);
  fprintf (fp, "of which %u bytes are at the end of the reserved space\n",
	   i.keepcost);
  fprintf (fp, "and %u bytes are in use.\n", i.uordblks);
}
#endif
#ifdef DEFINE_MALLOC_USABLE_SIZE

/* Report the number of usable bytes in allocated chunk BLOCK_P: the
   chunk's recorded size minus the size_t header.  May exceed the
   originally requested size because of alignment and minimum-size
   rounding in REAL_SIZE.  */
size_t
malloc_usable_size (void *block_p)
{
  fle block = (fle)((size_t) block_p
		    - offsetof (struct freelist_entry, next));
  return block->size - sizeof (size_t);
}
#endif
596 #ifdef DEFINE_MALLOPT
598 mallopt (int n
, int v
)