newlib-cygwin.git: newlib/libc/stdlib/nano-mallocr.c
/*
 * Copyright (c) 2012, 2013 ARM Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Implementation of <<malloc>> <<free>> <<calloc>> <<realloc>>, optionally
 * reentrant.
 *
 * For interface documentation, refer to malloc.c.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <malloc.h>
#include <stdint.h>

#if DEBUG
#include <assert.h>
#else
#undef assert
#define assert(x) ((void)0)
#endif

#ifndef MAX
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
#endif

#define _SBRK_R(X) _sbrk_r(X)
#ifdef _LIBC

#include <sys/config.h>
#include <reent.h>

#define RARG struct _reent *reent_ptr,
#define RONEARG struct _reent *reent_ptr
#define RCALL reent_ptr,
#define RONECALL reent_ptr

#define MALLOC_LOCK __malloc_lock(reent_ptr)
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr)

#define RERRNO _REENT_ERRNO(reent_ptr)

#define nano_malloc _malloc_r
#define nano_free _free_r
#define nano_realloc _realloc_r
#define nano_memalign _memalign_r
#define nano_valloc _valloc_r
#define nano_pvalloc _pvalloc_r
#define nano_calloc _calloc_r
#define nano_cfree _cfree_r
#define nano_malloc_usable_size _malloc_usable_size_r
#define nano_malloc_stats _malloc_stats_r
#define nano_mallinfo _mallinfo_r
#define nano_mallopt _mallopt_r

#else /* ! _LIBC */

#define RARG
#define RONEARG
#define RCALL
#define RONECALL
#define MALLOC_LOCK
#define MALLOC_UNLOCK
#define RERRNO errno

#define nano_malloc malloc
#define nano_free free
#define nano_realloc realloc
#define nano_memalign memalign
#define nano_valloc valloc
#define nano_pvalloc pvalloc
#define nano_calloc calloc
#define nano_cfree cfree
#define nano_malloc_usable_size malloc_usable_size
#define nano_malloc_stats malloc_stats
#define nano_mallinfo mallinfo
#define nano_mallopt mallopt
#endif /* ! _LIBC */
/* Redefine names to avoid conflict with user names */
#define free_list __malloc_free_list
#define sbrk_start __malloc_sbrk_start
#define current_mallinfo __malloc_current_mallinfo

#define ALIGN_PTR(ptr, align) \
    (((ptr) + (align) - (intptr_t)1) & ~((align) - (intptr_t)1))
#define ALIGN_SIZE(size, align) \
    (((size) + (align) - (size_t)1) & ~((align) - (size_t)1))
/* Alignment of allocated block */
#define MALLOC_ALIGN (8U)
#define CHUNK_ALIGN (sizeof(void*))
#define MALLOC_PADDING ((MAX(MALLOC_ALIGN, CHUNK_ALIGN)) - CHUNK_ALIGN)

/* Minimum allocation size needed to hold a free-list pointer */
#define MALLOC_MINSIZE (sizeof(void *))
#define MALLOC_PAGE_ALIGN (0x1000)
#define MAX_ALLOC_SIZE (0x80000000U)
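
/* Illustrative note (editorial, assuming common pointer sizes): on a
 * typical 32-bit target where sizeof(void *) == 4, CHUNK_ALIGN is 4 and
 * MALLOC_PADDING is 8 - 4 = 4, so nano_malloc may insert 4 bytes of
 * explicit padding in front of the payload.  On a 64-bit target where
 * sizeof(void *) == 8, MALLOC_PADDING is 0 and no explicit padding is
 * ever needed on the malloc path. */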
typedef size_t malloc_size_t;

typedef struct malloc_chunk
{
    /*          --------------------------------------
     * chunk -> | size                                |
     *          --------------------------------------
     *          | Padding for alignment               |
     *          | This includes padding inserted by   |
     *          | the compiler (to align fields) and  |
     *          | explicit padding inserted by this   |
     *          | implementation. If any explicit     |
     *          | padding is being used then the      |
     *          | sizeof (size) bytes at              |
     *          | mem_ptr - CHUNK_OFFSET must be      |
     *          | initialized with the negative       |
     *          | offset to size.                     |
     *          --------------------------------------
     * mem_ptr->| When allocated: data                |
     *          | When freed: pointer to next free    |
     *          | chunk                               |
     *          --------------------------------------
     */
    /* size of the allocated payload area, including size before
       CHUNK_OFFSET */
    long size;

    /* From this point on, the memory holds either the pointer to the next
     * free chunk (when freed) or the user data (when allocated) */
    struct malloc_chunk * next;
} chunk;
#define CHUNK_OFFSET ((malloc_size_t)(&(((struct malloc_chunk *)0)->next)))

/* size of smallest possible chunk. A memory piece smaller than this size
 * won't be able to create a chunk */
#define MALLOC_MINCHUNK (CHUNK_OFFSET + MALLOC_PADDING + MALLOC_MINSIZE)
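
/* Worked example (editorial, assuming common type sizes): with 4-byte
 * 'long' and 4-byte pointers, CHUNK_OFFSET is 4, MALLOC_PADDING is 4 and
 * MALLOC_MINSIZE is 4, so MALLOC_MINCHUNK is 12 bytes.  With 8-byte
 * 'long' and pointers the corresponding values are 8, 0 and 8, giving a
 * 16-byte minimum chunk. */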
/* Forward data declarations */
extern chunk * free_list;
extern char * sbrk_start;
extern struct mallinfo current_mallinfo;

/* Forward function declarations */
extern void * nano_malloc(RARG malloc_size_t);
extern void nano_free (RARG void * free_p);
extern void nano_cfree(RARG void * ptr);
extern void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem);
extern void nano_malloc_stats(RONEARG);
extern malloc_size_t nano_malloc_usable_size(RARG void * ptr);
extern void * nano_realloc(RARG void * ptr, malloc_size_t size);
extern void * nano_memalign(RARG size_t align, size_t s);
extern int nano_mallopt(RARG int parameter_number, int parameter_value);
extern void * nano_valloc(RARG size_t s);
extern void * nano_pvalloc(RARG size_t s);
static inline chunk * get_chunk_from_ptr(void * ptr)
{
    /* Assume that there is no explicit padding in the
       chunk, and that the chunk starts at ptr - CHUNK_OFFSET. */
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);

    /* c->size being negative indicates that there is explicit padding in
       the chunk. In which case, c->size is currently the negative offset to
       the true size. */
    if (c->size < 0) c = (chunk *)((char *)c + c->size);
    return c;
}
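
/* Illustrative note (editorial): if a block was returned with 4 bytes of
 * explicit padding on a 32-bit target, *(long *)(ptr - CHUNK_OFFSET) holds
 * -4, so the adjustment above steps the candidate pointer back by 4 bytes
 * to the real chunk header. */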
#ifdef DEFINE_MALLOC

/* List header of free blocks */
chunk * free_list = NULL;

/* Starting point of memory allocated from system */
char * sbrk_start = NULL;
/** Function sbrk_aligned
  * Algorithm:
  *   Use sbrk() to obtain more memory and ensure it is CHUNK_ALIGN aligned.
  *   Optimise for the case that it is already aligned - only ask for extra
  *   padding after we know we need it.
  */
static void* sbrk_aligned(RARG malloc_size_t s)
{
    char *p, *align_p;

    if (sbrk_start == NULL) sbrk_start = _SBRK_R(RCALL 0);

    p = _SBRK_R(RCALL s);

    /* sbrk returns -1 if it fails to allocate */
    if (p == (void *)-1)
        return p;

    align_p = (char*)ALIGN_PTR((uintptr_t)p, CHUNK_ALIGN);
    if (align_p != p)
    {
        /* p is not aligned, ask for a few more bytes so that we have s
         * bytes reserved from align_p. */
        p = _SBRK_R(RCALL align_p - p);
        if (p == (void *)-1)
            return p;
    }
    return align_p;
}
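
/* Illustrative note (editorial, addresses are made up): if the first
 * _SBRK_R(RCALL s) call returns p == 0x10002 and CHUNK_ALIGN is 4, then
 * align_p is 0x10004 and the second call requests the 2 missing bytes, so
 * the range [align_p, align_p + s) ends up fully reserved. */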
/** Function nano_malloc
  * Algorithm:
  *   Walk through the free list to find the first match. If it fails to
  *   find one, call sbrk to allocate a new chunk.
  */
void * nano_malloc(RARG malloc_size_t s)
{
    chunk *p, *r;
    char * ptr, * align_ptr;
    int offset;

    malloc_size_t alloc_size;

    alloc_size = ALIGN_SIZE(s, CHUNK_ALIGN); /* size of aligned data load */
    alloc_size += MALLOC_PADDING; /* padding */
    alloc_size += CHUNK_OFFSET; /* size of chunk head */
    alloc_size = MAX(alloc_size, MALLOC_MINCHUNK);

    if (alloc_size >= MAX_ALLOC_SIZE || alloc_size < s)
    {
        RERRNO = ENOMEM;
        return NULL;
    }

    MALLOC_LOCK;

    p = free_list;
    r = p;

    while (r)
    {
        int rem = r->size - alloc_size;
        if (rem >= 0)
        {
            if (rem >= MALLOC_MINCHUNK)
            {
                if (p == r)
                {
                    /* First item in the list, break it into two chunks
                     * and return the first one */
                    r->size = alloc_size;
                    free_list = (chunk *)((char *)r + alloc_size);
                    free_list->size = rem;
                    free_list->next = r->next;
                } else {
                    /* Any other item in the list. Split and return
                     * the first one */
                    r->size = alloc_size;
                    p->next = (chunk *)((char *)r + alloc_size);
                    p->next->size = rem;
                    p->next->next = r->next;
                }
            }
            /* Find a chunk that is exactly the size or slightly bigger
             * than requested size, just return this chunk */
            else if (p == r)
            {
                /* Now it implies p==r==free_list. Move the free_list
                 * to next chunk */
                free_list = r->next;
            }
            else
            {
                /* Normal case. Remove it from free_list */
                p->next = r->next;
            }
            break;
        }
        p=r;
        r=r->next;
    }

    /* Failed to find an appropriate chunk. Ask for more memory */
    if (r == NULL)
    {
        r = sbrk_aligned(RCALL alloc_size);

        /* sbrk returns -1 if it fails to allocate */
        if (r == (void *)-1)
        {
            /* sbrk didn't have the requested amount. Let's check
             * if the last item in the free list is adjacent to the
             * current heap end (sbrk(0)). In that case, only ask
             * for the difference in size and merge them */
            p = free_list;
            r = p;

            while (r)
            {
                p=r;
                r=r->next;
            }

            if (p != NULL && (char *)p + p->size == (char *)_SBRK_R(RCALL 0))
            {
                /* The last free item has the heap end as neighbour.
                 * Let's ask for a smaller amount and merge */
                alloc_size -= p->size;

                if (sbrk_aligned(RCALL alloc_size) != (void *)-1)
                {
                    p->size += alloc_size;

                    /* Remove chunk from free_list. Since p != NULL there is
                       at least one chunk */
                    r = free_list;
                    if (r->next == NULL)
                    {
                        /* There is only a single chunk, remove it */
                        free_list = NULL;
                    }
                    else
                    {
                        /* Search for the chunk before the one to be removed */
                        while (p != r->next)
                        {
                            r = r->next;
                        }
                        r->next = NULL;
                    }
                    r = p;
                }
                else
                {
                    RERRNO = ENOMEM;
                    MALLOC_UNLOCK;
                    return NULL;
                }
            }
            else
            {
                RERRNO = ENOMEM;
                MALLOC_UNLOCK;
                return NULL;
            }
        }
        else
        {
            r->size = alloc_size;
        }
    }

    MALLOC_UNLOCK;

    ptr = (char *)r + CHUNK_OFFSET;

    align_ptr = (char *)ALIGN_PTR((uintptr_t)ptr, MALLOC_ALIGN);
    offset = align_ptr - ptr;

    if (offset)
    {
        /* Initialize sizeof (malloc_chunk.size) bytes at
           align_ptr - CHUNK_OFFSET with negative offset to the
           size field (at the start of the chunk).

           The negative offset to size from align_ptr - CHUNK_OFFSET is
           the size of any remaining padding minus CHUNK_OFFSET.  This is
           equivalent to the total size of the padding, because the size of
           any remaining padding is the total size of the padding minus
           CHUNK_OFFSET.

           Note that the size of the padding must be at least CHUNK_OFFSET.

           The rest of the padding is not initialized.  */
        *(long *)((char *)r + offset) = -offset;
    }

    assert(align_ptr + s <= (char *)r + alloc_size);
    return align_ptr;
}
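
/* Worked example (editorial, assuming a 32-bit target with 4-byte pointers
 * and 4-byte 'long'): for nano_malloc(10), alloc_size becomes
 * ALIGN_SIZE(10, 4) = 12, plus MALLOC_PADDING (4) and CHUNK_OFFSET (4),
 * i.e. 20 bytes, which also satisfies MALLOC_MINCHUNK (12).  The returned
 * pointer is the first MALLOC_ALIGN (8) aligned address inside the chunk's
 * payload area. */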
#endif /* DEFINE_MALLOC */

#ifdef DEFINE_FREE
#define MALLOC_CHECK_DOUBLE_FREE
/** Function nano_free
  * Implementation of libc free.
  * Algorithm:
  *   Maintain a global free chunk singly linked list, headed by the global
  *   variable free_list.
  *   On free, insert the chunk to be freed at the position that keeps all
  *   chunks sorted by address from low to high, then merge it with
  *   neighbouring chunks if they are adjacent.
  */
void nano_free (RARG void * free_p)
{
    chunk * p_to_free;
    chunk * p, * q;

    if (free_p == NULL) return;

    p_to_free = get_chunk_from_ptr(free_p);

    MALLOC_LOCK;
    if (free_list == NULL)
    {
        /* Set first free list element */
        p_to_free->next = free_list;
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    if (p_to_free < free_list)
    {
        if ((char *)p_to_free + p_to_free->size == (char *)free_list)
        {
            /* Chunk to free is just before the first element of
             * free list */
            p_to_free->size += free_list->size;
            p_to_free->next = free_list->next;
        }
        else
        {
            /* Insert before current free_list */
            p_to_free->next = free_list;
        }
        free_list = p_to_free;
        MALLOC_UNLOCK;
        return;
    }

    q = free_list;
    /* Walk through the free list to find the place for insert. */
    do
    {
        p = q;
        q = q->next;
    } while (q && q <= p_to_free);

    /* Now p <= p_to_free and either q == NULL or q > p_to_free
     * Try to merge with chunks immediately before/after it. */

    if ((char *)p + p->size == (char *)p_to_free)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk before it */
        p->size += p_to_free->size;
        /* If the merged chunk is also adjacent
         * to the chunk after it, merge again */
        if ((char *)p + p->size == (char *) q)
        {
            p->size += q->size;
            p->next = q->next;
        }
    }
#ifdef MALLOC_CHECK_DOUBLE_FREE
    else if ((char *)p + p->size > (char *)p_to_free)
    {
        /* Report double free fault */
        RERRNO = ENOMEM;
        MALLOC_UNLOCK;
        return;
    }
#endif
    else if ((char *)p_to_free + p_to_free->size == (char *) q)
    {
        /* Chunk to be freed is adjacent
         * to a free chunk after it */
        p_to_free->size += q->size;
        p_to_free->next = q->next;
        p->next = p_to_free;
    }
    else
    {
        /* Not adjacent to any chunk. Just insert it, resulting
         * in a fragment. */
        p_to_free->next = q;
        p->next = p_to_free;
    }
    MALLOC_UNLOCK;
}
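
/* Illustrative note (editorial, addresses are made up): with free chunks
 * covering [0x1000, 0x1010) and [0x1030, 0x1040), freeing a 32-byte chunk
 * at 0x1010 first merges it with the chunk before it (giving 48 bytes at
 * 0x1000) and then with the chunk after it, leaving a single 64-byte free
 * chunk at 0x1000. */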
#endif /* DEFINE_FREE */

#ifdef DEFINE_CFREE
void nano_cfree(RARG void * ptr)
{
    nano_free(RCALL ptr);
}
#endif /* DEFINE_CFREE */
#ifdef DEFINE_CALLOC
/* Function nano_calloc
 * Implement calloc simply by calling malloc and setting the memory to zero */
void * nano_calloc(RARG malloc_size_t n, malloc_size_t elem)
{
    malloc_size_t bytes;
    void * mem;

    if (__builtin_mul_overflow (n, elem, &bytes))
    {
        RERRNO = ENOMEM;
        return NULL;
    }

    mem = nano_malloc(RCALL bytes);
    if (mem != NULL) memset(mem, 0, bytes);
    return mem;
}
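
/* Illustrative note (editorial): on a 32-bit target, nano_calloc(0x10000,
 * 0x10000) would overflow the size_t product, so the overflow check above
 * makes the call fail with ENOMEM instead of zeroing a truncated block. */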
#endif /* DEFINE_CALLOC */

#ifdef DEFINE_REALLOC
/* Function nano_realloc
 * Implement realloc by malloc + memcpy */
void * nano_realloc(RARG void * ptr, malloc_size_t size)
{
    void * mem;
    chunk * p_to_realloc;
    malloc_size_t old_size;

    if (ptr == NULL) return nano_malloc(RCALL size);

    if (size == 0)
    {
        nano_free(RCALL ptr);
        return NULL;
    }

    old_size = nano_malloc_usable_size(RCALL ptr);
    if (size <= old_size && (old_size >> 1) < size)
        return ptr;

    mem = nano_malloc(RCALL size);
    if (mem != NULL)
    {
        if (old_size > size)
            old_size = size;
        memcpy(mem, ptr, old_size);
        nano_free(RCALL ptr);
    }
    return mem;
}
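
/* Illustrative note (editorial): the in-place test above keeps the old
 * block whenever the new size still fits and exceeds half of its usable
 * size.  For a block whose usable size is 100 bytes, realloc to 60 bytes
 * returns the same pointer, while realloc to 40 bytes allocates a new
 * block and copies the data. */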
#endif /* DEFINE_REALLOC */

#ifdef DEFINE_MALLINFO
struct mallinfo current_mallinfo={0,0,0,0,0,0,0,0,0,0};

struct mallinfo nano_mallinfo(RONEARG)
{
    char * sbrk_now;
    chunk * pf;
    size_t free_size = 0;
    size_t total_size;

    MALLOC_LOCK;

    if (sbrk_start == NULL) total_size = 0;
    else {
        sbrk_now = _SBRK_R(RCALL 0);

        if (sbrk_now == (void *)-1)
            total_size = (size_t)-1;
        else
            total_size = (size_t) (sbrk_now - sbrk_start);
    }

    for (pf = free_list; pf; pf = pf->next)
        free_size += pf->size;

    current_mallinfo.arena = total_size;
    current_mallinfo.fordblks = free_size;
    current_mallinfo.uordblks = total_size - free_size;

    MALLOC_UNLOCK;
    return current_mallinfo;
}
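
/* Editorial summary (derived from the code above, not upstream text):
 * arena is the total number of bytes obtained from sbrk so far, fordblks
 * the bytes currently sitting on the free list, and uordblks their
 * difference, i.e. bytes handed out to the application including chunk
 * headers and padding. */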
#endif /* DEFINE_MALLINFO */

#ifdef DEFINE_MALLOC_STATS
void nano_malloc_stats(RONEARG)
{
    nano_mallinfo(RONECALL);
    fiprintf(stderr, "max system bytes = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "system bytes     = %10u\n",
             current_mallinfo.arena);
    fiprintf(stderr, "in use bytes     = %10u\n",
             current_mallinfo.uordblks);
}
#endif /* DEFINE_MALLOC_STATS */
#ifdef DEFINE_MALLOC_USABLE_SIZE
malloc_size_t nano_malloc_usable_size(RARG void * ptr)
{
    chunk * c = (chunk *)((char *)ptr - CHUNK_OFFSET);
    int size_or_offset = c->size;

    if (size_or_offset < 0)
    {
        /* Padding is used. Exclude the padding size from the usable size. */
        c = (chunk *)((char *)c + c->size);
        return c->size - CHUNK_OFFSET + size_or_offset;
    }
    return c->size - CHUNK_OFFSET;
}
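
/* Worked example (editorial, continuing the 32-bit nano_malloc(10) case):
 * for a 20-byte chunk created with 4 bytes of explicit padding, the usable
 * size reported here is 20 - 4 - 4 = 12 bytes, i.e. the distance from the
 * aligned pointer to the end of the chunk. */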
#endif /* DEFINE_MALLOC_USABLE_SIZE */

#ifdef DEFINE_MEMALIGN
/* Function nano_memalign
 * Allocate a memory block aligned to a specific boundary.
 *   align: required alignment. Must be a power of 2; return NULL if it is
 *          not. Behavior is undefined if align is larger than the pointer
 *          value range.
 *   s: required size.
 * Return: allocated memory pointer aligned to align
 * Algorithm: Malloc a big enough block, pad the pointer up to an aligned
 *            address, then truncate and free the tail if it is too big.
 *            Record the offset between the aligned pointer and the
 *            original pointer in the padding area.
 */
void * nano_memalign(RARG size_t align, size_t s)
{
    chunk * chunk_p;
    malloc_size_t size_allocated, offset, ma_size, size_with_padding;
    char * allocated, * aligned_p;

    /* Return NULL if align isn't a power of 2 */
    if ((align & (align-1)) != 0) return NULL;

    align = MAX(align, MALLOC_ALIGN);

    /* Make sure ma_size does not overflow */
    if (s > __SIZE_MAX__ - CHUNK_ALIGN)
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    ma_size = ALIGN_SIZE(MAX(s, MALLOC_MINSIZE), CHUNK_ALIGN);

    /* Make sure size_with_padding does not overflow */
    if (ma_size > __SIZE_MAX__ - (align - MALLOC_ALIGN))
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    size_with_padding = ma_size + (align - MALLOC_ALIGN);

    allocated = nano_malloc(RCALL size_with_padding);
    if (allocated == NULL) return NULL;

    chunk_p = get_chunk_from_ptr(allocated);
    aligned_p = (char *)ALIGN_PTR(
                  (uintptr_t)((char *)chunk_p + CHUNK_OFFSET),
                  (uintptr_t)align);
    offset = aligned_p - ((char *)chunk_p + CHUNK_OFFSET);

    if (offset)
    {
        if (offset >= MALLOC_MINCHUNK)
        {
            /* Padding is too large, free it */
            chunk * front_chunk = chunk_p;
            chunk_p = (chunk *)((char *)chunk_p + offset);
            chunk_p->size = front_chunk->size - offset;
            front_chunk->size = offset;
            nano_free(RCALL (char *)front_chunk + CHUNK_OFFSET);
        }
        else
        {
            /* Padding is used. Need to set a jump offset for aligned pointer
             * to get back to chunk head */
            assert(offset >= sizeof(int));
            *(long *)((char *)chunk_p + offset) = -offset;
        }
    }

    size_allocated = chunk_p->size;
    if ((char *)chunk_p + size_allocated >
        (aligned_p + ma_size + MALLOC_MINCHUNK))
    {
        /* allocated much more than what's required for padding, free
         * tail part */
        chunk * tail_chunk = (chunk *)(aligned_p + ma_size);
        chunk_p->size = aligned_p + ma_size - (char *)chunk_p;
        tail_chunk->size = size_allocated - chunk_p->size;
        nano_free(RCALL (char *)tail_chunk + CHUNK_OFFSET);
    }
    return aligned_p;
}
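
/* Illustrative sketch (editorial, assuming a 32-bit target): a call such
 * as nano_memalign(RCALL 64, 100) allocates 100 + (64 - 8) = 156 bytes,
 * picks the first 64-byte aligned address inside that block, then either
 * records a negative jump offset in front of it or splits the unused front
 * and tail off as separate free chunks. */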
#endif /* DEFINE_MEMALIGN */

#ifdef DEFINE_MALLOPT
int nano_mallopt(RARG int parameter_number, int parameter_value)
{
    return 0;
}
#endif /* DEFINE_MALLOPT */

#ifdef DEFINE_VALLOC
void * nano_valloc(RARG size_t s)
{
    return nano_memalign(RCALL MALLOC_PAGE_ALIGN, s);
}
#endif /* DEFINE_VALLOC */

#ifdef DEFINE_PVALLOC
void * nano_pvalloc(RARG size_t s)
{
    /* Make sure the size given to nano_valloc does not overflow */
    if (s > __SIZE_MAX__ - MALLOC_PAGE_ALIGN)
    {
        RERRNO = ENOMEM;
        return NULL;
    }
    return nano_valloc(RCALL ALIGN_SIZE(s, MALLOC_PAGE_ALIGN));
}
#endif /* DEFINE_PVALLOC */