/* This code is based on mallocr.c written by Doug Lea which is released
   to the public domain. Any changes to libc/stdlib/mallocr.c
   should be reflected here as well. */
#endif /*__cplusplus*/
#include <stddef.h> /* for size_t */
#include <sys/types.h>
#include <sys/config.h>
  In newlib, all the publicly visible routines take a reentrancy
  pointer. We don't currently do anything much with it, but we do
  pass it to the lock routine.
#define MALLOC_LOCK __malloc_lock(reent_ptr)
#define MALLOC_UNLOCK __malloc_unlock(reent_ptr)
#define malloc_getpagesize (128)
#define malloc_getpagesize (4096)
extern void __malloc_lock(struct _reent *);
extern void __malloc_unlock(struct _reent *);
extern void __malloc_lock();
extern void __malloc_unlock();
#define RARG struct _reent *reent_ptr,
#define RONEARG struct _reent *reent_ptr
#define RARG reent_ptr
#define RONEARG reent_ptr
#define RDECL struct _reent *reent_ptr;

#define RCALL reent_ptr,
#define RONECALL reent_ptr
  Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
  lock and unlock the malloc data structures. MALLOC_LOCK may be
  called recursively.
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
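/* Illustrative example, not part of the original source: on an LP64 target a
   port could define, ahead of the guard above,

       #define INTERNAL_SIZE_T unsigned int

   so each chunk header field occupies 4 bytes instead of 8, at the cost of
   capping any single request below 2^31 bytes.  */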
  Following is needed on implementations whereby long > size_t.
  The problem is caused because the code performs subtractions of
  size_t values and stores the result in long values. In the case
  where long > size_t and the first value is actually less than
  the second value, the resultant value is positive. For example,
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF. This is due to the
  fact that assignment from unsigned to signed won't sign extend.
#ifdef SIZE_T_SMALLER_THAN_LONG
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) )
#define long_sub_size_t(x, y) ( (long)(x - y) )
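/* Worked example, illustrative and not from the original source, assuming
   32-bit size_t and 64-bit long:

       size_t x = 0, y = 1;
       long bad  = (long)(x - y);          becomes 0x00000000FFFFFFFF, positive
       long good = long_sub_size_t(x, y);  becomes -1 as intended

   The SIZE_T_SMALLER_THAN_LONG variant subtracts the smaller operand from
   the larger one and negates, so the result always carries the right sign.  */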
  REALLOC_ZERO_BYTES_FREES should be set if a call to
  realloc with zero bytes should be the same as a call to free.
  Some people think it should. Otherwise, since this malloc
  returns a unique pointer for malloc(0), so does realloc(p, 0).
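/* Illustrative example, not part of the original source: with
   REALLOC_ZERO_BYTES_FREES defined, a zero-byte vECREALLOc frees the old
   block and returns 0; without it, the zero-byte request is padded up to
   MINSIZE and the caller receives a new, unique minimal chunk, mirroring
   the behaviour of a zero-byte allocation.  */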
/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */
#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)
#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)
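/* Illustrative sketch, not part of the original source: for the odd-word
   sizes these macros are given, the expansion is a fixed run of word stores.
   With a 4-byte INTERNAL_SIZE_T,

       MALLOC_ZERO(p, 20);       5 words: the >=5 branch plus the trailing three
       MALLOC_COPY(d, s, 36);    9 words: all three branches plus the trailing three

   and anything larger than 9 words falls through to memset/memcpy.  */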
#define vECCALLOc _vec_calloc_r
#define mEMALIGn _memalign_r
#define vECREALLOc _vec_realloc_r
Void_t* vECREALLOc(RARG Void_t*, size_t);
Void_t* vECCALLOc(RARG size_t, size_t);

Void_t* vECREALLOc();
}; /* end of extern "C" */
struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;
/* sizes, alignments */

#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGN 16
#define MALLOC_ALIGNMENT 16
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
#define MINSIZE (sizeof(struct malloc_chunk))
/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
/* pad request bytes into a usable size */

#define request2size(req) \
  (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
    (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
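/* Worked example, illustrative and not from the original source, assuming a
   32-bit configuration where SIZE_SZ is 4, MALLOC_ALIGN_MASK is 15 and
   MINSIZE is 16:

       request2size(1)  == 16    rounded up to the minimum chunk
       request2size(20) == 32    (20 + 4 + 15) & ~15

   i.e. the user request plus one size word, rounded up to the 16-byte
   alignment this vector allocator maintains.  */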
/* Check if m has acceptable alignment */

#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
  Physical chunk operations
/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
  ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
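/* Illustrative sketch, not part of the original source: given a valid chunk
   pointer p,

       mchunkptr next = next_chunk(p);           header just past p's data
       mchunkptr prev = prev_chunk(p);           uses p->prev_size, which is only
                                                 meaningful while the previous
                                                 chunk is free
       mchunkptr q    = chunk_at_offset(p, 32);  same arithmetic with an explicit
                                                 byte offset

   next_chunk masks off PREV_INUSE but not IS_MMAPPED, so it assumes a chunk
   that came from the ordinary heap.  */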
  Dealing with use bits
/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
/* extract inuse bit of previous chunk */

#define prev_inuse(p) ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
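/* Illustrative note, not part of the original source: a chunk's own in-use
   status lives in the PREV_INUSE bit of the *following* header, so

       inuse(p)                        steps forward by p's size and reads the bit
       set_inuse_bit_at_offset(p, s)   sets it when the next header is known to
                                       sit s bytes past p
       prev_inuse(p)                   reads p's own size word, which holds the
                                       bit for the chunk physically before p
 */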
  Dealing with size fields

/* Get size, ignoring use bits */

#define chunksize(p) ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s) ((p)->size = (s))
#ifdef DEFINE_VECREALLOC
Void_t* vECREALLOc(RARG Void_t* oldmem, size_t bytes)
Void_t* vECREALLOc(RARG oldmem, bytes) RDECL Void_t* oldmem; size_t bytes;
  INTERNAL_SIZE_T    nb;             /* padded request size */

  mchunkptr          oldp;           /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T    oldsize;        /* its size */

  mchunkptr          newp;           /* chunk to return */
  INTERNAL_SIZE_T    newsize;        /* its size */
  Void_t*            newmem;         /* corresponding user mem */

  mchunkptr          remainder;      /* holds split off extra space from newp */
  INTERNAL_SIZE_T    remainder_size; /* its size */
#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0) { fREe(RCALL oldmem); return 0; }
  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mEMALIGn(RCALL 16, bytes);
  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);

  nb = request2size(bytes);
  if ((long)(oldsize) < (long)(nb))
      newmem = mEMALIGn (RCALL 16, bytes);

      if (newmem == 0) /* propagate failure */
      /* copy, free, and exit */
      MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
  remainder_size = long_sub_size_t(newsize, nb);
  if (remainder_size >= (long)MINSIZE) /* split off remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_inuse_bit_at_offset(remainder, remainder_size);
      fREe(RCALL chunk2mem(remainder)); /* let free() deal with it */
      set_head_size(newp, newsize);
      set_inuse_bit_at_offset(newp, newsize);
  return chunk2mem(newp);
#endif /* DEFINE_VECREALLOC */
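/* Usage sketch, illustrative only and not from the original source: the
   reentrant entry point is typically reached as

       void *p = _vec_realloc_r (_REENT, old, 256);

   which grows or shrinks the block while keeping the 16-byte alignment
   these vector allocation routines provide.  */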
#ifdef DEFINE_VECCALLOC
  vec_calloc calls memalign, then zeroes out the allocated chunk.
Void_t* vECCALLOc(RARG size_t n, size_t elem_size)
Void_t* vECCALLOc(RARG n, elem_size) RDECL size_t n; size_t elem_size;
  INTERNAL_SIZE_T sz = n * elem_size;
  mem = mEMALIGn (RCALL 16, sz);
    MALLOC_ZERO(mem, sz);
#endif /* DEFINE_VECCALLOC */
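/* Usage sketch, illustrative only and not from the original source:

       int *v = (int *) _vec_calloc_r (_REENT, 64, sizeof (int));

   returns a 16-byte aligned, zero-filled block of 64 * sizeof(int) bytes, or
   a null pointer on failure.  Note that sz = n * elem_size above is computed
   without an overflow check, so n * elem_size must stay representable in
   INTERNAL_SIZE_T.  */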