//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "fallback_malloc.h"

#include <__threading_support>
#ifndef _LIBCXXABI_HAS_NO_THREADS
#if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif

#include <assert.h>
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#include <new>      // for std::__libcpp_aligned_{alloc,free}

#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <cstdio> // for std::printf in the debug and instrumentation paths
#endif

//  A small, simple heap manager based (loosely) on
//  the startup heap manager from FreeBSD, optimized for space.
//
//  Manages a fixed-size memory pool, supports malloc and free only.
//  No support for realloc.
//
//  Allocates chunks in multiples of four bytes, with a four byte header
//  for each chunk. The overhead of each chunk is kept low by keeping pointers
//  as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
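//
//  For example, a one byte request still occupies two heap_nodes -- one
//  4-byte header plus one 4-byte payload unit -- and a two byte offset
//  measured in heap_node units could address 65536 * 4 bytes, far more
//  than the 512-byte pool defined below.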

namespace {

// When POSIX threads are not available, make the mutex operations a no-op
#ifndef _LIBCXXABI_HAS_NO_THREADS
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif

class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};

static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

typedef unsigned short heap_offset;
typedef unsigned short heap_size;

// On both 64 and 32 bit targets heap_node should have the following properties
// Size: 4
// Alignment: 2
struct heap_node {
  heap_offset next_node; // offset into heap
  heap_size len;         // size in units of "sizeof(heap_node)"
};
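
// The properties above could be checked directly; illustrative only, and
// assuming a 16-bit unsigned short (true on all commonly supported targets):
//   static_assert(sizeof(heap_node) == 4, "");
//   static_assert(alignof(heap_node) == 2, "");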

// All pointers returned by fallback_malloc must be at least as aligned
// as RequiredAlignment. Note that RequiredAlignment can be greater than
// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
struct FallbackMaxAlignType {
} __attribute__((aligned));
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by the sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);

static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
static heap_node* freelist = NULL;

heap_node* node_from_offset(const heap_offset offset) {
  return (heap_node*)(heap + (offset * sizeof(heap_node)));
}

heap_offset offset_from_node(const heap_node* ptr) {
  return static_cast<heap_offset>(
      static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
      sizeof(heap_node));
}

// Return a pointer to the first address, 'A', in `heap` that can actually be
// used to represent a heap_node. 'A' must be aligned so that
// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
// address should be 12 bytes after the first 16 byte boundary.
heap_node* getFirstAlignedNodeInHeap() {
  heap_node* node = (heap_node*)heap;
  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
  size_t NElemOffset = requiredOffset / sizeof(heap_node);
  return node + NElemOffset;
}
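
// Worked example: `heap` is declared __attribute__((aligned)), so on a
// typical 64 bit target it starts on a 16-byte boundary and boundaryOffset
// is 0. Then requiredOffset == 16 - 4 == 12 and NElemOffset == 3, placing
// the first header at heap + 12 so that the user pointer (header + 1) lands
// exactly on the 16-byte boundary at heap + 16.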

void init_heap() {
  freelist = getFirstAlignedNodeInHeap();
  freelist->next_node = offset_from_node(list_end);
  freelist->len = static_cast<heap_size>(list_end - freelist);
}

//  How big a chunk we allocate: the payload rounded up to whole heap_nodes,
//  plus one heap_node for the header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
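
// E.g. alloc_size(1) == (1 + 3) / 4 + 1 == 2 heap_nodes (8 bytes of pool),
// and alloc_size(8) == (8 + 3) / 4 + 1 == 3 heap_nodes (12 bytes of pool).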

bool is_fallback_ptr(void* ptr) {
  return ptr >= heap && ptr < (heap + HEAP_SIZE);
}

void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex);

  if (NULL == freelist)
    init_heap();

  //  Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_node pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment
    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }
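    // E.g. with NodesPerAlignment == 4, nelems == 2 and p->len == 9:
    // remaining_len == 7, so aligned_nelems == 2 + (7 % 4) == 5, and
    // p->len - aligned_nelems == 4, a multiple of NodesPerAlignment, which
    // keeps the split-off tail properly aligned.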

    // The chunk is larger than we need and we can create a properly aligned
    // heap_node from the tail. In this case we shorten 'p' and return the tail.
    if (p->len > aligned_nelems) {
      heap_node* q;
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len;
      q->next_node = 0;
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1;
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints.
    if (p->len >= nelems) {
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1;
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}

// Return the start of the next block
heap_node* after(struct heap_node* p) { return p + p->len; }
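
// E.g. for a chunk whose header sits at node offset 3 with len == 2,
// after(p) points at node offset 5; fallback_free uses this to detect chunks
// that are physically adjacent in the pool and can be coalesced.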

void fallback_free(void* ptr) {
  struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
  struct heap_node *p, *prev;

  mutexor mtx(&heap_mutex);

#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
#endif

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
#ifdef DEBUG_FALLBACK_MALLOC
    std::printf("  p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
                offset_from_node(p), offset_from_node(cp),
                offset_from_node(after(p)), offset_from_node(after(cp)));
#endif
    if (after(p) == cp) {
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending onto chunk at %d\n", offset_from_node(p));
#endif
      p->len = static_cast<heap_size>(
          p->len + cp->len); // make the free heap_node larger
      return;
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::printf("  Appending free chunk at %d\n", offset_from_node(p));
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      if (prev == 0) {
        freelist = cp;
        cp->next_node = p->next_node;
      } else
        prev->next_node = offset_from_node(cp);
      return;
    }
  }
  //  Nothing to merge with, add it to the start of the free list
#ifdef DEBUG_FALLBACK_MALLOC
  std::printf("  Making new free list entry %d\n", offset_from_node(cp));
#endif
  cp->next_node = offset_from_node(freelist);
  freelist = cp;
}
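
// Note that each call merges 'cp' with at most one neighbour: the loop above
// returns as soon as one adjacent free chunk is found, so two physically
// adjacent free chunks can remain separate entries in the free list.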

#ifdef INSTRUMENT_FALLBACK_MALLOC
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }

  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
} // end unnamed namespace

namespace __cxxabiv1 {

struct __attribute__((aligned)) __aligned_type {};

void* __aligned_malloc_with_fallback(size_t size) {
#if defined(_WIN32)
  if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
    return dest;
#elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
  if (void* dest = ::malloc(size))
    return dest;
#else
  if (size == 0)
    size = 1;
  if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
    return dest;
#endif
  return fallback_malloc(size);
}

void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to emergency stash
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}

void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr);
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}

void __free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else
    ::free(ptr);
}

} // namespace __cxxabiv1
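
// Illustrative round trip (hypothetical caller, not part of this file):
//
//   void* p = __cxxabiv1::__calloc_with_fallback(1, 64);
//   // if ::calloc failed, 'p' now points into the 512-byte emergency pool
//   // above and was zero-filled by the fallback path
//   __cxxabiv1::__free_with_fallback(p); // routes to fallback_free or ::free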