/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
   This file contains a simple executable memory allocator

   It is assumed, that executable code blocks are usually medium (or sometimes
   large) memory blocks, and the allocator is not too frequently called (less
   optimized than other allocators). Thus, using it as a generic allocator is
   not suggested.

   How does it work:
     Memory is allocated in continuous memory areas called chunks by alloc_chunk()
     A chunk is:
       [ block ][ block ] ... [ block ][ block terminator ]

     All blocks and the block terminator is started with block_header. The block
     header contains the size of the previous and the next block. These sizes
     can also contain special values.
       Block size:
         0 - The block is a free_block, with a different size member.
         1 - The block is a block terminator.
         n - The block is used at the moment, and the value contains its size.
       Previous block size:
         0 - This is the first block of the memory chunk.
         n - The size of the previous block.

     Using these size values we can go forward or backward on the block chain.
     The unused blocks are stored in a chain list pointed by free_blocks. This
     list is useful if we need to find a suitable memory area when the allocator
     is called.

     When a block is freed, the new free block is connected to its adjacent free
     blocks if possible.

       [ free block ][ used block ][ free block ]
     and "used block" is freed, the three blocks are connected together:
       [ one big free block ]
*/
/* --------------------------------------------------------------------- */
/*  System (OS) functions                                                */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE	0x10000

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   SLJIT_ALLOCATOR_LOCK / SLJIT_ALLOCATOR_UNLOCK :
     * provided as part of sljitUtils
     * only the allocator requires this lock, sljit is fully thread safe
       as it only uses local variables
*/
82 #define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
84 static SLJIT_INLINE
void* alloc_chunk(sljit_uw size
)
86 return VirtualAlloc(NULL
, size
, MEM_COMMIT
| MEM_RESERVE
, PAGE_EXECUTE_READWRITE
);
89 static SLJIT_INLINE
void free_chunk(void *chunk
, sljit_uw size
)
91 SLJIT_UNUSED_ARG(size
);
92 VirtualFree(chunk
, 0, MEM_RELEASE
);
97 #if defined(__APPLE__) && defined(MAP_JIT)
99 On macOS systems, returns MAP_JIT if it is defined _and_ we're running on a
100 version where it's OK to have more than one JIT block or where MAP_JIT is
102 On non-macOS systems, returns MAP_JIT if it is defined.
104 #include <TargetConditionals.h>
106 #if defined SLJIT_CONFIG_X86 && SLJIT_CONFIG_X86
108 #include <sys/utsname.h>
111 #define SLJIT_MAP_JIT (get_map_jit_flag())
113 static SLJIT_INLINE
int get_map_jit_flag()
118 static int map_jit_flag
= -1;
120 if (map_jit_flag
< 0) {
124 /* Kernel version for 10.14.0 (Mojave) or later */
125 if (atoi(name
.release
) >= 18) {
126 page_size
= get_page_alignment() + 1;
127 /* Only use MAP_JIT if a hardened runtime is used */
128 ptr
= mmap(NULL
, page_size
, PROT_WRITE
| PROT_EXEC
,
129 MAP_PRIVATE
| MAP_ANON
, -1, 0);
131 if (ptr
!= MAP_FAILED
)
132 munmap(ptr
, page_size
);
134 map_jit_flag
= MAP_JIT
;
139 #endif /* MAP_ANON */
140 #else /* !SLJIT_CONFIG_X86 */
141 #if !(defined SLJIT_CONFIG_ARM && SLJIT_CONFIG_ARM)
142 #error "Unsupported architecture"
143 #endif /* SLJIT_CONFIG_ARM */
144 #include <AvailabilityMacros.h>
147 #define SLJIT_MAP_JIT (MAP_JIT)
148 #define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) \
149 apple_update_wx_flags(enable_exec)
151 static SLJIT_INLINE
void apple_update_wx_flags(sljit_s32 enable_exec
)
153 #if MAC_OS_X_VERSION_MIN_REQUIRED >= 110000
154 pthread_jit_write_protect_np(enable_exec
);
156 #error "Must target Big Sur or newer"
159 #endif /* SLJIT_CONFIG_X86 */
160 #else /* !TARGET_OS_OSX */
161 #define SLJIT_MAP_JIT (MAP_JIT)
162 #endif /* TARGET_OS_OSX */
163 #endif /* __APPLE__ && MAP_JIT */
164 #ifndef SLJIT_UPDATE_WX_FLAGS
165 #define SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec)
166 #endif /* !SLJIT_UPDATE_WX_FLAGS */
167 #ifndef SLJIT_MAP_JIT
168 #define SLJIT_MAP_JIT (0)
169 #endif /* !SLJIT_MAP_JIT */
171 static SLJIT_INLINE
void* alloc_chunk(sljit_uw size
)
174 int prot
= PROT_READ
| PROT_WRITE
| PROT_EXEC
;
175 int flags
= MAP_PRIVATE
;
179 prot
|= PROT_MAX(prot
);
183 flags
|= MAP_ANON
| SLJIT_MAP_JIT
;
184 #else /* !MAP_ANON */
185 if (SLJIT_UNLIKELY((dev_zero
< 0) && open_dev_zero()))
189 #endif /* MAP_ANON */
191 retval
= mmap(NULL
, size
, prot
, flags
, fd
, 0);
192 if (retval
== MAP_FAILED
)
195 if (mprotect(retval
, size
, PROT_READ
| PROT_WRITE
| PROT_EXEC
) < 0) {
196 munmap(retval
, size
);
200 SLJIT_UPDATE_WX_FLAGS(retval
, (uint8_t *)retval
+ size
, 0);
205 static SLJIT_INLINE
void free_chunk(void *chunk
, sljit_uw size
)
/* --------------------------------------------------------------------- */
/*  Common functions                                                     */
/* --------------------------------------------------------------------- */

/* Rounds sizes up/down to a CHUNK_SIZE boundary. */
#define CHUNK_MASK	(~(CHUNK_SIZE - 1))
218 struct block_header
{
224 struct block_header header
;
225 struct free_block
*next
;
226 struct free_block
*prev
;
230 #define AS_BLOCK_HEADER(base, offset) \
231 ((struct block_header*)(((sljit_u8*)base) + offset))
232 #define AS_FREE_BLOCK(base, offset) \
233 ((struct free_block*)(((sljit_u8*)base) + offset))
234 #define MEM_START(base) ((void*)(((sljit_u8*)base) + sizeof(struct block_header)))
235 #define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7)
237 static struct free_block
* free_blocks
;
238 static sljit_uw allocated_size
;
239 static sljit_uw total_size
;
241 static SLJIT_INLINE
void sljit_insert_free_block(struct free_block
*free_block
, sljit_uw size
)
243 free_block
->header
.size
= 0;
244 free_block
->size
= size
;
246 free_block
->next
= free_blocks
;
247 free_block
->prev
= NULL
;
249 free_blocks
->prev
= free_block
;
250 free_blocks
= free_block
;
253 static SLJIT_INLINE
void sljit_remove_free_block(struct free_block
*free_block
)
255 if (free_block
->next
)
256 free_block
->next
->prev
= free_block
->prev
;
258 if (free_block
->prev
)
259 free_block
->prev
->next
= free_block
->next
;
261 SLJIT_ASSERT(free_blocks
== free_block
);
262 free_blocks
= free_block
->next
;
266 SLJIT_API_FUNC_ATTRIBUTE
void* sljit_malloc_exec(sljit_uw size
)
268 struct block_header
*header
;
269 struct block_header
*next_header
;
270 struct free_block
*free_block
;
273 SLJIT_ALLOCATOR_LOCK();
274 if (size
< (64 - sizeof(struct block_header
)))
275 size
= (64 - sizeof(struct block_header
));
276 size
= ALIGN_SIZE(size
);
278 free_block
= free_blocks
;
280 if (free_block
->size
>= size
) {
281 chunk_size
= free_block
->size
;
282 SLJIT_UPDATE_WX_FLAGS(NULL
, NULL
, 0);
283 if (chunk_size
> size
+ 64) {
284 /* We just cut a block from the end of the free block. */
286 free_block
->size
= chunk_size
;
287 header
= AS_BLOCK_HEADER(free_block
, chunk_size
);
288 header
->prev_size
= chunk_size
;
289 AS_BLOCK_HEADER(header
, size
)->prev_size
= size
;
292 sljit_remove_free_block(free_block
);
293 header
= (struct block_header
*)free_block
;
296 allocated_size
+= size
;
298 SLJIT_ALLOCATOR_UNLOCK();
299 return MEM_START(header
);
301 free_block
= free_block
->next
;
304 chunk_size
= (size
+ sizeof(struct block_header
) + CHUNK_SIZE
- 1) & CHUNK_MASK
;
305 header
= (struct block_header
*)alloc_chunk(chunk_size
);
307 SLJIT_ALLOCATOR_UNLOCK();
311 chunk_size
-= sizeof(struct block_header
);
312 total_size
+= chunk_size
;
314 header
->prev_size
= 0;
315 if (chunk_size
> size
+ 64) {
316 /* Cut the allocated space into a free and a used block. */
317 allocated_size
+= size
;
321 free_block
= AS_FREE_BLOCK(header
, size
);
322 free_block
->header
.prev_size
= size
;
323 sljit_insert_free_block(free_block
, chunk_size
);
324 next_header
= AS_BLOCK_HEADER(free_block
, chunk_size
);
327 /* All space belongs to this allocation. */
328 allocated_size
+= chunk_size
;
329 header
->size
= chunk_size
;
330 next_header
= AS_BLOCK_HEADER(header
, chunk_size
);
332 next_header
->size
= 1;
333 next_header
->prev_size
= chunk_size
;
334 SLJIT_ALLOCATOR_UNLOCK();
335 return MEM_START(header
);
338 SLJIT_API_FUNC_ATTRIBUTE
void sljit_free_exec(void* ptr
)
340 struct block_header
*header
;
341 struct free_block
* free_block
;
343 SLJIT_ALLOCATOR_LOCK();
344 header
= AS_BLOCK_HEADER(ptr
, -(sljit_sw
)sizeof(struct block_header
));
345 allocated_size
-= header
->size
;
347 /* Connecting free blocks together if possible. */
348 SLJIT_UPDATE_WX_FLAGS(NULL
, NULL
, 0);
350 /* If header->prev_size == 0, free_block will equal to header.
351 In this case, free_block->header.size will be > 0. */
352 free_block
= AS_FREE_BLOCK(header
, -(sljit_sw
)header
->prev_size
);
353 if (SLJIT_UNLIKELY(!free_block
->header
.size
)) {
354 free_block
->size
+= header
->size
;
355 header
= AS_BLOCK_HEADER(free_block
, free_block
->size
);
356 header
->prev_size
= free_block
->size
;
359 free_block
= (struct free_block
*)header
;
360 sljit_insert_free_block(free_block
, header
->size
);
363 header
= AS_BLOCK_HEADER(free_block
, free_block
->size
);
364 if (SLJIT_UNLIKELY(!header
->size
)) {
365 free_block
->size
+= ((struct free_block
*)header
)->size
;
366 sljit_remove_free_block((struct free_block
*)header
);
367 header
= AS_BLOCK_HEADER(free_block
, free_block
->size
);
368 header
->prev_size
= free_block
->size
;
371 /* The whole chunk is free. */
372 if (SLJIT_UNLIKELY(!free_block
->header
.prev_size
&& header
->size
== 1)) {
373 /* If this block is freed, we still have (allocated_size / 2) free space. */
374 if (total_size
- free_block
->size
> (allocated_size
* 3 / 2)) {
375 total_size
-= free_block
->size
;
376 sljit_remove_free_block(free_block
);
377 free_chunk(free_block
, free_block
->size
+ sizeof(struct block_header
));
381 SLJIT_UPDATE_WX_FLAGS(NULL
, NULL
, 1);
382 SLJIT_ALLOCATOR_UNLOCK();
385 SLJIT_API_FUNC_ATTRIBUTE
void sljit_free_unused_memory_exec(void)
387 struct free_block
* free_block
;
388 struct free_block
* next_free_block
;
390 SLJIT_ALLOCATOR_LOCK();
391 SLJIT_UPDATE_WX_FLAGS(NULL
, NULL
, 0);
393 free_block
= free_blocks
;
395 next_free_block
= free_block
->next
;
396 if (!free_block
->header
.prev_size
&&
397 AS_BLOCK_HEADER(free_block
, free_block
->size
)->size
== 1) {
398 total_size
-= free_block
->size
;
399 sljit_remove_free_block(free_block
);
400 free_chunk(free_block
, free_block
->size
+ sizeof(struct block_header
));
402 free_block
= next_free_block
;
405 SLJIT_ASSERT((total_size
&& free_blocks
) || (!total_size
&& !free_blocks
));
406 SLJIT_UPDATE_WX_FLAGS(NULL
, NULL
, 1);
407 SLJIT_ALLOCATOR_UNLOCK();