/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ------------------------------------------------------------------------ */
/*  Locks                                                                   */
/* ------------------------------------------------------------------------ */

/* Executable Allocator */
#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) \
	&& !(defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)

/* Lock serializing access to the executable memory allocator.
   Three implementations: a no-op pair for single threaded builds,
   a pthread mutex on POSIX, and a lazily created mutex on Windows. */

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

/* Single threaded: no locking is necessary. */
#define SLJIT_ALLOCATOR_LOCK()
#define SLJIT_ALLOCATOR_UNLOCK()

#elif !(defined _WIN32)

#include <pthread.h>

static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;

#define SLJIT_ALLOCATOR_LOCK() pthread_mutex_lock(&allocator_lock)
#define SLJIT_ALLOCATOR_UNLOCK() pthread_mutex_unlock(&allocator_lock)

#else /* _WIN32 */

static HANDLE allocator_lock;

/* Creates the process-wide mutex on first use, then acquires it.
   When two threads race to create the mutex, the compare-exchange
   ensures only one handle is published; the loser closes its
   redundant handle to avoid a leak. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	HANDLE lock;
	if (SLJIT_UNLIKELY(!InterlockedCompareExchangePointer(&allocator_lock, NULL, NULL))) {
		lock = CreateMutex(NULL, FALSE, NULL);
		if (InterlockedCompareExchangePointer(&allocator_lock, lock, NULL))
			CloseHandle(lock);
	}
	WaitForSingleObject(allocator_lock, INFINITE);
}

#define SLJIT_ALLOCATOR_LOCK() allocator_grab_lock()
#define SLJIT_ALLOCATOR_UNLOCK() ReleaseMutex(allocator_lock)

#endif /* thread implementation */
#endif /* SLJIT_EXECUTABLE_ALLOCATOR && !SLJIT_WX_EXECUTABLE_ALLOCATOR */
/* ------------------------------------------------------------------------ */
/*  Stack                                                                   */
/* ------------------------------------------------------------------------ */
#if ((defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) \
	&& !(defined SLJIT_UTIL_SIMPLE_STACK_ALLOCATION && SLJIT_UTIL_SIMPLE_STACK_ALLOCATION)) \
	|| ((defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) \
	&& !((defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR) \
	|| (defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)))

#ifndef _WIN32
/* Provides mmap function. */
#include <sys/types.h>
#include <sys/mman.h>

#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#endif /* MAP_ANONYMOUS */
#endif /* !MAP_ANON */

#ifndef MAP_ANON

#include <fcntl.h>

/* Keep the descriptor out of child processes where O_CLOEXEC exists. */
#ifdef O_CLOEXEC
#define SLJIT_CLOEXEC	O_CLOEXEC
#else /* !O_CLOEXEC */
#define SLJIT_CLOEXEC	0
#endif /* O_CLOEXEC */

/* Some old systems do not have MAP_ANON. */
static int dev_zero = -1;

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

/* Opens /dev/zero so it can be mapped in place of an anonymous mapping.
   Returns non-zero on failure. */
static SLJIT_INLINE int open_dev_zero(void)
{
	dev_zero = open("/dev/zero", O_RDWR | SLJIT_CLOEXEC);

	return dev_zero < 0;
}

#else /* !SLJIT_SINGLE_THREADED */

#include <pthread.h>

static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Thread-safe variant: the mutex plus the re-check of dev_zero ensure
   /dev/zero is opened only once even when several threads race here.
   Returns non-zero on failure. */
static SLJIT_INLINE int open_dev_zero(void)
{
	pthread_mutex_lock(&dev_zero_mutex);
	if (SLJIT_UNLIKELY(dev_zero < 0))
		dev_zero = open("/dev/zero", O_RDWR | SLJIT_CLOEXEC);

	pthread_mutex_unlock(&dev_zero_mutex);
	return dev_zero < 0;
}

#endif /* SLJIT_SINGLE_THREADED */

#undef SLJIT_CLOEXEC

#endif /* !MAP_ANON */

#endif /* !_WIN32 */

#endif /* open_dev_zero */
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) \
	|| (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _WIN32

/* Returns the system page size minus one, usable as an alignment mask.
   The value is queried once and cached in a function-local static. */
static SLJIT_INLINE sljit_sw get_page_alignment(void) {
	SYSTEM_INFO si;
	static sljit_sw sljit_page_align;
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
	return sljit_page_align;
}

#else /* !_WIN32 */

#include <unistd.h>

/* Returns the system page size minus one, usable as an alignment mask.
   -1 marks "not yet queried"; the result is cached after the first call. */
static SLJIT_INLINE sljit_sw get_page_alignment(void) {
	static sljit_sw sljit_page_align = -1;
	if (sljit_page_align < 0) {
#ifdef _SC_PAGESIZE
		sljit_page_align = sysconf(_SC_PAGESIZE);
#else /* !_SC_PAGESIZE */
		sljit_page_align = getpagesize();
#endif /* _SC_PAGESIZE */
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		/* Convert the page size to a mask (size - 1). */
		sljit_page_align--;
	}
	return sljit_page_align;
}

#endif /* _WIN32 */

#endif /* get_page_alignment() */
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)

#if (defined SLJIT_UTIL_SIMPLE_STACK_ALLOCATION && SLJIT_UTIL_SIMPLE_STACK_ALLOCATION)

/* Allocates a stack of max_size bytes from the generic allocator and sets
   the initial start pointer start_size bytes below the end (the stack grows
   downwards). Returns NULL on invalid sizes or allocation failure. The
   caller owns the result and must release it with sljit_free_stack(). */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data)
{
	struct sljit_stack *stack;
	void *ptr;

	SLJIT_UNUSED_ARG(allocator_data);

	if (start_size > max_size || start_size < 1)
		return NULL;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data);
	if (stack == NULL)
		return NULL;

	ptr = SLJIT_MALLOC(max_size, allocator_data);
	if (ptr == NULL) {
		/* Do not leak the descriptor when the buffer allocation fails. */
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}

	stack->min_start = (sljit_u8 *)ptr;
	stack->end = stack->min_start + max_size;
	stack->start = stack->end - start_size;
	stack->top = stack->end;
	return stack;
}

/* Releases both the stack buffer and the descriptor. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
	SLJIT_FREE((void*)stack->min_start, allocator_data);
	SLJIT_FREE(stack, allocator_data);
}

/* Moves the stack start pointer; new_start must lie inside
   [min_start, end). Returns the new start, or NULL when out of range. */
SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start)
{
	if ((new_start < stack->min_start) || (new_start >= stack->end))
		return NULL;
	stack->start = new_start;
	return new_start;
}

#else /* !SLJIT_UTIL_SIMPLE_STACK_ALLOCATION */

#ifdef _WIN32

/* Releases the whole virtual memory reservation and the descriptor. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
	VirtualFree((void*)stack->min_start, 0, MEM_RELEASE);
	SLJIT_FREE(stack, allocator_data);
}

#else /* !_WIN32 */

/* Unmaps the whole stack region and frees the descriptor. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
	munmap((void*)stack->min_start, stack->end - stack->min_start);
	SLJIT_FREE(stack, allocator_data);
}

#endif /* _WIN32 */

/* Reserves a page-aligned region of max_size bytes for a downwards growing
   stack. On Windows only the address range is reserved here; the initially
   used start_size bytes are committed through sljit_stack_resize(). On
   POSIX the whole region is mapped read/write immediately. Returns NULL on
   invalid sizes or when the system allocation fails. */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_FUNC sljit_allocate_stack(sljit_uw start_size, sljit_uw max_size, void *allocator_data)
{
	struct sljit_stack *stack;
	void *ptr;
	sljit_sw page_align;

	SLJIT_UNUSED_ARG(allocator_data);

	if (start_size > max_size || start_size < 1)
		return NULL;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data);
	if (stack == NULL)
		return NULL;

	/* Align max_size. */
	page_align = get_page_alignment();
	max_size = (max_size + page_align) & ~page_align;

#ifdef _WIN32
	ptr = VirtualAlloc(NULL, max_size, MEM_RESERVE, PAGE_READWRITE);
	if (!ptr) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}

	stack->min_start = (sljit_u8 *)ptr;
	stack->end = stack->min_start + max_size;
	stack->start = stack->end;

	/* Commit the initially requested portion of the reservation. */
	if (sljit_stack_resize(stack, stack->end - start_size) == NULL) {
		sljit_free_stack(stack, allocator_data);
		return NULL;
	}
#else /* !_WIN32 */
#ifdef MAP_ANON
	ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else /* !MAP_ANON */
	/* Old systems without anonymous mappings map /dev/zero instead. */
	if (SLJIT_UNLIKELY((dev_zero < 0) && open_dev_zero())) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	ptr = mmap(NULL, max_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif /* MAP_ANON */
	if (ptr == MAP_FAILED) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}

	stack->min_start = (sljit_u8 *)ptr;
	stack->end = stack->min_start + max_size;
	stack->start = stack->end - start_size;
#endif /* _WIN32 */

	stack->top = stack->end;
	return stack;
}

/* Moves the stack start pointer within [min_start, end). On Windows this
   commits pages when the stack grows (new start below the old one) and
   decommits them when it shrinks. On POSIX systems with POSIX_MADV_DONTNEED
   the pages released by shrinking are advised away so the kernel may
   reclaim them (best effort; the advice result is intentionally ignored).
   Returns the new start, or NULL on failure. */
SLJIT_API_FUNC_ATTRIBUTE sljit_u8 *SLJIT_FUNC sljit_stack_resize(struct sljit_stack *stack, sljit_u8 *new_start)
{
#if defined _WIN32 || defined(POSIX_MADV_DONTNEED)
	sljit_uw aligned_old_start;
	sljit_uw aligned_new_start;
	sljit_sw page_align;
#endif /* _WIN32 || POSIX_MADV_DONTNEED */

	if ((new_start < stack->min_start) || (new_start >= stack->end))
		return NULL;

#ifdef _WIN32
	page_align = get_page_alignment();

	aligned_new_start = (sljit_uw)new_start & ~page_align;
	aligned_old_start = ((sljit_uw)stack->start) & ~page_align;
	if (aligned_new_start != aligned_old_start) {
		if (aligned_new_start < aligned_old_start) {
			/* Growing: commit the newly used pages. */
			if (!VirtualAlloc((void*)aligned_new_start, aligned_old_start - aligned_new_start, MEM_COMMIT, PAGE_READWRITE))
				return NULL;
		}
		else {
			/* Shrinking: decommit the no-longer-used pages. */
			if (!VirtualFree((void*)aligned_old_start, aligned_new_start - aligned_old_start, MEM_DECOMMIT))
				return NULL;
		}
	}
#elif defined(POSIX_MADV_DONTNEED)
	if (stack->start < new_start) {
		page_align = get_page_alignment();

		aligned_new_start = (sljit_uw)new_start & ~page_align;
		aligned_old_start = ((sljit_uw)stack->start) & ~page_align;

		if (aligned_new_start > aligned_old_start) {
			/* Hint the kernel that the released pages are unused. */
			posix_madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, POSIX_MADV_DONTNEED);
#ifdef MADV_FREE
			madvise((void*)aligned_old_start, aligned_new_start - aligned_old_start, MADV_FREE);
#endif /* MADV_FREE */
		}
	}
#endif /* _WIN32 */

	stack->start = new_start;
	return new_start;
}

#endif /* SLJIT_UTIL_SIMPLE_STACK_ALLOCATION */

#endif /* SLJIT_UTIL_STACK */