/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* ------------------------------------------------------------------------ */
/*  Locks                                                                   */
/* ------------------------------------------------------------------------ */
31 /* Executable Allocator */
#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) \
	&& !(defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

/* Single threaded build: locking is a no-op. */
#define SLJIT_ALLOCATOR_LOCK()
#define SLJIT_ALLOCATOR_UNLOCK()

#elif !(defined _WIN32)

#include <pthread.h>

static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;

#define SLJIT_ALLOCATOR_LOCK() pthread_mutex_lock(&allocator_lock)
#define SLJIT_ALLOCATOR_UNLOCK() pthread_mutex_unlock(&allocator_lock)

#else /* _WIN32 */

static HANDLE allocator_lock;

/* Lazily creates the global allocator mutex on first use, then waits on it.
   The interlocked compare-exchange makes the one-time creation race free:
   if two threads create a mutex concurrently, the loser closes its handle
   and uses the winner's mutex. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	HANDLE lock;
	if (SLJIT_UNLIKELY(!InterlockedCompareExchangePointer(&allocator_lock, NULL, NULL))) {
		lock = CreateMutex(NULL, FALSE, NULL);
		if (InterlockedCompareExchangePointer(&allocator_lock, lock, NULL))
			CloseHandle(lock);
	}
	WaitForSingleObject(allocator_lock, INFINITE);
}

#define SLJIT_ALLOCATOR_LOCK() allocator_grab_lock()
#define SLJIT_ALLOCATOR_UNLOCK() ReleaseMutex(allocator_lock)

#endif /* thread implementation */
#endif /* SLJIT_EXECUTABLE_ALLOCATOR && !SLJIT_WX_EXECUTABLE_ALLOCATOR */
/* ------------------------------------------------------------------------ */
/*  Stack                                                                   */
/* ------------------------------------------------------------------------ */
#if ((defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) \
	&& !(defined SLJIT_UTIL_SIMPLE_STACK_ALLOCATION && SLJIT_UTIL_SIMPLE_STACK_ALLOCATION)) \
	|| ((defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) \
		&& !((defined SLJIT_PROT_EXECUTABLE_ALLOCATOR && SLJIT_PROT_EXECUTABLE_ALLOCATOR) \
		|| (defined SLJIT_WX_EXECUTABLE_ALLOCATOR && SLJIT_WX_EXECUTABLE_ALLOCATOR)))

#ifndef _WIN32
/* Provides mmap function. */
#include <sys/types.h>
#include <sys/mman.h>

#ifndef MAP_ANON
#ifdef MAP_ANONYMOUS
#define MAP_ANON MAP_ANONYMOUS
#endif /* MAP_ANONYMOUS */
#endif /* !MAP_ANON */

#ifndef MAP_ANON

#include <fcntl.h>

/* Pass O_CLOEXEC to open() where available so the descriptor is not
   leaked across exec. */
#ifdef O_CLOEXEC
#define SLJIT_CLOEXEC	O_CLOEXEC
#else /* !O_CLOEXEC */
#define SLJIT_CLOEXEC	0
#endif /* O_CLOEXEC */

/* Some old systems do not have MAP_ANON. */
static int dev_zero = -1;

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

/* Opens /dev/zero as a substitute for anonymous mappings.
   Returns non-zero on failure. */
static SLJIT_INLINE int open_dev_zero(void)
{
	dev_zero = open("/dev/zero", O_RDWR | SLJIT_CLOEXEC);

	return dev_zero < 0;
}

#else /* !SLJIT_SINGLE_THREADED */

#include <pthread.h>

static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Thread-safe variant: the mutex ensures /dev/zero is opened only once.
   Returns non-zero on failure. */
static SLJIT_INLINE int open_dev_zero(void)
{
	pthread_mutex_lock(&dev_zero_mutex);
	/* Recheck under the lock: another thread may have opened it already. */
	if (SLJIT_UNLIKELY(dev_zero < 0))
		dev_zero = open("/dev/zero", O_RDWR | SLJIT_CLOEXEC);

	pthread_mutex_unlock(&dev_zero_mutex);
	return dev_zero < 0;
}

#endif /* SLJIT_SINGLE_THREADED */

#undef SLJIT_CLOEXEC

#endif /* !MAP_ANON */
#endif /* !_WIN32 */
#endif /* open_dev_zero */
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) \
	|| (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _WIN32

/* Returns the system page size minus one (an alignment mask),
   cached after the first query. */
static SLJIT_INLINE sljit_uw get_page_alignment(void) {
	SYSTEM_INFO si;
	static sljit_uw sljit_page_align = 0;
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = (sljit_uw)si.dwPageSize - 1;
	}
	return sljit_page_align;
}

#else /* !_WIN32 */

#include <unistd.h>

/* Returns the system page size minus one (an alignment mask),
   cached after the first query. */
static SLJIT_INLINE sljit_uw get_page_alignment(void) {
	static sljit_uw sljit_page_align = 0;

	sljit_sw align;

	if (!sljit_page_align) {
#ifdef _SC_PAGESIZE
		align = sysconf(_SC_PAGESIZE);
#else
		align = getpagesize();
#endif
		/* Should never happen. */
		if (align < 0)
			align = 4096;

		sljit_page_align = (sljit_uw)align - 1;
	}
	return sljit_page_align;
}
#endif /* _WIN32 */

#endif /* get_page_alignment() */
171 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
173 #if (defined SLJIT_UTIL_SIMPLE_STACK_ALLOCATION && SLJIT_UTIL_SIMPLE_STACK_ALLOCATION)
175 SLJIT_API_FUNC_ATTRIBUTE
struct sljit_stack
* SLJIT_FUNC
sljit_allocate_stack(sljit_uw start_size
, sljit_uw max_size
, void *allocator_data
)
177 struct sljit_stack
*stack
;
180 SLJIT_UNUSED_ARG(allocator_data
);
182 if (start_size
> max_size
|| start_size
< 1)
185 stack
= (struct sljit_stack
*)SLJIT_MALLOC(sizeof(struct sljit_stack
), allocator_data
);
189 ptr
= SLJIT_MALLOC(max_size
, allocator_data
);
191 SLJIT_FREE(stack
, allocator_data
);
195 stack
->min_start
= (sljit_u8
*)ptr
;
196 stack
->end
= stack
->min_start
+ max_size
;
197 stack
->start
= stack
->end
- start_size
;
198 stack
->top
= stack
->end
;
202 SLJIT_API_FUNC_ATTRIBUTE
void SLJIT_FUNC
sljit_free_stack(struct sljit_stack
*stack
, void *allocator_data
)
204 SLJIT_UNUSED_ARG(allocator_data
);
205 SLJIT_FREE((void*)stack
->min_start
, allocator_data
);
206 SLJIT_FREE(stack
, allocator_data
);
209 SLJIT_API_FUNC_ATTRIBUTE sljit_u8
*SLJIT_FUNC
sljit_stack_resize(struct sljit_stack
*stack
, sljit_u8
*new_start
)
211 if ((new_start
< stack
->min_start
) || (new_start
>= stack
->end
))
213 stack
->start
= new_start
;
217 #else /* !SLJIT_UTIL_SIMPLE_STACK_ALLOCATION */
#ifdef _WIN32

/* Releases the reserved address range and the stack record. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_FUNC sljit_free_stack(struct sljit_stack *stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
	VirtualFree((void*)stack->min_start, 0, MEM_RELEASE);
	SLJIT_FREE(stack, allocator_data);
}

#endif /* _WIN32 */
230 SLJIT_API_FUNC_ATTRIBUTE
void SLJIT_FUNC
sljit_free_stack(struct sljit_stack
*stack
, void *allocator_data
)
232 SLJIT_UNUSED_ARG(allocator_data
);
233 munmap((void*)stack
->min_start
, (size_t)(stack
->end
- stack
->min_start
));
234 SLJIT_FREE(stack
, allocator_data
);
239 SLJIT_API_FUNC_ATTRIBUTE
struct sljit_stack
* SLJIT_FUNC
sljit_allocate_stack(sljit_uw start_size
, sljit_uw max_size
, void *allocator_data
)
241 struct sljit_stack
*stack
;
245 SLJIT_UNUSED_ARG(allocator_data
);
247 if (start_size
> max_size
|| start_size
< 1)
250 stack
= (struct sljit_stack
*)SLJIT_MALLOC(sizeof(struct sljit_stack
), allocator_data
);
254 /* Align max_size. */
255 page_align
= get_page_alignment();
256 max_size
= (max_size
+ page_align
) & ~page_align
;
259 ptr
= VirtualAlloc(NULL
, max_size
, MEM_RESERVE
, PAGE_READWRITE
);
261 SLJIT_FREE(stack
, allocator_data
);
265 stack
->min_start
= (sljit_u8
*)ptr
;
266 stack
->end
= stack
->min_start
+ max_size
;
267 stack
->start
= stack
->end
;
269 if (sljit_stack_resize(stack
, stack
->end
- start_size
) == NULL
) {
270 sljit_free_stack(stack
, allocator_data
);
275 ptr
= mmap(NULL
, max_size
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
| MAP_ANON
, -1, 0);
276 #else /* !MAP_ANON */
277 if (SLJIT_UNLIKELY((dev_zero
< 0) && open_dev_zero())) {
278 SLJIT_FREE(stack
, allocator_data
);
281 ptr
= mmap(NULL
, max_size
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, dev_zero
, 0);
282 #endif /* MAP_ANON */
283 if (ptr
== MAP_FAILED
) {
284 SLJIT_FREE(stack
, allocator_data
);
287 stack
->min_start
= (sljit_u8
*)ptr
;
288 stack
->end
= stack
->min_start
+ max_size
;
289 stack
->start
= stack
->end
- start_size
;
292 stack
->top
= stack
->end
;
296 SLJIT_API_FUNC_ATTRIBUTE sljit_u8
*SLJIT_FUNC
sljit_stack_resize(struct sljit_stack
*stack
, sljit_u8
*new_start
)
298 #if defined _WIN32 || defined(POSIX_MADV_DONTNEED)
299 sljit_uw aligned_old_start
;
300 sljit_uw aligned_new_start
;
304 if ((new_start
< stack
->min_start
) || (new_start
>= stack
->end
))
308 page_align
= get_page_alignment();
310 aligned_new_start
= (sljit_uw
)new_start
& ~page_align
;
311 aligned_old_start
= ((sljit_uw
)stack
->start
) & ~page_align
;
312 if (aligned_new_start
!= aligned_old_start
) {
313 if (aligned_new_start
< aligned_old_start
) {
314 if (!VirtualAlloc((void*)aligned_new_start
, aligned_old_start
- aligned_new_start
, MEM_COMMIT
, PAGE_READWRITE
))
318 if (!VirtualFree((void*)aligned_old_start
, aligned_new_start
- aligned_old_start
, MEM_DECOMMIT
))
322 #elif defined(POSIX_MADV_DONTNEED)
323 if (stack
->start
< new_start
) {
324 page_align
= get_page_alignment();
326 aligned_new_start
= (sljit_uw
)new_start
& ~page_align
;
327 aligned_old_start
= ((sljit_uw
)stack
->start
) & ~page_align
;
329 if (aligned_new_start
> aligned_old_start
) {
330 posix_madvise((void*)aligned_old_start
, aligned_new_start
- aligned_old_start
, POSIX_MADV_DONTNEED
);
332 madvise((void*)aligned_old_start
, aligned_new_start
- aligned_old_start
, MADV_FREE
);
333 #endif /* MADV_FREE */
338 stack
->start
= new_start
;
342 #endif /* SLJIT_UTIL_SIMPLE_STACK_ALLOCATION */
344 #endif /* SLJIT_UTIL_STACK */