/*
 * Copyright (C) 2024 Mikulas Patocka
 *
 * This file is part of Ajla.
 *
 * Ajla is free software: you can redistribute it and/or modify it under the
 * terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 3 of the License, or (at your option) any later
 * version.
 *
 * Ajla is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
 * A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * Ajla. If not, see <https://www.gnu.org/licenses/>.
 */

#ifndef AJLA_THREAD_H
#define AJLA_THREAD_H

#include "list.h"
#include "asm.h"

#if defined(__BIONIC__) || defined(__minix__) || defined(UNUSUAL_THREAD)
#undef HAVE___THREAD
#endif

#if defined(THREAD_NONE)
#define thread_volatile
#else
#define thread_volatile volatile
#endif

#if defined(THREAD_NONE)
#undef HAVE___THREAD
#define HAVE___THREAD
#endif

#define tls_verify_type_common_(type) \
	ajla_assert(sizeof(type) <= sizeof(void *), (file_line, "tls_verify_type_common_: too big type: %d > %d", (int)sizeof(type), (int)sizeof(void *)))

#if defined(HAVE___THREAD)
#define tls_verify_type_(type, variable) ((void)(&variable - (type *)&variable), tls_verify_type_common_(type))
#define tls_decl(type, variable) HAVE___THREAD type variable = (type)0
#define tls_decl_extern(type, variable) extern HAVE___THREAD type variable
typedef void tls_t_;
#define tls_get_(variable) (*variable)
#define tls_get__nocheck tls_get_
#define tls_get_cast(type)
#define tls_set_(variable, value) do { (*variable = (value)); } while (0)
#define tls_set__nocheck tls_set_
#define tls_set_cast
#else
#define tls_verify_type_(type, variable) tls_verify_type_common_(type)
#endif

#ifndef THREAD_NONE
void tls_destructor_call(void);
#endif

#if defined(OS_WIN32) || defined(OS_CYGWIN)

#define thread_concurrency_win32_ \
	do { \
		SYSTEM_INFO info; \
		GetSystemInfo(&info); \
		if (info.dwNumberOfProcessors > 0) \
			return info.dwNumberOfProcessors; \
		warning("GetSystemInfo returned zero processors"); \
	} while (0)

#endif

#if defined(THREAD_OS2)

#if defined(HAVE_SYS_BUILTIN_H) && defined(HAVE_SYS_FMUTEX_H) && defined(HAVE__FMUTEX_CREATE) && !defined(UNUSUAL_THREAD)
#define OS2_USE_FMUTEX
#endif

#ifdef OS2_USE_FMUTEX
#include <sys/builtin.h>
#include <sys/fmutex.h>
typedef _fmutex mutex_t;
#else
typedef HMTX mutex_t;
#endif

typedef struct {
	mutex_t mutex;
	struct list wait_list;
} cond_t;

typedef struct os2_thread *thread_t;
typedef void thread_function_t(void *arg);
#define thread_function_decl(name, content) static void name(void attr_unused *arg) { content }

extern mutex_t thread_spawn_mutex;

#if !defined(HAVE___THREAD)
typedef unsigned char os2_tls_key_t;
#define OS2_THREAD_KEY_MAX 16
#define tls_decl(type, variable) os2_tls_key_t variable
#define tls_decl_extern(type, variable) extern os2_tls_key_t variable
typedef os2_tls_key_t tls_t_;
#endif

#define rwmutex_fallback

#elif defined(THREAD_WIN32)

typedef CRITICAL_SECTION mutex_t;
typedef CRITICAL_SECTION rwmutex_t;
extern bool rwmutex_supported;

typedef struct {
	mutex_t mutex;
	struct list wait_list;
} cond_t;

typedef struct win32_thread *thread_t;
typedef void thread_function_t(void *arg);
#define thread_function_decl(name, content) static void name(void attr_unused *arg) { content }

#if !defined(HAVE___THREAD)
typedef DWORD win32_tls_key_t;
#define tls_decl(type, variable) win32_tls_key_t variable
#define tls_decl_extern(type, variable) extern win32_tls_key_t variable
typedef win32_tls_key_t tls_t_;
#endif

#elif defined(THREAD_POSIX)

#include <pthread.h>
#ifndef UNUSUAL_SPINLOCK
typedef pthread_mutex_t mutex_t;
#else
typedef pthread_spinlock_t mutex_t;
#endif
typedef pthread_rwlock_t rwmutex_t;
#define rwmutex_supported 1
typedef struct {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
} cond_t;
typedef pthread_t thread_t;
typedef void *thread_function_t(void *arg);
#define thread_function_decl(name, content) static void *name(void attr_unused *arg) { asm_setup_thread(); { content } tls_destructor_call(); return NULL; }

#if !defined(HAVE___THREAD)
#ifdef HAVE_PTHREAD_KEY_T_ASSIGN
#define tls_decl_initializer_ = (pthread_key_t)-1 /* catch uninitialized tls's */
#else
#define tls_decl_initializer_
#endif
#define tls_decl(type, variable) pthread_key_t variable tls_decl_initializer_
#define tls_decl_extern(type, variable) extern pthread_key_t variable
typedef pthread_key_t tls_t_;
#endif

#elif defined(THREAD_HAIKU)

#include <kernel/OS.h>
#include <pthread.h>

typedef pthread_mutex_t mutex_t;
typedef sem_id rwmutex_t;
#define rwmutex_supported 1
typedef struct {
	mutex_t mutex;
	struct list wait_list;
} cond_t;

typedef struct haiku_thread *thread_t;
typedef void thread_function_t(void *arg);
#define thread_function_decl(name, content) static void name(void attr_unused *arg) { content }

#elif defined(THREAD_NONE)

#if defined(DEBUG_OBJECT_POSSIBLE)
typedef struct {
	unsigned char state;
} mutex_t;
typedef struct {
	int state;
} rwmutex_t;
#else
typedef EMPTY_TYPE mutex_t;
typedef EMPTY_TYPE rwmutex_t;
#endif
#define rwmutex_supported 0

typedef struct {
	mutex_t mutex;
} cond_t;

#else

error: no threads

#endif

#ifdef rwmutex_fallback
typedef mutex_t rwmutex_t;
#define rwmutex_supported 0
#define do_rwmutex_init do_mutex_init
#define do_rwmutex_done do_mutex_done
#define do_rwmutex_lock_read do_mutex_lock
#define do_rwmutex_unlock_read do_mutex_unlock
#define do_rwmutex_lock_write do_mutex_lock
#define do_rwmutex_unlock_write do_mutex_unlock
#endif

#if defined(DEBUG_OBJECT_POSSIBLE) || !defined(THREAD_NONE)
void mutex_init_position(mutex_t * argument_position);
void mutex_done_position(mutex_t * argument_position);
void attr_fastcall mutex_lock_position(mutex_t * argument_position);
bool attr_fastcall mutex_trylock_position(mutex_t * argument_position);
void attr_fastcall mutex_unlock_position(mutex_t * argument_position);
#else
static inline void mutex_init_position(mutex_t attr_unused *m argument_position) { }
static inline void mutex_done_position(mutex_t attr_unused *m argument_position) { }
static inline void mutex_lock_position(mutex_t attr_unused *m argument_position) { }
static inline bool mutex_trylock_position(mutex_t attr_unused *m argument_position) { return true; }
static inline void mutex_unlock_position(mutex_t attr_unused *m argument_position) { }
#endif
#define mutex_init(x) mutex_init_position(x pass_file_line)
#define mutex_done(x) mutex_done_position(x pass_file_line)
#define mutex_lock(x) mutex_lock_position(x pass_file_line)
#define mutex_trylock(x) mutex_trylock_position(x pass_file_line)
#define mutex_unlock(x) mutex_unlock_position(x pass_file_line)
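
/*
 * Example: serializing access to a shared counter. This is only an
 * illustrative sketch; counter_mutex, counter and counter_increment() are
 * hypothetical and not part of this header. Under THREAD_NONE (without
 * DEBUG_OBJECT_POSSIBLE) these calls compile to empty inline functions.
 *
 *	static mutex_t counter_mutex;	(set up once with mutex_init(&counter_mutex))
 *	static unsigned long counter;
 *
 *	static void counter_increment(void)
 *	{
 *		mutex_lock(&counter_mutex);
 *		counter++;
 *		mutex_unlock(&counter_mutex);
 *	}
 */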

#if defined(DEBUG_OBJECT_POSSIBLE) || !defined(THREAD_NONE)
void rwmutex_init_position(rwmutex_t * argument_position);
void rwmutex_done_position(rwmutex_t * argument_position);
void attr_fastcall rwmutex_lock_read_position(rwmutex_t * argument_position);
void attr_fastcall rwmutex_unlock_read_position(rwmutex_t * argument_position);
void attr_fastcall rwmutex_lock_write_position(rwmutex_t * argument_position);
void attr_fastcall rwmutex_unlock_write_position(rwmutex_t * argument_position);
#else
static inline void rwmutex_init_position(rwmutex_t attr_unused *m argument_position) { }
static inline void rwmutex_done_position(rwmutex_t attr_unused *m argument_position) { }
static inline void rwmutex_lock_read_position(rwmutex_t attr_unused *m argument_position) { }
static inline void rwmutex_unlock_read_position(rwmutex_t attr_unused *m argument_position) { }
static inline void rwmutex_lock_write_position(rwmutex_t attr_unused *m argument_position) { }
static inline void rwmutex_unlock_write_position(rwmutex_t attr_unused *m argument_position) { }
#endif
#define rwmutex_init(x) rwmutex_init_position(x pass_file_line)
#define rwmutex_done(x) rwmutex_done_position(x pass_file_line)
#define rwmutex_lock_read(x) rwmutex_lock_read_position(x pass_file_line)
#define rwmutex_unlock_read(x) rwmutex_unlock_read_position(x pass_file_line)
#define rwmutex_lock_write(x) rwmutex_lock_write_position(x pass_file_line)
#define rwmutex_unlock_write(x) rwmutex_unlock_write_position(x pass_file_line)
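
/*
 * Example: a lookup structure that is read frequently and modified rarely.
 * Readers take the read lock, writers the write lock. A sketch only;
 * table_rwmutex, lookup_entry() and insert_entry() are hypothetical. When
 * rwmutex_supported is 0 the read lock is as exclusive as the write lock,
 * so parallel readers are an optimization, not a guarantee.
 *
 *	static rwmutex_t table_rwmutex;
 *
 *	static struct entry *table_lookup(int key)
 *	{
 *		struct entry *e;
 *		rwmutex_lock_read(&table_rwmutex);
 *		e = lookup_entry(key);
 *		rwmutex_unlock_read(&table_rwmutex);
 *		return e;
 *	}
 *
 *	static void table_insert(struct entry *e)
 *	{
 *		rwmutex_lock_write(&table_rwmutex);
 *		insert_entry(e);
 *		rwmutex_unlock_write(&table_rwmutex);
 *	}
 */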

#if defined(DEBUG_OBJECT_POSSIBLE) || !defined(THREAD_NONE)
void cond_init_position(cond_t * argument_position);
void cond_done_position(cond_t * argument_position);
void attr_fastcall cond_lock_position(cond_t * argument_position);
void attr_fastcall cond_unlock_position(cond_t * argument_position);
void attr_fastcall cond_unlock_signal_position(cond_t * argument_position);
void attr_fastcall cond_unlock_broadcast_position(cond_t * argument_position);
void attr_fastcall cond_wait_position(cond_t * argument_position);
bool attr_fastcall cond_wait_us_position(cond_t *, uint32_t argument_position);
#else
static inline void cond_init_position(cond_t attr_unused *c argument_position) { }
static inline void cond_done_position(cond_t attr_unused *c argument_position) { }
static inline void cond_lock_position(cond_t attr_unused *c argument_position) { }
static inline void cond_unlock_position(cond_t attr_unused *c argument_position) { }
static inline void cond_unlock_signal_position(cond_t attr_unused *c argument_position) { }
static inline void cond_unlock_broadcast_position(cond_t attr_unused *c argument_position) { }
static inline void cond_wait_position(cond_t attr_unused *c argument_position) { }
static inline bool cond_wait_us_position(cond_t attr_unused *c, uint32_t attr_unused us argument_position) { return false; }
#endif
#define cond_init(x) cond_init_position(x pass_file_line)
#define cond_done(x) cond_done_position(x pass_file_line)
#define cond_lock(x) cond_lock_position(x pass_file_line)
#define cond_unlock(x) cond_unlock_position(x pass_file_line)
#define cond_unlock_signal(x) cond_unlock_signal_position(x pass_file_line)
#define cond_unlock_broadcast(x) cond_unlock_broadcast_position(x pass_file_line)
#define cond_wait(x) cond_wait_position(x pass_file_line)
#define cond_wait_us(x, y) cond_wait_us_position(x, y pass_file_line)
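
/*
 * Example: a blocking producer/consumer handoff. cond_t bundles the mutex
 * with the condition, so the shared state is protected by cond_lock /
 * cond_unlock and waiters are woken with cond_unlock_signal (or
 * cond_unlock_broadcast). A sketch only; queue_cond, queue_push(),
 * queue_pop() and queue_empty() are hypothetical, and cond_wait is assumed
 * to re-acquire the lock before returning, as with pthread_cond_wait.
 *
 *	static cond_t queue_cond;	(set up once with cond_init(&queue_cond))
 *
 *	static void produce(struct item *it)
 *	{
 *		cond_lock(&queue_cond);
 *		queue_push(it);
 *		cond_unlock_signal(&queue_cond);
 *	}
 *
 *	static struct item *consume(void)
 *	{
 *		struct item *it;
 *		cond_lock(&queue_cond);
 *		while (queue_empty())
 *			cond_wait(&queue_cond);
 *		it = queue_pop();
 *		cond_unlock(&queue_cond);
 *		return it;
 *	}
 */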

#ifdef THREAD_NONE
#define thread_needs_barriers false
static inline unsigned thread_concurrency(void) { return 1; }
#else
extern uchar_efficient_t thread_needs_barriers;
unsigned thread_concurrency(void);
#endif

#ifndef THREAD_NONE
typedef enum {
	PRIORITY_COMPUTE,
	PRIORITY_IO,
	PRIORITY_TIMER,
} thread_priority_t;
bool thread_spawn_position(thread_t *, thread_function_t *, void *, thread_priority_t priority, ajla_error_t *err argument_position);
void thread_join_position(thread_t * argument_position);
#define thread_spawn(t, fn, data, priority, err) thread_spawn_position(t, fn, data, priority, err pass_file_line)
#define thread_join(t) thread_join_position(t pass_file_line)
#endif
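
/*
 * Example: spawning one worker per available CPU and waiting for them to
 * finish. A sketch only; worker_body() and the fixed limit of 16 threads
 * are hypothetical, and a real caller would report the ajla_error_t on
 * failure instead of silently spawning fewer workers.
 *
 *	thread_function_decl(worker,
 *		worker_body(arg);
 *	)
 *
 *	static void run_workers(void)
 *	{
 *		thread_t threads[16];
 *		ajla_error_t err;
 *		unsigned i, n = thread_concurrency();
 *		if (n > 16)
 *			n = 16;
 *		for (i = 0; i < n; i++) {
 *			if (!thread_spawn(&threads[i], worker, NULL, PRIORITY_COMPUTE, &err)) {
 *				n = i;
 *				break;
 *			}
 *		}
 *		for (i = 0; i < n; i++)
 *			thread_join(&threads[i]);
 *	}
 */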

void tls_init__position(tls_t_ * argument_position);
void tls_done__position(tls_t_ * argument_position);
#if !defined(HAVE___THREAD)
uintptr_t attr_fastcall tls_get__position(const tls_t_ * argument_position);
void attr_fastcall tls_set__position(const tls_t_ *, uintptr_t argument_position);
uintptr_t attr_fastcall tls_get__nocheck(const tls_t_ *);
void attr_fastcall tls_set__nocheck(const tls_t_ *, uintptr_t);
#define tls_get_cast(type) (type)
#define tls_set_cast (uintptr_t)
#endif
#define tls_init_(x) tls_init__position(x pass_file_line)
#define tls_done_(x) tls_done__position(x pass_file_line)
#if !defined(HAVE___THREAD)
#define tls_get_(x) tls_get__position(x pass_file_line)
#define tls_set_(x, y) tls_set__position(x, y pass_file_line)
#endif

#define tls_init(type, variable) \
do { \
	tls_verify_type_(type, variable); \
	tls_init_(&variable); \
} while (0)

#define tls_done(type, variable) \
do { \
	tls_verify_type_(type, variable); \
	tls_done_(&variable); \
} while (0)

#define tls_get(type, variable) \
	(tls_verify_type_(type, variable), tls_get_cast(type)tls_get_(&variable))

#define tls_set(type, variable, value) \
do { \
	tls_verify_type_(type, variable); \
	tls_set_(&variable, tls_set_cast(value)); \
} while (0)

#define tls_get_nocheck(type, variable) \
	(tls_verify_type_(type, variable), tls_get_cast(type)tls_get__nocheck(&variable))

#define tls_set_nocheck(type, variable, value) \
do { \
	tls_verify_type_(type, variable); \
	tls_set__nocheck(&variable, tls_set_cast(value)); \
} while (0)
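
/*
 * Example: a per-thread pointer accessed through the type-checked tls_*
 * wrappers. The stored value must fit into a pointer (tls_verify_type_
 * enforces this), so pointers and small integers are the typical payloads.
 * A sketch only; struct error_buffer and the functions are hypothetical.
 *
 *	tls_decl(struct error_buffer *, thread_error_buffer);
 *
 *	static void error_buffer_attach(struct error_buffer *buf)
 *	{
 *		tls_set(struct error_buffer *, thread_error_buffer, buf);
 *	}
 *
 *	static struct error_buffer *error_buffer_get(void)
 *	{
 *		return tls_get(struct error_buffer *, thread_error_buffer);
 *	}
 *
 * The key itself is created with tls_init(struct error_buffer *,
 * thread_error_buffer) and released with tls_done(struct error_buffer *,
 * thread_error_buffer), typically from module initialization and shutdown.
 */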

#ifdef THREAD_NONE
typedef EMPTY_TYPE tls_destructor_t;
typedef void tls_destructor_fn(tls_destructor_t *);
static inline void tls_destructor_position(tls_destructor_t attr_unused *destr, tls_destructor_fn attr_unused *fn argument_position) { }
#else
struct tls_destructor_s;
typedef void tls_destructor_fn(struct tls_destructor_s *);
typedef struct tls_destructor_s {
	struct tls_destructor_s *previous;
	tls_destructor_fn *fn;
} tls_destructor_t;
void tls_destructor_position(tls_destructor_t *, tls_destructor_fn * argument_position);
#endif
#define tls_destructor(dest, fn) tls_destructor_position(dest, fn pass_file_line)
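
/*
 * Example: releasing a per-thread object when its thread exits (registered
 * destructors are invoked via tls_destructor_call() when the thread function
 * returns, see thread_function_decl above). The tls_destructor_t must stay
 * valid until then, so it is typically embedded in the per-thread object.
 * A sketch only; struct per_thread and its functions are hypothetical, and
 * the cast relies on the destructor being the first member.
 *
 *	struct per_thread {
 *		tls_destructor_t destructor;
 *		char *buffer;
 *	};
 *
 *	static void per_thread_free(tls_destructor_t *d)
 *	{
 *		struct per_thread *pt = (struct per_thread *)d;
 *		free(pt->buffer);
 *		free(pt);
 *	}
 *
 *	(after allocating struct per_thread *pt for the current thread:)
 *	tls_destructor(&pt->destructor, per_thread_free);
 */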

/*
 * See smp_read_barrier_depends() in Linux for an explanation.
 * If we don't know how to do this barrier, we don't define this macro and
 * the user must not use the lockless access model.
 */
#if defined(THREAD_NONE)
#define barrier_data_dependency() do { } while (0)
#elif defined(__alpha)
#if defined(HAVE_C11_ATOMICS) && !defined(UNUSUAL)
#define barrier_data_dependency() atomic_thread_fence(memory_order_seq_cst)
#elif defined(HAVE_SYNC_AND_FETCH) && !defined(UNUSUAL)
#define barrier_data_dependency() __sync_synchronize()
#elif defined(HAVE_GCC_ASSEMBLER) && !defined(UNUSUAL)
#define barrier_data_dependency() __asm__ volatile ("mb":::"memory")
#endif
#elif defined(__ADSPBLACKFIN__)
#else
#define barrier_data_dependency() do { } while (0)
#endif
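
/*
 * Example of the consumer side of a lockless publish/consume sequence: the
 * producer fills in an object and then publishes the pointer with a write
 * barrier (not shown); the consumer issues barrier_data_dependency() between
 * loading the pointer and dereferencing it, which is a real fence only on
 * Alpha. A sketch only; shared_ptr and use() are hypothetical, and the
 * lockless model may only be used where this macro is defined.
 *
 *	struct node *n = shared_ptr;
 *	barrier_data_dependency();
 *	use(n->value);
 */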

/*
 * The mutex_unlock/mutex_lock sequence serves as a memory write barrier.
 *
 * On powerpc, mutex_unlock/mutex_lock doesn't serve as a memory barrier for
 * threads that don't take the lock, so we must add the barrier explicitly.
 *
 * See smp_mb__after_unlock_lock() in Linux for an explanation.
 */
#if defined(THREAD_NONE)
#define barrier_write_before_unlock_lock() do { } while (0)
#elif defined(__powerpc__)
#if defined(HAVE_C11_ATOMICS) && !defined(UNUSUAL)
#define barrier_write_before_unlock_lock() atomic_thread_fence(memory_order_seq_cst)
#elif defined(HAVE_SYNC_AND_FETCH) && !defined(UNUSUAL)
#define barrier_write_before_unlock_lock() __sync_synchronize()
#elif defined(HAVE_GCC_ASSEMBLER)
#define barrier_write_before_unlock_lock() __asm__ volatile("sync":::"memory")
#else
#endif
#else
#define barrier_write_before_unlock_lock() do { } while (0)
#endif
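
/*
 * Example of where barrier_write_before_unlock_lock() matters: thread A
 * stores data and then goes through an unlock/lock sequence before setting a
 * flag; a thread that reads the flag without taking either lock should also
 * see the earlier write to data. On powerpc the explicit barrier is needed
 * for the writes to be ordered that way; the reader still needs its own
 * read-side ordering. A sketch only; data, flag, m1 and m2 are hypothetical.
 *
 *	data = 1;
 *	barrier_write_before_unlock_lock();
 *	mutex_unlock(&m1);
 *	mutex_lock(&m2);
 *	flag = 1;
 */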

/*
 * A write barrier before taking a lock; it makes sure that previous writes
 * are not reordered with the contents of the following locked region.
 */
#if defined(THREAD_NONE) || defined(THREAD_OS2) || (defined(ARCH_X86) && !defined(UNUSUAL))
#define barrier_write_before_lock() do { } while (0)
#elif defined(HAVE_C11_ATOMICS) && !defined(UNUSUAL)
#define barrier_write_before_lock() atomic_thread_fence(memory_order_seq_cst)
#elif defined(HAVE_SYNC_AND_FETCH) && !defined(UNUSUAL)
#define barrier_write_before_lock() __sync_synchronize()
#elif defined(THREAD_WIN32)
#if (defined(MemoryBarrier) || defined(__buildmemorybarrier)) && !defined(UNUSUAL_THREAD)
#define barrier_write_before_lock() MemoryBarrier()
#else
#define barrier_write_before_lock() \
do { \
	LONG x; \
	InterlockedExchange(&x, 0); \
} while (0)
#endif
#else
void barrier_write_before_lock(void);
#endif
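
/*
 * Example: publishing an object by linking it into a shared list. The write
 * that fills in the object must not be reordered into the locked region,
 * where a lockless reader that finds the object through the list could
 * otherwise see the entry before the earlier write to obj->ready. A sketch
 * only; list_mutex, shared_list and link_entry() are hypothetical.
 *
 *	obj->ready = 1;
 *	barrier_write_before_lock();
 *	mutex_lock(&list_mutex);
 *	link_entry(&shared_list, &obj->entry);
 *	mutex_unlock(&list_mutex);
 */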

#if !defined(THREAD_NONE) && defined(DEBUG_TRACE)
void thread_set_id(int id);
int thread_get_id(void);
#else
#define thread_set_id(id) do { } while (0)
#define thread_get_id() 0
#endif

bool thread_enable_debugging_option(const char *option, size_t l);

void thread_init(void);
void thread_done(void);

#endif