/**********************************************************************

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
#define WIN32_WAIT_TIMEOUT 10 /* 10 ms */
#define native_thread_yield() Sleep(0)
#define remove_signal_thread_list(th)
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int native_mutex_lock(rb_thread_lock_t *);
static int native_mutex_unlock(rb_thread_lock_t *);
static int native_mutex_trylock(rb_thread_lock_t *);
static void native_mutex_initialize(rb_thread_lock_t *);

static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond);
static void native_cond_destroy(rb_thread_cond_t *cond);
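/*
 * The two helpers below map between native Win32 threads and Ruby's
 * rb_thread_t: the pointer to the current rb_thread_t is stored in the TLS
 * slot allocated into ruby_native_thread_key by Init_native_thread().
 */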
static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}
static int
ruby_thread_set_native(rb_thread_t *th)
{
    return TlsSetValue(ruby_native_thread_key, th);
}
static void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    ruby_native_thread_key = TlsAlloc();

    /* GetCurrentThread() returns a pseudo-handle; DuplicateHandle() turns it
     * into a real handle that other threads can wait on. */
    DuplicateHandle(GetCurrentProcess(),
		    GetCurrentThread(),
		    GetCurrentProcess(),
		    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
		 th, GET_THREAD()->thread_id,
		 th->native_thread_data.interrupt_event);
}
static void
w32_error(void)
{
    LPVOID lpMsgBuf;
    FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
		  FORMAT_MESSAGE_FROM_SYSTEM |
		  FORMAT_MESSAGE_IGNORE_INSERTS,
		  NULL,
		  GetLastError(),
		  MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
		  (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s", (char*)lpMsgBuf);
}
static void
w32_set_event(HANDLE handle)
{
    if (SetEvent(handle) == 0) {
	w32_error();
    }
}
static void
w32_reset_event(HANDLE handle)
{
    if (ResetEvent(handle) == 0) {
	w32_error();
    }
}
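/*
 * w32_wait_events() is the central blocking primitive of this port.  When a
 * Ruby thread (th != 0) is passed in, that thread's interrupt_event is
 * appended to the handle array, so the WaitForMultipleObjects() call below
 * can be woken early by ubf_handle() setting the event from another thread.
 */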
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    DWORD ret;

    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
		 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
	w32_reset_event(intr);
	if (RUBY_VM_INTERRUPTED(th)) {
	    w32_set_event(intr);
	}

	targets = ALLOCA_N(HANDLE, count + 1);
	memcpy(targets, events, sizeof(HANDLE) * count);

	targets[count++] = intr;
	thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
    }

    thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug("  WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == WAIT_OBJECT_0 + count - 1 && th) {
	errno = EINTR;
    }
    if (ret == -1 && THREAD_DEBUG) {
	int i;
	DWORD dmy;
	for (i = 0; i < count; i++) {
	    thread_debug("  * error handle %d - %s\n", i,
			 GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
	}
    }
    return ret;
}
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, GET_THREAD());
}
int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
		    ubf_handle, GET_THREAD());
    return ret;
}
static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
	w32_error();
    }
}
static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == -1) {
	w32_error();
    }
}
#define HAVE__BEGINTHREADEX 1
#undef HAVE__BEGINTHREADEX

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
typedef unsigned long (_stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif
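/*
 * Note: when the C runtime is in use, _beginthreadex is generally preferred
 * over raw CreateThread because it lets the CRT set up (and later release)
 * its per-thread state; which branch is compiled here depends on whether
 * HAVE__BEGINTHREADEX survives the platform checks above.
 */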
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    /* the thread is created suspended; the caller resumes it explicitly */
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
}
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, GET_THREAD());
}
int
rb_w32_Sleep(unsigned long msec)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_sleep(msec),
		    ubf_handle, GET_THREAD());
    return ret;
}
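/*
 * native_sleep() registers ubf_handle as the thread's "unblock function"
 * before it blocks and clears it afterwards; both updates are made under
 * th->interrupt_lock so that a wakeup from another thread either sees the
 * registered function or nothing at all.
 */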
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    DWORD msec;
    DWORD ret;

    if (tv) {
	msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
    }
    else {
	msec = INFINITE;
    }

    native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = ubf_handle;
    th->unblock.arg = th;
    native_mutex_unlock(&th->interrupt_lock);

    if (RUBY_VM_INTERRUPTED(th)) {
	/* interrupted; return immediately */
    }
    else {
	thread_debug("native_sleep start (%lu)\n", msec);
	ret = w32_wait_events(0, 0, msec, th);
	thread_debug("native_sleep done (%lu)\n", ret);
    }

    native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    th->unblock.arg = 0;
    native_mutex_unlock(&th->interrupt_lock);
}
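/*
 * The native_mutex_* family keeps two compile-time flavours side by side: a
 * kernel mutex HANDLE (waited on through w32_wait_events, so the wait is
 * interruptible) and a plain CRITICAL_SECTION.  The USE_WIN32_MUTEX guard
 * used below is assumed from those two code paths.
 */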
static int
native_mutex_lock(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    DWORD result;
    while (1) {
	thread_debug("native_mutex_lock: %p\n", *lock);
	result = w32_wait_events(&*lock, 1, INFINITE, 0);
	switch (result) {
	  case WAIT_OBJECT_0:
	    /* get mutex object */
	    thread_debug("acquire mutex: %p\n", *lock);
	    return 0;
	  case WAIT_OBJECT_0 + 1:
	    /* woken by the interrupt event rather than the mutex itself */
	    thread_debug("acquire mutex interrupted: %p\n", *lock);
	    return 0;
	  case WAIT_TIMEOUT:
	    thread_debug("timeout mutex: %p\n", *lock);
	    break;
	  case WAIT_ABANDONED:
	    rb_bug("win32_mutex_lock: WAIT_ABANDONED");
	    break;
	  default:
	    rb_bug("win32_mutex_lock: unknown result (%d)", result);
	    break;
	}
    }
    return 0;
#else
    EnterCriticalSection(lock);
    return 0;
#endif
}
static int
native_mutex_unlock(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    thread_debug("release mutex: %p\n", *lock);
    return ReleaseMutex(*lock);
#else
    LeaveCriticalSection(lock);
    return 0;
#endif
}
static int
native_mutex_trylock(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    int result;

    thread_debug("native_mutex_trylock: %p\n", *lock);
    result = w32_wait_events(&*lock, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);
    switch (result) {
      case WAIT_OBJECT_0:
	return 0;
      case WAIT_TIMEOUT:
	return EBUSY;
      default:
	return EINVAL;
    }
#else
    return TryEnterCriticalSection(lock) == 0;
#endif
}
static void
native_mutex_initialize(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    *lock = CreateMutex(NULL, FALSE, NULL);
    /* thread_debug("initialize mutex: %p\n", *lock); */
#else
    InitializeCriticalSection(lock);
#endif
}
static void
native_mutex_destroy(rb_thread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    w32_close_handle(lock);
#else
    DeleteCriticalSection(lock);
#endif
}
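/*
 * Condition variables are built by hand on top of events: each waiter puts
 * a cond_event_entry (holding its own auto-reset event) on a singly linked
 * list rooted in rb_thread_cond_struct, and signal/broadcast wake waiters
 * by setting those per-entry events.
 */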
struct cond_event_entry {
    struct cond_event_entry* next;
    HANDLE event;
};

struct rb_thread_cond_struct {
    struct cond_event_entry *next;
    struct cond_event_entry *last;
};
static void
native_cond_signal(rb_thread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;

    if (e) {
	cond->next = e->next;
	SetEvent(e->event);
    }
    else {
	rb_bug("native_cond_signal: no pending threads");
    }
}
static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    cond->next = 0;

    /* wake every queued waiter */
    while (e) {
	SetEvent(e->event);
	e = e->next;
    }
}
static void
native_cond_wait(rb_thread_cond_t *cond, rb_thread_lock_t *mutex)
{
    DWORD r;
    struct cond_event_entry entry;

    entry.next = 0;
    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    if (cond->next) {
	cond->last->next = &entry;
	cond->last = &entry;
    }
    else {
	cond->next = &entry;
	cond->last = &entry;
    }

    native_mutex_unlock(mutex);
    {
	r = WaitForSingleObject(entry.event, INFINITE);
	if (r != WAIT_OBJECT_0) {
	    rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
	}
    }
    native_mutex_lock(mutex);

    w32_close_handle(entry.event);
}
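/*
 * Illustrative only (not part of the original source): the condvar above is
 * used with the usual monitor pattern -- hold the lock and re-check the
 * predicate around every wait.  The example_* names are hypothetical.
 */
#if 0
static rb_thread_lock_t example_lock;
static rb_thread_cond_t example_cond;
static int example_ready;

static void
example_wait_until_ready(void)
{
    native_mutex_lock(&example_lock);
    while (!example_ready) {
	/* releases example_lock while blocked, reacquires before returning */
	native_cond_wait(&example_cond, &example_lock);
    }
    native_mutex_unlock(&example_lock);
}
#endif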
static void
native_cond_initialize(rb_thread_cond_t *cond)
{
    cond->next = 0;
    cond->last = 0;
}
static void
native_cond_destroy(rb_thread_cond_t *cond)
{
    /* nothing held per-condvar: waiter events are created and closed inside
     * native_cond_wait() itself */
}
void
ruby_init_stack(VALUE *addr)
{
    /* no-op here: stack bounds are taken from VirtualQuery() in
     * native_thread_init_stack() */
}
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
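/*
 * Stack bounds are recovered from the OS rather than passed in:
 * VirtualQuery() on a local variable reports the allocation that backs the
 * current thread's stack, from which the start address and a usable size
 * (minus a safety margin) are derived for the GC's machine-stack marking.
 */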
static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;	/* reserve part of the region as a safety margin */
    if (space > 1024*1024) space = 1024*1024;
    th->machine_stack_start = (VALUE *)end - 1;
    th->machine_stack_maxsize = size - space;
}
static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = th->native_thread_data.interrupt_event;

    native_mutex_destroy(&th->interrupt_lock);
    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
    th->native_thread_data.interrupt_event = 0;
    w32_close_handle(intr);
}
static unsigned long _stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    VALUE stack_start;
    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);
    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run the Ruby-level thread body */
    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
		 th->thread_id, th->native_thread_data.interrupt_event);
    thread_start_func_2(th, &stack_start, 0);

    w32_close_handle(thread_id);
    thread_debug("thread deleted (th: %p)\n", th);
    return 0;
}
static int
native_thread_create(rb_thread_t *th)
{
    size_t stack_size = 4 * 1024; /* 4KB */

    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    if ((th->thread_id) == 0) {
	st_delete_wrap(th->vm->living_threads, th->self);
	rb_raise(rb_eThreadError, "can't create Thread (%d)", errno);
    }

    w32_resume_thread(th->thread_id);

    thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %d\n",
		 th, th->thread_id,
		 th->native_thread_data.interrupt_event, stack_size);
    return 0;
}
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, 0, 0);
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;

    if (th->priority > 0) {
	priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
	priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
	priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
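/*
 * ubf_handle() is the "unblock function" installed while a thread sleeps or
 * waits: it looks up CancelSynchronousIo at runtime (the symbol only exists
 * on Windows Vista and later, hence GetProcAddress and the -1 sentinel for
 * "not available") and then sets the thread's interrupt event so any pending
 * w32_wait_events() call returns.
 */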
static void
ubf_handle(void *ptr)
{
    typedef BOOL (WINAPI *cancel_io_func_t)(HANDLE);
    static cancel_io_func_t cancel_func = NULL;
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    if (!cancel_func) {
	cancel_func = (cancel_io_func_t)GetProcAddress(GetModuleHandle("kernel32"), "CancelSynchronousIo");
	if (!cancel_func)
	    cancel_func = (cancel_io_func_t)-1;
    }
    if (cancel_func != (cancel_io_func_t)-1)
	cancel_func((HANDLE)th->thread_id);

    w32_set_event(th->native_thread_data.interrupt_event);
}
static HANDLE timer_thread_id = 0;
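/*
 * The timer thread is the scheduler's heartbeat: it wakes every
 * WIN32_WAIT_TIMEOUT milliseconds while system_working is set and calls
 * timer_thread_function(), which flags the running Ruby thread so it will
 * eventually yield.
 */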
static unsigned long _stdcall
timer_thread_func(void *dummy)
{
    thread_debug("timer_thread\n");
    while (system_working) {
	Sleep(WIN32_WAIT_TIMEOUT);
	timer_thread_function(dummy);
    }
    thread_debug("timer killed\n");
    return 0;
}
static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread_id == 0) {
	timer_thread_id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
					    timer_thread_func, GET_VM());
	w32_resume_thread(timer_thread_id);
    }
}
#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */