1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
47 #include "eval_intern.h"
51 #ifndef USE_NATIVE_THREAD_PRIORITY
52 #define USE_NATIVE_THREAD_PRIORITY 0
53 #define RUBY_THREAD_PRIORITY_MAX 3
54 #define RUBY_THREAD_PRIORITY_MIN -3
58 #define THREAD_DEBUG 0
64 static void sleep_timeval(rb_thread_t
*th
, struct timeval time
);
65 static void sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
);
66 static void sleep_forever(rb_thread_t
*th
, int nodeadlock
);
67 static double timeofday(void);
68 struct timeval
rb_time_interval(VALUE
);
69 static int rb_thread_dead(rb_thread_t
*th
);
71 static void rb_check_deadlock(rb_vm_t
*vm
);
73 void rb_signal_exec(rb_thread_t
*th
, int sig
);
74 void rb_disable_interrupt(void);
76 static const VALUE eKillSignal
= INT2FIX(0);
77 static const VALUE eTerminateSignal
= INT2FIX(1);
78 static volatile int system_working
= 1;
81 st_delete_wrap(st_table
*table
, st_data_t key
)
83 st_delete(table
, &key
, 0);
86 /********************************************************************************/
88 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
90 static void set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
91 struct rb_unblock_callback
*old
);
92 static void reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
);
94 #define GVL_UNLOCK_BEGIN() do { \
95 rb_thread_t *_th_stored = GET_THREAD(); \
96 rb_gc_save_machine_context(_th_stored); \
97 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
99 #define GVL_UNLOCK_END() \
100 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
101 rb_thread_set_current(_th_stored); \
104 #define BLOCKING_REGION_CORE(exec) do { \
105 GVL_UNLOCK_BEGIN(); {\
111 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
112 rb_thread_t *__th = GET_THREAD(); \
113 enum rb_thread_status __prev_status = __th->status; \
114 struct rb_unblock_callback __oldubf; \
115 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
116 __th->status = THREAD_STOPPED; \
117 thread_debug("enter blocking region (%p)\n", __th); \
118 BLOCKING_REGION_CORE(exec); \
119 thread_debug("leave blocking region (%p)\n", __th); \
120 remove_signal_thread_list(__th); \
121 reset_unblock_function(__th, &__oldubf); \
122 if (__th->status == THREAD_STOPPED) { \
123 __th->status = __prev_status; \
125 RUBY_VM_CHECK_INTS(); \
129 #ifdef HAVE_VA_ARGS_MACRO
130 void rb_thread_debug(const char *file
, int line
, const char *fmt
, ...);
131 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
132 #define POSITION_FORMAT "%s:%d:"
133 #define POSITION_ARGS ,file, line
135 void rb_thread_debug(const char *fmt
, ...);
136 #define thread_debug rb_thread_debug
137 #define POSITION_FORMAT
138 #define POSITION_ARGS
141 # if THREAD_DEBUG < 0
142 static int rb_thread_debug_enabled
;
145 rb_thread_s_debug(void)
147 return INT2NUM(rb_thread_debug_enabled
);
151 rb_thread_s_debug_set(VALUE self
, VALUE val
)
153 rb_thread_debug_enabled
= RTEST(val
);
157 # define rb_thread_debug_enabled THREAD_DEBUG
160 #define thread_debug if(0)printf
164 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
166 NOINLINE(static int thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
,
167 VALUE
*register_stack_start
));
168 static void timer_thread_function(void *);
171 #include "thread_win32.c"
173 #define DEBUG_OUT() \
174 WaitForSingleObject(&debug_mutex, INFINITE); \
175 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
177 ReleaseMutex(&debug_mutex);
179 #elif defined(HAVE_PTHREAD_H)
180 #include "thread_pthread.c"
182 #define DEBUG_OUT() \
183 pthread_mutex_lock(&debug_mutex); \
184 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
186 pthread_mutex_unlock(&debug_mutex);
189 #error "unsupported thread type"
193 static int debug_mutex_initialized
= 1;
194 static rb_thread_lock_t debug_mutex
;
198 #ifdef HAVE_VA_ARGS_MACRO
199 const char *file
, int line
,
201 const char *fmt
, ...)
206 if (!rb_thread_debug_enabled
) return;
208 if (debug_mutex_initialized
== 1) {
209 debug_mutex_initialized
= 0;
210 native_mutex_initialize(&debug_mutex
);
214 vsnprintf(buf
, BUFSIZ
, fmt
, args
);
223 set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
224 struct rb_unblock_callback
*old
)
227 RUBY_VM_CHECK_INTS(); /* check signal or so */
228 native_mutex_lock(&th
->interrupt_lock
);
229 if (th
->interrupt_flag
) {
230 native_mutex_unlock(&th
->interrupt_lock
);
234 if (old
) *old
= th
->unblock
;
235 th
->unblock
.func
= func
;
236 th
->unblock
.arg
= arg
;
238 native_mutex_unlock(&th
->interrupt_lock
);
242 reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
)
244 native_mutex_lock(&th
->interrupt_lock
);
246 native_mutex_unlock(&th
->interrupt_lock
);
250 rb_thread_interrupt(rb_thread_t
*th
)
252 native_mutex_lock(&th
->interrupt_lock
);
253 RUBY_VM_SET_INTERRUPT(th
);
254 if (th
->unblock
.func
) {
255 (th
->unblock
.func
)(th
->unblock
.arg
);
260 native_mutex_unlock(&th
->interrupt_lock
);
265 terminate_i(st_data_t key
, st_data_t val
, rb_thread_t
*main_thread
)
269 GetThreadPtr(thval
, th
);
271 if (th
!= main_thread
) {
272 thread_debug("terminate_i: %p\n", th
);
273 rb_thread_interrupt(th
);
274 th
->thrown_errinfo
= eTerminateSignal
;
275 th
->status
= THREAD_TO_KILL
;
278 thread_debug("terminate_i: main thread (%p)\n", th
);
283 typedef struct rb_mutex_struct
285 rb_thread_lock_t lock
;
286 rb_thread_cond_t cond
;
287 struct rb_thread_struct
volatile *th
;
288 volatile int cond_waiting
, cond_notified
;
289 struct rb_mutex_struct
*next_mutex
;
292 static void rb_mutex_unlock_all(mutex_t
*mutex
);
295 rb_thread_terminate_all(void)
297 rb_thread_t
*th
= GET_THREAD(); /* main thread */
298 rb_vm_t
*vm
= th
->vm
;
299 if (vm
->main_thread
!= th
) {
300 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm
->main_thread
, th
);
303 /* unlock all locking mutexes */
304 if (th
->keeping_mutexes
) {
305 rb_mutex_unlock_all(th
->keeping_mutexes
);
308 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th
);
309 st_foreach(vm
->living_threads
, terminate_i
, (st_data_t
)th
);
311 while (!rb_thread_alone()) {
313 if (EXEC_TAG() == 0) {
314 rb_thread_schedule();
317 /* ignore exception */
325 thread_cleanup_func_before_exec(void *th_ptr
)
327 rb_thread_t
*th
= th_ptr
;
328 th
->status
= THREAD_KILLED
;
329 th
->machine_stack_start
= th
->machine_stack_end
= 0;
331 th
->machine_register_stack_start
= th
->machine_register_stack_end
= 0;
336 thread_cleanup_func(void *th_ptr
)
338 rb_thread_t
*th
= th_ptr
;
339 thread_cleanup_func_before_exec(th_ptr
);
340 native_thread_destroy(th
);
343 extern void ruby_error_print(void);
344 static VALUE
rb_thread_raise(int, VALUE
*, rb_thread_t
*);
345 void rb_thread_recycle_stack_release(VALUE
*);
348 ruby_thread_init_stack(rb_thread_t
*th
)
350 native_thread_init_stack(th
);
354 thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
, VALUE
*register_stack_start
)
357 VALUE args
= th
->first_args
;
359 rb_thread_t
*join_th
;
360 rb_thread_t
*main_th
;
361 VALUE errinfo
= Qnil
;
363 th
->machine_stack_start
= stack_start
;
365 th
->machine_register_stack_start
= register_stack_start
;
367 thread_debug("thread start: %p\n", th
);
369 native_mutex_lock(&th
->vm
->global_vm_lock
);
371 thread_debug("thread start (get lock): %p\n", th
);
372 rb_thread_set_current(th
);
375 if ((state
= EXEC_TAG()) == 0) {
376 SAVE_ROOT_JMPBUF(th
, {
377 if (th
->first_proc
) {
378 GetProcPtr(th
->first_proc
, proc
);
380 th
->local_lfp
= proc
->block
.lfp
;
381 th
->local_svar
= Qnil
;
382 th
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
,
383 RARRAY_LEN(args
), RARRAY_PTR(args
), 0);
386 th
->value
= (*th
->first_func
)((void *)th
->first_args
);
391 errinfo
= th
->errinfo
;
392 if (NIL_P(errinfo
)) errinfo
= rb_errinfo();
393 if (state
== TAG_FATAL
) {
394 /* fatal error within this thread, need to stop whole script */
396 else if (rb_obj_is_kind_of(errinfo
, rb_eSystemExit
)) {
397 if (th
->safe_level
>= 4) {
398 th
->errinfo
= rb_exc_new3(rb_eSecurityError
,
399 rb_sprintf("Insecure exit at level %d", th
->safe_level
));
403 else if (th
->safe_level
< 4 &&
404 (th
->vm
->thread_abort_on_exception
||
405 th
->abort_on_exception
|| RTEST(ruby_debug
))) {
406 /* exit on main_thread */
414 th
->status
= THREAD_KILLED
;
415 thread_debug("thread end: %p\n", th
);
417 main_th
= th
->vm
->main_thread
;
419 if (TYPE(errinfo
) == T_OBJECT
) {
420 /* treat with normal error object */
421 rb_thread_raise(1, &errinfo
, main_th
);
426 /* locking_mutex must be Qfalse */
427 if (th
->locking_mutex
!= Qfalse
) {
428 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE
")",
429 th
, th
->locking_mutex
);
432 /* unlock all locking mutexes */
433 if (th
->keeping_mutexes
) {
434 rb_mutex_unlock_all(th
->keeping_mutexes
);
435 th
->keeping_mutexes
= NULL
;
438 /* delete self from living_threads */
439 st_delete_wrap(th
->vm
->living_threads
, th
->self
);
441 /* wake up joinning threads */
442 join_th
= th
->join_list_head
;
444 if (join_th
== main_th
) errinfo
= Qnil
;
445 rb_thread_interrupt(join_th
);
446 switch (join_th
->status
) {
447 case THREAD_STOPPED
: case THREAD_STOPPED_FOREVER
:
448 join_th
->status
= THREAD_RUNNABLE
;
451 join_th
= join_th
->join_list_next
;
453 if (th
!= main_th
) rb_check_deadlock(th
->vm
);
455 if (!th
->root_fiber
) {
456 rb_thread_recycle_stack_release(th
->stack
);
460 thread_cleanup_func(th
);
461 native_mutex_unlock(&th
->vm
->global_vm_lock
);
467 thread_create_core(VALUE thval
, VALUE args
, VALUE (*fn
)(ANYARGS
))
471 if (OBJ_FROZEN(GET_THREAD()->thgroup
)) {
472 rb_raise(rb_eThreadError
,
473 "can't start a new thread (frozen ThreadGroup)");
475 GetThreadPtr(thval
, th
);
477 /* setup thread environment */
479 th
->first_proc
= fn
? Qfalse
: rb_block_proc();
480 th
->first_args
= args
; /* GC: shouldn't put before above line */
482 th
->priority
= GET_THREAD()->priority
;
483 th
->thgroup
= GET_THREAD()->thgroup
;
485 native_mutex_initialize(&th
->interrupt_lock
);
487 st_insert(th
->vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
488 native_thread_create(th
);
493 thread_s_new(int argc
, VALUE
*argv
, VALUE klass
)
496 VALUE thread
= rb_thread_alloc(klass
);
497 rb_obj_call_init(thread
, argc
, argv
);
498 GetThreadPtr(thread
, th
);
499 if (!th
->first_args
) {
500 rb_raise(rb_eThreadError
, "uninitialized thread - check `%s#initialize'",
501 rb_class2name(klass
));
508 * Thread.start([args]*) {|args| block } => thread
509 * Thread.fork([args]*) {|args| block } => thread
511 * Basically the same as <code>Thread::new</code>. However, if class
512 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
513 * subclass will not invoke the subclass's <code>initialize</code> method.
517 thread_start(VALUE klass
, VALUE args
)
519 return thread_create_core(rb_thread_alloc(klass
), args
, 0);
523 thread_initialize(VALUE thread
, VALUE args
)
526 if (!rb_block_given_p()) {
527 rb_raise(rb_eThreadError
, "must be called with a block");
529 GetThreadPtr(thread
, th
);
530 if (th
->first_args
) {
531 VALUE
rb_proc_location(VALUE self
);
532 VALUE proc
= th
->first_proc
, line
, loc
;
534 if (!proc
|| !RTEST(loc
= rb_proc_location(proc
))) {
535 rb_raise(rb_eThreadError
, "already initialized thread");
537 file
= RSTRING_PTR(RARRAY_PTR(loc
)[0]);
538 if (NIL_P(line
= RARRAY_PTR(loc
)[1])) {
539 rb_raise(rb_eThreadError
, "already initialized thread - %s",
542 rb_raise(rb_eThreadError
, "already initialized thread - %s:%d",
543 file
, NUM2INT(line
));
545 return thread_create_core(thread
, args
, 0);
549 rb_thread_create(VALUE (*fn
)(ANYARGS
), void *arg
)
551 return thread_create_core(rb_thread_alloc(rb_cThread
), (VALUE
)arg
, fn
);
555 /* +infty, for this purpose */
556 #define DELAY_INFTY 1E30
559 rb_thread_t
*target
, *waiting
;
565 remove_from_join_list(VALUE arg
)
567 struct join_arg
*p
= (struct join_arg
*)arg
;
568 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
570 if (target_th
->status
!= THREAD_KILLED
) {
571 rb_thread_t
**pth
= &target_th
->join_list_head
;
575 *pth
= th
->join_list_next
;
578 pth
= &(*pth
)->join_list_next
;
586 thread_join_sleep(VALUE arg
)
588 struct join_arg
*p
= (struct join_arg
*)arg
;
589 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
590 double now
, limit
= p
->limit
;
592 while (target_th
->status
!= THREAD_KILLED
) {
594 sleep_forever(th
, 1);
599 thread_debug("thread_join: timeout (thid: %p)\n",
600 (void *)target_th
->thread_id
);
603 sleep_wait_for_interrupt(th
, limit
- now
);
605 thread_debug("thread_join: interrupted (thid: %p)\n",
606 (void *)target_th
->thread_id
);
612 thread_join(rb_thread_t
*target_th
, double delay
)
614 rb_thread_t
*th
= GET_THREAD();
617 arg
.target
= target_th
;
619 arg
.limit
= timeofday() + delay
;
620 arg
.forever
= delay
== DELAY_INFTY
;
622 thread_debug("thread_join (thid: %p)\n", (void *)target_th
->thread_id
);
624 if (target_th
->status
!= THREAD_KILLED
) {
625 th
->join_list_next
= target_th
->join_list_head
;
626 target_th
->join_list_head
= th
;
627 if (!rb_ensure(thread_join_sleep
, (VALUE
)&arg
,
628 remove_from_join_list
, (VALUE
)&arg
)) {
633 thread_debug("thread_join: success (thid: %p)\n",
634 (void *)target_th
->thread_id
);
636 if (target_th
->errinfo
!= Qnil
) {
637 VALUE err
= target_th
->errinfo
;
642 else if (TYPE(target_th
->errinfo
) == T_NODE
) {
643 rb_exc_raise(vm_make_jump_tag_but_local_jump(
644 GET_THROWOBJ_STATE(err
), GET_THROWOBJ_VAL(err
)));
647 /* normal exception */
651 return target_th
->self
;
657 * thr.join(limit) => thr
659 * The calling thread will suspend execution and run <i>thr</i>. Does not
660 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
661 * the time limit expires, <code>nil</code> will be returned, otherwise
662 * <i>thr</i> is returned.
664 * Any threads not joined will be killed when the main program exits. If
665 * <i>thr</i> had previously raised an exception and the
666 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
667 * (so the exception has not yet been processed) it will be processed at this
670 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
671 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
672 * x.join # Let x thread finish, a will be killed on exit.
678 * The following example illustrates the <i>limit</i> parameter.
680 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
681 * puts "Waiting" until y.join(0.15)
695 thread_join_m(int argc
, VALUE
*argv
, VALUE self
)
697 rb_thread_t
*target_th
;
698 double delay
= DELAY_INFTY
;
701 GetThreadPtr(self
, target_th
);
703 rb_scan_args(argc
, argv
, "01", &limit
);
705 delay
= rb_num2dbl(limit
);
708 return thread_join(target_th
, delay
);
715 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
718 * a = Thread.new { 2 + 2 }
723 thread_value(VALUE self
)
726 GetThreadPtr(self
, th
);
727 thread_join(th
, DELAY_INFTY
);
/* Convert a floating-point second count into a struct timeval,
 * normalizing so that tv_usec is always in [0, 1e6). */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
750 sleep_forever(rb_thread_t
*th
, int deadlockable
)
752 enum rb_thread_status prev_status
= th
->status
;
754 th
->status
= deadlockable
? THREAD_STOPPED_FOREVER
: THREAD_STOPPED
;
758 rb_check_deadlock(th
->vm
);
764 RUBY_VM_CHECK_INTS();
765 } while (th
->status
== THREAD_STOPPED_FOREVER
);
766 th
->status
= prev_status
;
/* Fill *tp with the current time, preferring the monotonic clock when
 * available so sleeps are immune to wall-clock adjustments. */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    }
    else
#endif
    {
        gettimeofday(tp, NULL);
    }
}
786 sleep_timeval(rb_thread_t
*th
, struct timeval tv
)
788 struct timeval to
, tvn
;
789 enum rb_thread_status prev_status
= th
->status
;
792 to
.tv_sec
+= tv
.tv_sec
;
793 if ((to
.tv_usec
+= tv
.tv_usec
) >= 1000000) {
795 to
.tv_usec
-= 1000000;
798 th
->status
= THREAD_STOPPED
;
800 native_sleep(th
, &tv
);
801 RUBY_VM_CHECK_INTS();
803 if (to
.tv_sec
< tvn
.tv_sec
) break;
804 if (to
.tv_sec
== tvn
.tv_sec
&& to
.tv_usec
<= tvn
.tv_usec
) break;
805 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
806 (long)to
.tv_sec
, to
.tv_usec
,
807 (long)tvn
.tv_sec
, tvn
.tv_usec
);
808 tv
.tv_sec
= to
.tv_sec
- tvn
.tv_sec
;
809 if ((tv
.tv_usec
= to
.tv_usec
- tvn
.tv_usec
) < 0) {
811 tv
.tv_usec
+= 1000000;
813 } while (th
->status
== THREAD_STOPPED
);
814 th
->status
= prev_status
;
/* Sleep the current thread indefinitely (not deadlock-detectable). */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/* Sleep indefinitely, participating in VM deadlock detection. */
void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
/* Current time in seconds as a double, using the monotonic clock when
 * available and falling back to gettimeofday otherwise. */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    }
    else
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
849 sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
)
851 sleep_timeval(th
, double2timeval(sleepsec
));
855 sleep_for_polling(rb_thread_t
*th
)
859 time
.tv_usec
= 100 * 1000; /* 0.1 sec */
860 sleep_timeval(th
, time
);
864 rb_thread_wait_for(struct timeval time
)
866 rb_thread_t
*th
= GET_THREAD();
867 sleep_timeval(th
, time
);
871 rb_thread_polling(void)
873 RUBY_VM_CHECK_INTS();
874 if (!rb_thread_alone()) {
875 rb_thread_t
*th
= GET_THREAD();
876 sleep_for_polling(th
);
881 * CAUTION: This function causes thread switching.
882 * rb_thread_check_ints() check ruby's interrupts.
883 * some interrupt needs thread switching/invoke handlers,
/* Service pending interrupts; may switch threads or run handlers. */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
893 struct timeval
rb_time_timeval();
/* Sleep the current thread for `sec` whole seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
902 rb_thread_schedule(void)
904 thread_debug("rb_thread_schedule\n");
905 if (!rb_thread_alone()) {
906 rb_thread_t
*th
= GET_THREAD();
908 thread_debug("rb_thread_schedule/switch start\n");
910 rb_gc_save_machine_context(th
);
911 native_mutex_unlock(&th
->vm
->global_vm_lock
);
913 native_thread_yield();
915 native_mutex_lock(&th
->vm
->global_vm_lock
);
917 rb_thread_set_current(th
);
918 thread_debug("rb_thread_schedule/switch done\n");
920 RUBY_VM_CHECK_INTS();
924 int rb_thread_critical
; /* TODO: dummy variable */
927 rb_thread_blocking_region(
928 rb_blocking_function_t
*func
, void *data1
,
929 rb_unblock_function_t
*ubf
, void *data2
)
932 rb_thread_t
*th
= GET_THREAD();
934 if (ubf
== RB_UBF_DFL
) {
950 * Invokes the thread scheduler to pass execution to another thread.
952 * a = Thread.new { print "a"; Thread.pass;
953 * print "b"; Thread.pass;
955 * b = Thread.new { print "x"; Thread.pass;
956 * print "y"; Thread.pass;
967 thread_s_pass(VALUE klass
)
969 rb_thread_schedule();
978 rb_thread_execute_interrupts(rb_thread_t
*th
)
980 if (th
->raised_flag
) return;
982 while (th
->interrupt_flag
) {
983 enum rb_thread_status status
= th
->status
;
984 int timer_interrupt
= th
->interrupt_flag
& 0x01;
985 int finalizer_interrupt
= th
->interrupt_flag
& 0x04;
987 th
->status
= THREAD_RUNNABLE
;
988 th
->interrupt_flag
= 0;
990 /* signal handling */
991 if (th
->exec_signal
) {
992 int sig
= th
->exec_signal
;
994 rb_signal_exec(th
, sig
);
997 /* exception from another thread */
998 if (th
->thrown_errinfo
) {
999 VALUE err
= th
->thrown_errinfo
;
1000 th
->thrown_errinfo
= 0;
1001 thread_debug("rb_thread_execute_interrupts: %ld\n", err
);
1003 if (err
== eKillSignal
|| err
== eTerminateSignal
) {
1004 th
->errinfo
= INT2FIX(TAG_FATAL
);
1005 TH_JUMP_TAG(th
, TAG_FATAL
);
1011 th
->status
= status
;
1013 if (finalizer_interrupt
) {
1014 rb_gc_finalize_deferred();
1017 if (timer_interrupt
) {
1018 #if USE_NATIVE_THREAD_PRIORITY
1019 EXEC_EVENT_HOOK(th
, RUBY_EVENT_SWITCH
, th
->cfp
->self
, 0, 0);
1020 rb_thread_schedule();
1022 if (th
->slice
> 0) {
1027 EXEC_EVENT_HOOK(th
, RUBY_EVENT_SWITCH
, th
->cfp
->self
, 0, 0);
1028 rb_thread_schedule();
1029 if (th
->slice
< 0) {
1034 th
->slice
= th
->priority
;
1044 rb_gc_mark_threads(void)
1049 /*****************************************************/
1052 rb_thread_ready(rb_thread_t
*th
)
1054 rb_thread_interrupt(th
);
1058 rb_thread_raise(int argc
, VALUE
*argv
, rb_thread_t
*th
)
1063 if (rb_thread_dead(th
)) {
1067 if (th
->thrown_errinfo
!= 0 || th
->raised_flag
) {
1068 rb_thread_schedule();
1072 exc
= rb_make_exception(argc
, argv
);
1073 th
->thrown_errinfo
= exc
;
1074 rb_thread_ready(th
);
1079 rb_thread_signal_raise(void *thptr
, int sig
)
1082 rb_thread_t
*th
= thptr
;
1084 argv
[0] = rb_eSignal
;
1085 argv
[1] = INT2FIX(sig
);
1086 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1090 rb_thread_signal_exit(void *thptr
)
1093 rb_thread_t
*th
= thptr
;
1095 argv
[0] = rb_eSystemExit
;
1096 argv
[1] = rb_str_new2("exit");
1097 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1101 rb_thread_set_raised(rb_thread_t
*th
)
1103 if (th
->raised_flag
& RAISED_EXCEPTION
) {
1106 th
->raised_flag
|= RAISED_EXCEPTION
;
1111 rb_thread_reset_raised(rb_thread_t
*th
)
1113 if (!(th
->raised_flag
& RAISED_EXCEPTION
)) {
1116 th
->raised_flag
&= ~RAISED_EXCEPTION
;
1121 rb_thread_fd_close(int fd
)
1128 * thr.raise(exception)
1130 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1131 * caller does not have to be <i>thr</i>.
1133 * Thread.abort_on_exception = true
1134 * a = Thread.new { sleep(200) }
1137 * <em>produces:</em>
1139 * prog.rb:3: Gotcha (RuntimeError)
1140 * from prog.rb:2:in `initialize'
1141 * from prog.rb:2:in `new'
1146 thread_raise_m(int argc
, VALUE
*argv
, VALUE self
)
1149 GetThreadPtr(self
, th
);
1150 rb_thread_raise(argc
, argv
, th
);
1157 * thr.exit => thr or nil
1158 * thr.kill => thr or nil
1159 * thr.terminate => thr or nil
1161 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1162 * is already marked to be killed, <code>exit</code> returns the
1163 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1168 rb_thread_kill(VALUE thread
)
1172 GetThreadPtr(thread
, th
);
1174 if (th
!= GET_THREAD() && th
->safe_level
< 4) {
1177 if (th
->status
== THREAD_TO_KILL
|| th
->status
== THREAD_KILLED
) {
1180 if (th
== th
->vm
->main_thread
) {
1181 rb_exit(EXIT_SUCCESS
);
1184 thread_debug("rb_thread_kill: %p (%p)\n", th
, (void *)th
->thread_id
);
1186 rb_thread_interrupt(th
);
1187 th
->thrown_errinfo
= eKillSignal
;
1188 th
->status
= THREAD_TO_KILL
;
1196 * Thread.kill(thread) => thread
1198 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1201 * a = Thread.new { loop { count += 1 } }
1203 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1205 * a.alive? #=> false
1209 rb_thread_s_kill(VALUE obj
, VALUE th
)
1211 return rb_thread_kill(th
);
1217 * Thread.exit => thread
1219 * Terminates the currently running thread and schedules another thread to be
1220 * run. If this thread is already marked to be killed, <code>exit</code>
1221 * returns the <code>Thread</code>. If this is the main thread, or the last
1222 * thread, exit the process.
1226 rb_thread_exit(void)
1228 return rb_thread_kill(GET_THREAD()->self
);
1236 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1237 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1239 * c = Thread.new { Thread.stop; puts "hey!" }
1242 * <em>produces:</em>
1248 rb_thread_wakeup(VALUE thread
)
1251 GetThreadPtr(thread
, th
);
1253 if (th
->status
== THREAD_KILLED
) {
1254 rb_raise(rb_eThreadError
, "killed thread");
1256 rb_thread_ready(th
);
1257 if (th
->status
!= THREAD_TO_KILL
) {
1258 th
->status
= THREAD_RUNNABLE
;
1268 * Wakes up <i>thr</i>, making it eligible for scheduling.
1270 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1276 * <em>produces:</em>
1284 rb_thread_run(VALUE thread
)
1286 rb_thread_wakeup(thread
);
1287 rb_thread_schedule();
1294 * Thread.stop => nil
1296 * Stops execution of the current thread, putting it into a ``sleep'' state,
1297 * and schedules execution of another thread.
1299 * a = Thread.new { print "a"; Thread.stop; print "c" }
1305 * <em>produces:</em>
1311 rb_thread_stop(void)
1313 if (rb_thread_alone()) {
1314 rb_raise(rb_eThreadError
,
1315 "stopping only thread\n\tnote: use sleep to stop forever");
1317 rb_thread_sleep_deadly();
1322 thread_list_i(st_data_t key
, st_data_t val
, void *data
)
1324 VALUE ary
= (VALUE
)data
;
1326 GetThreadPtr((VALUE
)key
, th
);
1328 switch (th
->status
) {
1329 case THREAD_RUNNABLE
:
1330 case THREAD_STOPPED
:
1331 case THREAD_STOPPED_FOREVER
:
1332 case THREAD_TO_KILL
:
1333 rb_ary_push(ary
, th
->self
);
1340 /********************************************************************/
1344 * Thread.list => array
1346 * Returns an array of <code>Thread</code> objects for all threads that are
1347 * either runnable or stopped.
1349 * Thread.new { sleep(200) }
1350 * Thread.new { 1000000.times {|i| i*i } }
1351 * Thread.new { Thread.stop }
1352 * Thread.list.each {|t| p t}
1354 * <em>produces:</em>
1356 * #<Thread:0x401b3e84 sleep>
1357 * #<Thread:0x401b3f38 run>
1358 * #<Thread:0x401b3fb0 sleep>
1359 * #<Thread:0x401bdf4c run>
1363 rb_thread_list(void)
1365 VALUE ary
= rb_ary_new();
1366 st_foreach(GET_THREAD()->vm
->living_threads
, thread_list_i
, ary
);
1371 rb_thread_current(void)
1373 return GET_THREAD()->self
;
1378 * Thread.current => thread
1380 * Returns the currently executing thread.
1382 * Thread.current #=> #<Thread:0x401bdf4c run>
1386 thread_s_current(VALUE klass
)
1388 return rb_thread_current();
1392 rb_thread_main(void)
1394 return GET_THREAD()->vm
->main_thread
->self
;
1398 rb_thread_s_main(VALUE klass
)
1400 return rb_thread_main();
1406 * Thread.abort_on_exception => true or false
1408 * Returns the status of the global ``abort on exception'' condition. The
1409 * default is <code>false</code>. When set to <code>true</code>, or if the
1410 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1411 * command line option <code>-d</code> was specified) all threads will abort
1412 * (the process will <code>exit(0)</code>) if an exception is raised in any
1413 * thread. See also <code>Thread::abort_on_exception=</code>.
1417 rb_thread_s_abort_exc(void)
1419 return GET_THREAD()->vm
->thread_abort_on_exception
? Qtrue
: Qfalse
;
1425 * Thread.abort_on_exception= boolean => true or false
1427 * When set to <code>true</code>, all threads will abort if an exception is
1428 * raised. Returns the new state.
1430 * Thread.abort_on_exception = true
1431 * t1 = Thread.new do
1432 * puts "In new thread"
1433 * raise "Exception from thread"
1436 * puts "not reached"
1438 * <em>produces:</em>
1441 * prog.rb:4: Exception from thread (RuntimeError)
1442 * from prog.rb:2:in `initialize'
1443 * from prog.rb:2:in `new'
1448 rb_thread_s_abort_exc_set(VALUE self
, VALUE val
)
1451 GET_THREAD()->vm
->thread_abort_on_exception
= RTEST(val
);
1458 * thr.abort_on_exception => true or false
1460 * Returns the status of the thread-local ``abort on exception'' condition for
1461 * <i>thr</i>. The default is <code>false</code>. See also
1462 * <code>Thread::abort_on_exception=</code>.
1466 rb_thread_abort_exc(VALUE thread
)
1469 GetThreadPtr(thread
, th
);
1470 return th
->abort_on_exception
? Qtrue
: Qfalse
;
1476 * thr.abort_on_exception= boolean => true or false
1478 * When set to <code>true</code>, causes all threads (including the main
1479 * program) to abort if an exception is raised in <i>thr</i>. The process will
1480 * effectively <code>exit(0)</code>.
1484 rb_thread_abort_exc_set(VALUE thread
, VALUE val
)
1489 GetThreadPtr(thread
, th
);
1490 th
->abort_on_exception
= RTEST(val
);
1497 * thr.group => thgrp or nil
1499 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1500 * the thread is not a member of any group.
1502 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1506 rb_thread_group(VALUE thread
)
1510 GetThreadPtr(thread
, th
);
1511 group
= th
->thgroup
;
1520 thread_status_name(enum rb_thread_status status
)
1523 case THREAD_RUNNABLE
:
1525 case THREAD_STOPPED
:
1526 case THREAD_STOPPED_FOREVER
:
1528 case THREAD_TO_KILL
:
1538 rb_thread_dead(rb_thread_t
*th
)
1540 return th
->status
== THREAD_KILLED
;
1546 * thr.status => string, false or nil
1548 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1549 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1550 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1551 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1552 * terminated with an exception.
1554 * a = Thread.new { raise("die now") }
1555 * b = Thread.new { Thread.stop }
1556 * c = Thread.new { Thread.exit }
1557 * d = Thread.new { sleep }
1558 * d.kill #=> #<Thread:0x401b3678 aborting>
1560 * b.status #=> "sleep"
1561 * c.status #=> false
1562 * d.status #=> "aborting"
1563 * Thread.current.status #=> "run"
1567 rb_thread_status(VALUE thread
)
1570 GetThreadPtr(thread
, th
);
1572 if (rb_thread_dead(th
)) {
1573 if (!NIL_P(th
->errinfo
) && !FIXNUM_P(th
->errinfo
)
1579 return rb_str_new2(thread_status_name(th
->status
));
1585 * thr.alive? => true or false
1587 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1589 * thr = Thread.new { }
1590 * thr.join #=> #<Thread:0x401b3fb0 dead>
1591 * Thread.current.alive? #=> true
1592 * thr.alive? #=> false
1596 rb_thread_alive_p(VALUE thread
)
1599 GetThreadPtr(thread
, th
);
1601 if (rb_thread_dead(th
))
1608 * thr.stop? => true or false
1610 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1612 * a = Thread.new { Thread.stop }
1613 * b = Thread.current
1619 rb_thread_stop_p(VALUE thread
)
1622 GetThreadPtr(thread
, th
);
1624 if (rb_thread_dead(th
))
1626 if (th
->status
== THREAD_STOPPED
|| th
->status
== THREAD_STOPPED_FOREVER
)
1633 * thr.safe_level => integer
1635 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1636 * levels can help when implementing sandboxes which run insecure code.
1638 * thr = Thread.new { $SAFE = 3; sleep }
1639 * Thread.current.safe_level #=> 0
1640 * thr.safe_level #=> 3
1644 rb_thread_safe_level(VALUE thread
)
1647 GetThreadPtr(thread
, th
);
1649 return INT2NUM(th
->safe_level
);
1654 * thr.inspect => string
1656 * Dump the name, id, and status of _thr_ to a string.
1660 rb_thread_inspect(VALUE thread
)
1662 const char *cname
= rb_obj_classname(thread
);
1667 GetThreadPtr(thread
, th
);
1668 status
= thread_status_name(th
->status
);
1669 str
= rb_sprintf("#<%s:%p %s>", cname
, (void *)thread
, status
);
1670 OBJ_INFECT(str
, thread
);
1676 rb_thread_local_aref(VALUE thread
, ID id
)
1681 GetThreadPtr(thread
, th
);
1682 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1683 rb_raise(rb_eSecurityError
, "Insecure: thread locals");
1685 if (!th
->local_storage
) {
1688 if (st_lookup(th
->local_storage
, id
, &val
)) {
1696 * thr[sym] => obj or nil
1698 * Attribute Reference---Returns the value of a thread-local variable, using
1699 * either a symbol or a string name. If the specified variable does not exist,
1700 * returns <code>nil</code>.
1702 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1703 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1704 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1705 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1707 * <em>produces:</em>
1709 * #<Thread:0x401b3b3c sleep>: C
1710 * #<Thread:0x401b3bc8 sleep>: B
1711 * #<Thread:0x401b3c68 sleep>: A
1712 * #<Thread:0x401bdf4c run>:
1716 rb_thread_aref(VALUE thread
, VALUE id
)
1718 return rb_thread_local_aref(thread
, rb_to_id(id
));
1722 rb_thread_local_aset(VALUE thread
, ID id
, VALUE val
)
1725 GetThreadPtr(thread
, th
);
1727 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1728 rb_raise(rb_eSecurityError
, "Insecure: can't modify thread locals");
1730 if (OBJ_FROZEN(thread
)) {
1731 rb_error_frozen("thread locals");
1733 if (!th
->local_storage
) {
1734 th
->local_storage
= st_init_numtable();
1737 st_delete_wrap(th
->local_storage
, id
);
1740 st_insert(th
->local_storage
, id
, val
);
1746 * thr[sym] = obj => obj
1748 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1749 * using either a symbol or a string. See also <code>Thread#[]</code>.
1753 rb_thread_aset(VALUE self
, ID id
, VALUE val
)
1755 return rb_thread_local_aset(self
, rb_to_id(id
), val
);
1760 * thr.key?(sym) => true or false
1762 * Returns <code>true</code> if the given string (or symbol) exists as a
1763 * thread-local variable.
1765 * me = Thread.current
1767 * me.key?(:oliver) #=> true
1768 * me.key?(:stanley) #=> false
1772 rb_thread_key_p(VALUE self
, VALUE key
)
1775 ID id
= rb_to_id(key
);
1777 GetThreadPtr(self
, th
);
1779 if (!th
->local_storage
) {
1782 if (st_lookup(th
->local_storage
, id
, 0)) {
1789 thread_keys_i(ID key
, VALUE value
, VALUE ary
)
1791 rb_ary_push(ary
, ID2SYM(key
));
1796 vm_living_thread_num(rb_vm_t
*vm
)
1798 return vm
->living_threads
->num_entries
;
1805 if (GET_THREAD()->vm
->living_threads
) {
1806 num
= vm_living_thread_num(GET_THREAD()->vm
);
1807 thread_debug("rb_thread_alone: %d\n", num
);
1816 * Returns an an array of the names of the thread-local variables (as Symbols).
1818 * thr = Thread.new do
1819 * Thread.current[:cat] = 'meow'
1820 * Thread.current["dog"] = 'woof'
1822 * thr.join #=> #<Thread:0x401b3f10 dead>
1823 * thr.keys #=> [:dog, :cat]
1827 rb_thread_keys(VALUE self
)
1830 VALUE ary
= rb_ary_new();
1831 GetThreadPtr(self
, th
);
1833 if (th
->local_storage
) {
1834 st_foreach(th
->local_storage
, thread_keys_i
, ary
);
1841 * thr.priority => integer
1843 * Returns the priority of <i>thr</i>. Default is inherited from the
1844 * current thread which creating the new thread, or zero for the
1845 * initial main thread; higher-priority threads will run before
1846 * lower-priority threads.
1848 * Thread.current.priority #=> 0
1852 rb_thread_priority(VALUE thread
)
1855 GetThreadPtr(thread
, th
);
1856 return INT2NUM(th
->priority
);
1862 * thr.priority= integer => thr
1864 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1865 * will run before lower-priority threads.
1867 * count1 = count2 = 0
1869 * loop { count1 += 1 }
1874 * loop { count2 += 1 }
1883 rb_thread_priority_set(VALUE thread
, VALUE prio
)
1886 GetThreadPtr(thread
, th
);
1891 #if USE_NATIVE_THREAD_PRIORITY
1892 th
->priority
= NUM2INT(prio
);
1893 native_thread_apply_priority(th
);
1895 priority
= NUM2INT(prio
);
1896 if (priority
> RUBY_THREAD_PRIORITY_MAX
) {
1897 priority
= RUBY_THREAD_PRIORITY_MAX
;
1899 else if (priority
< RUBY_THREAD_PRIORITY_MIN
) {
1900 priority
= RUBY_THREAD_PRIORITY_MIN
;
1902 th
->priority
= priority
;
1903 th
->slice
= priority
;
1905 return INT2NUM(th
->priority
);
1910 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1912 rb_fd_init(volatile rb_fdset_t
*fds
)
1915 fds
->fdset
= ALLOC(fd_set
);
1916 FD_ZERO(fds
->fdset
);
1920 rb_fd_term(rb_fdset_t
*fds
)
1922 if (fds
->fdset
) xfree(fds
->fdset
);
1928 rb_fd_zero(rb_fdset_t
*fds
)
1931 MEMZERO(fds
->fdset
, fd_mask
, howmany(fds
->maxfd
, NFDBITS
));
1932 FD_ZERO(fds
->fdset
);
1937 rb_fd_resize(int n
, rb_fdset_t
*fds
)
1939 int m
= howmany(n
+ 1, NFDBITS
) * sizeof(fd_mask
);
1940 int o
= howmany(fds
->maxfd
, NFDBITS
) * sizeof(fd_mask
);
1942 if (m
< sizeof(fd_set
)) m
= sizeof(fd_set
);
1943 if (o
< sizeof(fd_set
)) o
= sizeof(fd_set
);
1946 fds
->fdset
= realloc(fds
->fdset
, m
);
1947 memset((char *)fds
->fdset
+ o
, 0, m
- o
);
1949 if (n
>= fds
->maxfd
) fds
->maxfd
= n
+ 1;
1953 rb_fd_set(int n
, rb_fdset_t
*fds
)
1955 rb_fd_resize(n
, fds
);
1956 FD_SET(n
, fds
->fdset
);
1960 rb_fd_clr(int n
, rb_fdset_t
*fds
)
1962 if (n
>= fds
->maxfd
) return;
1963 FD_CLR(n
, fds
->fdset
);
1967 rb_fd_isset(int n
, const rb_fdset_t
*fds
)
1969 if (n
>= fds
->maxfd
) return 0;
1970 return FD_ISSET(n
, fds
->fdset
) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1974 rb_fd_copy(rb_fdset_t
*dst
, const fd_set
*src
, int max
)
1976 int size
= howmany(max
, NFDBITS
) * sizeof(fd_mask
);
1978 if (size
< sizeof(fd_set
)) size
= sizeof(fd_set
);
1980 dst
->fdset
= realloc(dst
->fdset
, size
);
1981 memcpy(dst
->fdset
, src
, size
);
1985 rb_fd_select(int n
, rb_fdset_t
*readfds
, rb_fdset_t
*writefds
, rb_fdset_t
*exceptfds
, struct timeval
*timeout
)
1987 fd_set
*r
= NULL
, *w
= NULL
, *e
= NULL
;
1989 rb_fd_resize(n
- 1, readfds
);
1990 r
= rb_fd_ptr(readfds
);
1993 rb_fd_resize(n
- 1, writefds
);
1994 w
= rb_fd_ptr(writefds
);
1997 rb_fd_resize(n
- 1, exceptfds
);
1998 e
= rb_fd_ptr(exceptfds
);
2000 return select(n
, r
, w
, e
, timeout
);
2008 #define FD_ZERO(f) rb_fd_zero(f)
2009 #define FD_SET(i, f) rb_fd_set(i, f)
2010 #define FD_CLR(i, f) rb_fd_clr(i, f)
2011 #define FD_ISSET(i, f) rb_fd_isset(i, f)
2015 #if defined(__CYGWIN__) || defined(_WIN32)
/* Three-way comparison of two timevals, strcmp-style: negative when
 * *a < *b, zero when equal, positive when *a > *b.  Seconds dominate;
 * microseconds break ties. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;

    if (sec_diff != 0) {
        return sec_diff;
    }
    return a->tv_usec - b->tv_usec;
}
/*
 * In-place subtraction: *rest -= *wait, borrowing whole seconds into the
 * microsecond field as needed.  Returns 0 (leaving *rest unmodified)
 * when the borrow would drive *rest non-positive — the caller treats
 * that as "no time remaining" — and 1 after a successful subtraction.
 */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) {
            return 0;
        }
        rest->tv_sec -= 1;
        rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
2040 do_select(int n
, fd_set
*read
, fd_set
*write
, fd_set
*except
,
2041 struct timeval
*timeout
)
2044 fd_set orig_read
, orig_write
, orig_except
;
2048 struct timeval wait_rest
;
2049 # if defined(__CYGWIN__) || defined(_WIN32)
2050 struct timeval start_time
;
2054 # if defined(__CYGWIN__) || defined(_WIN32)
2055 gettimeofday(&start_time
, NULL
);
2056 limit
= (double)start_time
.tv_sec
+ (double)start_time
.tv_usec
*1e-6;
2058 limit
= timeofday();
2060 limit
+= (double)timeout
->tv_sec
+(double)timeout
->tv_usec
*1e-6;
2061 wait_rest
= *timeout
;
2062 timeout
= &wait_rest
;
2066 if (read
) orig_read
= *read
;
2067 if (write
) orig_write
= *write
;
2068 if (except
) orig_except
= *except
;
2073 #if defined(__CYGWIN__) || defined(_WIN32)
2076 /* polling duration: 100ms */
2077 struct timeval wait_100ms
, *wait
;
2078 wait_100ms
.tv_sec
= 0;
2079 wait_100ms
.tv_usec
= 100 * 1000; /* 100 ms */
2082 wait
= (timeout
== 0 || cmp_tv(&wait_100ms
, timeout
) > 0) ? &wait_100ms
: timeout
;
2085 result
= select(n
, read
, write
, except
, wait
);
2086 if (result
< 0) lerrno
= errno
;
2087 if (result
!= 0) break;
2089 if (read
) *read
= orig_read
;
2090 if (write
) *write
= orig_write
;
2091 if (except
) *except
= orig_except
;
2094 struct timeval elapsed
;
2095 gettimeofday(&elapsed
, NULL
);
2096 subtract_tv(&elapsed
, &start_time
);
2097 if (!subtract_tv(timeout
, &elapsed
)) {
2101 if (cmp_tv(&wait_100ms
, timeout
) < 0) wait
= timeout
;
2103 } while (__th
->interrupt_flag
== 0);
2105 } while (result
== 0 && !finish
);
2109 result
= select(n
, read
, write
, except
, timeout
);
2110 if (result
< 0) lerrno
= errno
;
2111 }, ubf_select
, GET_THREAD());
2122 if (read
) *read
= orig_read
;
2123 if (write
) *write
= orig_write
;
2124 if (except
) *except
= orig_except
;
2127 double d
= limit
- timeofday();
2129 wait_rest
.tv_sec
= (unsigned int)d
;
2130 wait_rest
.tv_usec
= (long)((d
-(double)wait_rest
.tv_sec
)*1e6
);
2131 if (wait_rest
.tv_sec
< 0) wait_rest
.tv_sec
= 0;
2132 if (wait_rest
.tv_usec
< 0) wait_rest
.tv_usec
= 0;
2144 rb_thread_wait_fd_rw(int fd
, int read
)
2147 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd
, read
? "read" : "write");
2150 rb_raise(rb_eIOError
, "closed stream");
2152 while (result
<= 0) {
2158 result
= do_select(fd
+ 1, rb_fd_ptr(&set
), 0, 0, 0);
2161 result
= do_select(fd
+ 1, 0, rb_fd_ptr(&set
), 0, 0);
2171 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd
, read
? "read" : "write");
/* Block the current thread until +fd+ becomes readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1); /* 1 == wait for read readiness */
}
/* Block the current thread until +fd+ becomes writable.
 * NOTE(review): return type not visible in this view — confirm against
 * the declaration in the public header. */
void
rb_thread_fd_writable(int fd)
{
    rb_thread_wait_fd_rw(fd, 0); /* 0 == wait for write readiness */
}
2188 rb_thread_select(int max
, fd_set
* read
, fd_set
* write
, fd_set
* except
,
2189 struct timeval
*timeout
)
2191 if (!read
&& !write
&& !except
) {
2193 rb_thread_sleep_forever();
2196 rb_thread_wait_for(*timeout
);
2200 return do_select(max
, read
, write
, except
, timeout
);
2209 #ifdef USE_CONSERVATIVE_STACK_END
2211 rb_gc_set_stack_end(VALUE
**stack_end_p
)
2214 *stack_end_p
= &stack_end
;
2219 rb_gc_save_machine_context(rb_thread_t
*th
)
2221 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
2222 FLUSH_REGISTER_WINDOWS
;
2224 th
->machine_register_stack_end
= rb_ia64_bsp();
2226 setjmp(th
->machine_regs
);
2233 int rb_get_next_signal(rb_vm_t
*vm
);
2236 timer_thread_function(void *arg
)
2238 rb_vm_t
*vm
= arg
; /* TODO: fix me for Multi-VM */
2240 /* for time slice */
2241 RUBY_VM_SET_TIMER_INTERRUPT(vm
->running_thread
);
2244 if (vm
->buffered_signal_size
&& vm
->main_thread
->exec_signal
== 0) {
2245 rb_thread_t
*mth
= vm
->main_thread
;
2246 enum rb_thread_status prev_status
= mth
->status
;
2247 mth
->exec_signal
= rb_get_next_signal(vm
);
2248 thread_debug("main_thread: %s\n", thread_status_name(prev_status
));
2249 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2250 (long)vm
->buffered_signal_size
, vm
->main_thread
->exec_signal
);
2251 if (mth
->status
!= THREAD_KILLED
) mth
->status
= THREAD_RUNNABLE
;
2252 rb_thread_interrupt(mth
);
2253 mth
->status
= prev_status
;
2257 /* prove profiler */
2258 if (vm
->prove_profile
.enable
) {
2259 rb_thread_t
*th
= vm
->running_thread
;
2261 if (vm
->during_gc
) {
2262 /* GC prove profiling */
2269 rb_thread_stop_timer_thread(void)
2271 if (timer_thread_id
) {
2273 native_thread_join(timer_thread_id
);
2274 timer_thread_id
= 0;
2279 rb_thread_reset_timer_thread(void)
2281 timer_thread_id
= 0;
2285 rb_thread_start_timer_thread(void)
2287 rb_thread_create_timer_thread();
2291 clear_coverage_i(st_data_t key
, st_data_t val
, st_data_t dummy
)
2294 VALUE lines
= (VALUE
)val
;
2296 for (i
= 0; i
< RARRAY_LEN(lines
); i
++) {
2297 if (RARRAY_PTR(lines
)[i
] != Qnil
) {
2298 RARRAY_PTR(lines
)[i
] = INT2FIX(0);
2305 clear_coverage(void)
2307 extern VALUE
rb_get_coverages(void);
2308 VALUE coverages
= rb_get_coverages();
2309 if (RTEST(coverages
)) {
2310 st_foreach(RHASH_TBL(coverages
), clear_coverage_i
, 0);
2315 terminate_atfork_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2319 GetThreadPtr(thval
, th
);
2321 if (th
!= current_th
) {
2322 thread_cleanup_func(th
);
2328 rb_thread_atfork(void)
2330 rb_thread_t
*th
= GET_THREAD();
2331 rb_vm_t
*vm
= th
->vm
;
2332 VALUE thval
= th
->self
;
2333 vm
->main_thread
= th
;
2335 st_foreach(vm
->living_threads
, terminate_atfork_i
, (st_data_t
)th
);
2336 st_clear(vm
->living_threads
);
2337 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2340 rb_reset_random_seed();
2344 terminate_atfork_before_exec_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2348 GetThreadPtr(thval
, th
);
2350 if (th
!= current_th
) {
2351 thread_cleanup_func_before_exec(th
);
2357 rb_thread_atfork_before_exec(void)
2359 rb_thread_t
*th
= GET_THREAD();
2360 rb_vm_t
*vm
= th
->vm
;
2361 VALUE thval
= th
->self
;
2362 vm
->main_thread
= th
;
2364 st_foreach(vm
->living_threads
, terminate_atfork_before_exec_i
, (st_data_t
)th
);
2365 st_clear(vm
->living_threads
);
2366 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2377 * Document-class: ThreadGroup
2379 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2380 * threads as a group. A <code>Thread</code> can belong to only one
2381 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2382 * remove it from any previous group.
2384 * Newly created threads belong to the same group as the thread from which they
2388 static VALUE
thgroup_s_alloc(VALUE
);
2390 thgroup_s_alloc(VALUE klass
)
2393 struct thgroup
*data
;
2395 group
= Data_Make_Struct(klass
, struct thgroup
, 0, -1, data
);
2397 data
->group
= group
;
2402 struct thgroup_list_params
{
2408 thgroup_list_i(st_data_t key
, st_data_t val
, st_data_t data
)
2410 VALUE thread
= (VALUE
)key
;
2411 VALUE ary
= ((struct thgroup_list_params
*)data
)->ary
;
2412 VALUE group
= ((struct thgroup_list_params
*)data
)->group
;
2414 GetThreadPtr(thread
, th
);
2416 if (th
->thgroup
== group
) {
2417 rb_ary_push(ary
, thread
);
2424 * thgrp.list => array
2426 * Returns an array of all existing <code>Thread</code> objects that belong to
2429 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2433 thgroup_list(VALUE group
)
2435 VALUE ary
= rb_ary_new();
2436 struct thgroup_list_params param
;
2439 param
.group
= group
;
2440 st_foreach(GET_THREAD()->vm
->living_threads
, thgroup_list_i
, (st_data_t
) & param
);
2447 * thgrp.enclose => thgrp
2449 * Prevents threads from being added to or removed from the receiving
2450 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2451 * <code>ThreadGroup</code>.
2453 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2454 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2455 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2458 * <em>produces:</em>
2460 * ThreadError: can't move from the enclosed thread group
2464 thgroup_enclose(VALUE group
)
2466 struct thgroup
*data
;
2468 Data_Get_Struct(group
, struct thgroup
, data
);
2477 * thgrp.enclosed? => true or false
2479 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2480 * ThreadGroup#enclose.
2484 thgroup_enclosed_p(VALUE group
)
2486 struct thgroup
*data
;
2488 Data_Get_Struct(group
, struct thgroup
, data
);
2497 * thgrp.add(thread) => thgrp
2499 * Adds the given <em>thread</em> to this group, removing it from any other
2500 * group to which it may have previously belonged.
2502 * puts "Initial group is #{ThreadGroup::Default.list}"
2503 * tg = ThreadGroup.new
2504 * t1 = Thread.new { sleep }
2505 * t2 = Thread.new { sleep }
2506 * puts "t1 is #{t1}"
2507 * puts "t2 is #{t2}"
2509 * puts "Initial group now #{ThreadGroup::Default.list}"
2510 * puts "tg group now #{tg.list}"
2512 * <em>produces:</em>
2514 * Initial group is #<Thread:0x401bdf4c>
2515 * t1 is #<Thread:0x401b3c90>
2516 * t2 is #<Thread:0x401b3c18>
2517 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2518 * tg group now #<Thread:0x401b3c90>
2522 thgroup_add(VALUE group
, VALUE thread
)
2525 struct thgroup
*data
;
2528 GetThreadPtr(thread
, th
);
2530 if (OBJ_FROZEN(group
)) {
2531 rb_raise(rb_eThreadError
, "can't move to the frozen thread group");
2533 Data_Get_Struct(group
, struct thgroup
, data
);
2534 if (data
->enclosed
) {
2535 rb_raise(rb_eThreadError
, "can't move to the enclosed thread group");
2542 if (OBJ_FROZEN(th
->thgroup
)) {
2543 rb_raise(rb_eThreadError
, "can't move from the frozen thread group");
2545 Data_Get_Struct(th
->thgroup
, struct thgroup
, data
);
2546 if (data
->enclosed
) {
2547 rb_raise(rb_eThreadError
,
2548 "can't move from the enclosed thread group");
2551 th
->thgroup
= group
;
2557 * Document-class: Mutex
2559 * Mutex implements a simple semaphore that can be used to coordinate access to
2560 * shared data from multiple concurrent threads.
2565 * semaphore = Mutex.new
2568 * semaphore.synchronize {
2569 * # access shared resource
2574 * semaphore.synchronize {
2575 * # access shared resource
2581 #define GetMutexPtr(obj, tobj) \
2582 Data_Get_Struct(obj, mutex_t, tobj)
2584 static const char *mutex_unlock(mutex_t
*mutex
);
2587 mutex_free(void *ptr
)
2590 mutex_t
*mutex
= ptr
;
2592 /* rb_warn("free locked mutex"); */
2593 mutex_unlock(mutex
);
2595 native_mutex_destroy(&mutex
->lock
);
2596 native_cond_destroy(&mutex
->cond
);
2602 mutex_alloc(VALUE klass
)
2607 obj
= Data_Make_Struct(klass
, mutex_t
, NULL
, mutex_free
, mutex
);
2608 native_mutex_initialize(&mutex
->lock
);
2609 native_cond_initialize(&mutex
->cond
);
2615 * Mutex.new => mutex
2617 * Creates a new Mutex
2620 mutex_initialize(VALUE self
)
2628 return mutex_alloc(rb_cMutex
);
2633 * mutex.locked? => true or false
2635 * Returns +true+ if this lock is currently held by some thread.
2638 rb_mutex_locked_p(VALUE self
)
2641 GetMutexPtr(self
, mutex
);
2642 return mutex
->th
? Qtrue
: Qfalse
;
2646 mutex_locked(rb_thread_t
*th
, VALUE self
)
2649 GetMutexPtr(self
, mutex
);
2651 if (th
->keeping_mutexes
) {
2652 mutex
->next_mutex
= th
->keeping_mutexes
;
2654 th
->keeping_mutexes
= mutex
;
2659 * mutex.try_lock => true or false
2661 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2665 rb_mutex_trylock(VALUE self
)
2668 VALUE locked
= Qfalse
;
2669 GetMutexPtr(self
, mutex
);
2671 if (mutex
->th
== GET_THREAD()) {
2672 rb_raise(rb_eThreadError
, "deadlock; recursive locking");
2675 native_mutex_lock(&mutex
->lock
);
2676 if (mutex
->th
== 0) {
2677 mutex
->th
= GET_THREAD();
2680 mutex_locked(GET_THREAD(), self
);
2682 native_mutex_unlock(&mutex
->lock
);
2688 lock_func(rb_thread_t
*th
, mutex_t
*mutex
, int last_thread
)
2690 int interrupted
= 0;
2691 #if 0 /* for debug */
2692 native_thread_yield();
2695 native_mutex_lock(&mutex
->lock
);
2696 th
->transition_for_lock
= 0;
2697 while (mutex
->th
|| (mutex
->th
= th
, 0)) {
2703 mutex
->cond_waiting
++;
2704 native_cond_wait(&mutex
->cond
, &mutex
->lock
);
2705 mutex
->cond_notified
--;
2707 if (RUBY_VM_INTERRUPTED(th
)) {
2712 th
->transition_for_lock
= 1;
2713 native_mutex_unlock(&mutex
->lock
);
2715 if (interrupted
== 2) native_thread_yield();
2716 #if 0 /* for debug */
2717 native_thread_yield();
2724 lock_interrupt(void *ptr
)
2726 mutex_t
*mutex
= (mutex_t
*)ptr
;
2727 native_mutex_lock(&mutex
->lock
);
2728 if (mutex
->cond_waiting
> 0) {
2729 native_cond_broadcast(&mutex
->cond
);
2730 mutex
->cond_notified
= mutex
->cond_waiting
;
2731 mutex
->cond_waiting
= 0;
2733 native_mutex_unlock(&mutex
->lock
);
2738 * mutex.lock => true or false
2740 * Attempts to grab the lock and waits if it isn't available.
2741 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2744 rb_mutex_lock(VALUE self
)
2746 if (rb_mutex_trylock(self
) == Qfalse
) {
2748 rb_thread_t
*th
= GET_THREAD();
2749 GetMutexPtr(self
, mutex
);
2751 while (mutex
->th
!= th
) {
2753 enum rb_thread_status prev_status
= th
->status
;
2754 int last_thread
= 0;
2755 struct rb_unblock_callback oldubf
;
2757 set_unblock_function(th
, lock_interrupt
, mutex
, &oldubf
);
2758 th
->status
= THREAD_STOPPED_FOREVER
;
2760 th
->locking_mutex
= self
;
2761 if (vm_living_thread_num(th
->vm
) == th
->vm
->sleeper
) {
2765 th
->transition_for_lock
= 1;
2766 BLOCKING_REGION_CORE({
2767 interrupted
= lock_func(th
, mutex
, last_thread
);
2769 th
->transition_for_lock
= 0;
2770 remove_signal_thread_list(th
);
2771 reset_unblock_function(th
, &oldubf
);
2773 th
->locking_mutex
= Qfalse
;
2774 if (mutex
->th
&& interrupted
== 2) {
2775 rb_check_deadlock(th
->vm
);
2777 if (th
->status
== THREAD_STOPPED_FOREVER
) {
2778 th
->status
= prev_status
;
2782 if (mutex
->th
== th
) mutex_locked(th
, self
);
2785 RUBY_VM_CHECK_INTS();
2793 mutex_unlock(mutex_t
*mutex
)
2795 const char *err
= NULL
;
2796 rb_thread_t
*th
= GET_THREAD();
2799 native_mutex_lock(&mutex
->lock
);
2801 if (mutex
->th
== 0) {
2802 err
= "Attempt to unlock a mutex which is not locked";
2804 else if (mutex
->th
!= GET_THREAD()) {
2805 err
= "Attempt to unlock a mutex which is locked by another thread";
2809 if (mutex
->cond_waiting
> 0) {
2810 /* waiting thread */
2811 native_cond_signal(&mutex
->cond
);
2812 mutex
->cond_waiting
--;
2813 mutex
->cond_notified
++;
2817 native_mutex_unlock(&mutex
->lock
);
2820 th_mutex
= th
->keeping_mutexes
;
2821 if (th_mutex
== mutex
) {
2822 th
->keeping_mutexes
= mutex
->next_mutex
;
2827 tmp_mutex
= th_mutex
->next_mutex
;
2828 if (tmp_mutex
== mutex
) {
2829 th_mutex
->next_mutex
= tmp_mutex
->next_mutex
;
2832 th_mutex
= tmp_mutex
;
2835 mutex
->next_mutex
= NULL
;
2843 * mutex.unlock => self
2845 * Releases the lock.
2846 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2849 rb_mutex_unlock(VALUE self
)
2853 GetMutexPtr(self
, mutex
);
2855 err
= mutex_unlock(mutex
);
2856 if (err
) rb_raise(rb_eThreadError
, err
);
2862 rb_mutex_unlock_all(mutex_t
*mutexes
)
2869 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2871 mutexes
= mutex
->next_mutex
;
2872 err
= mutex_unlock(mutex
);
2873 if (err
) rb_bug("invalid keeping_mutexes: %s", err
);
2878 rb_mutex_sleep_forever(VALUE time
)
2880 rb_thread_sleep_deadly();
2885 rb_mutex_wait_for(VALUE time
)
2887 const struct timeval
*t
= (struct timeval
*)time
;
2888 rb_thread_wait_for(*t
);
2893 rb_mutex_sleep(VALUE self
, VALUE timeout
)
2898 if (!NIL_P(timeout
)) {
2899 t
= rb_time_interval(timeout
);
2901 rb_mutex_unlock(self
);
2903 if (NIL_P(timeout
)) {
2904 rb_ensure(rb_mutex_sleep_forever
, Qnil
, rb_mutex_lock
, self
);
2907 rb_ensure(rb_mutex_wait_for
, (VALUE
)&t
, rb_mutex_lock
, self
);
2909 end
= time(0) - beg
;
2910 return INT2FIX(end
);
2915 * mutex.sleep(timeout = nil) => number
2917 * Releases the lock and sleeps +timeout+ seconds if it is given and
2918 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2919 * the current thread.
2922 mutex_sleep(int argc
, VALUE
*argv
, VALUE self
)
2926 rb_scan_args(argc
, argv
, "01", &timeout
);
2927 return rb_mutex_sleep(self
, timeout
);
2932 * mutex.synchronize { ... } => result of the block
2934 * Obtains a lock, runs the block, and releases the lock when the block
2935 * completes. See the example under +Mutex+.
2939 rb_thread_synchronize(VALUE mutex
, VALUE (*func
)(VALUE arg
), VALUE arg
)
2941 rb_mutex_lock(mutex
);
2942 return rb_ensure(func
, arg
, rb_mutex_unlock
, mutex
);
2946 * Document-class: Barrier
2948 typedef struct rb_thread_list_struct rb_thread_list_t
;
2950 struct rb_thread_list_struct
{
2952 rb_thread_list_t
*next
;
2956 thlist_mark(void *ptr
)
2958 rb_thread_list_t
*q
= ptr
;
2960 for (; q
; q
= q
->next
) {
2961 rb_gc_mark(q
->th
->self
);
2966 thlist_free(void *ptr
)
2968 rb_thread_list_t
*q
= ptr
, *next
;
2970 for (; q
; q
= next
) {
2977 thlist_signal(rb_thread_list_t
**list
, unsigned int maxth
, rb_thread_t
**woken_thread
)
2980 rb_thread_list_t
*q
;
2982 while ((q
= *list
) != NULL
) {
2983 rb_thread_t
*th
= q
->th
;
2987 if (th
->status
!= THREAD_KILLED
) {
2988 rb_thread_ready(th
);
2989 if (!woken
&& woken_thread
) *woken_thread
= th
;
2990 if (++woken
>= maxth
&& maxth
) break;
2998 rb_thread_list_t
*waiting
, **tail
;
3002 barrier_mark(void *ptr
)
3004 rb_barrier_t
*b
= ptr
;
3006 if (b
->owner
) rb_gc_mark(b
->owner
->self
);
3007 thlist_mark(b
->waiting
);
3011 barrier_free(void *ptr
)
3013 rb_barrier_t
*b
= ptr
;
3016 thlist_free(b
->waiting
);
3022 barrier_alloc(VALUE klass
)
3025 rb_barrier_t
*barrier
;
3027 obj
= Data_Make_Struct(klass
, rb_barrier_t
, barrier_mark
, barrier_free
, barrier
);
3028 barrier
->owner
= GET_THREAD();
3029 barrier
->waiting
= 0;
3030 barrier
->tail
= &barrier
->waiting
;
3035 rb_barrier_new(void)
3037 return barrier_alloc(rb_cBarrier
);
3041 rb_barrier_wait(VALUE self
)
3043 rb_barrier_t
*barrier
;
3044 rb_thread_list_t
*q
;
3046 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
3047 if (!barrier
->owner
|| barrier
->owner
->status
== THREAD_KILLED
) {
3049 if (thlist_signal(&barrier
->waiting
, 1, &barrier
->owner
)) return Qfalse
;
3052 else if (barrier
->owner
== GET_THREAD()) {
3056 *barrier
->tail
= q
= ALLOC(rb_thread_list_t
);
3057 q
->th
= GET_THREAD();
3059 barrier
->tail
= &q
->next
;
3060 rb_thread_sleep_forever();
3061 return barrier
->owner
== GET_THREAD() ? Qtrue
: Qfalse
;
3066 rb_barrier_release(VALUE self
)
3068 rb_barrier_t
*barrier
;
3071 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
3072 if (barrier
->owner
!= GET_THREAD()) {
3073 rb_raise(rb_eThreadError
, "not owned");
3075 n
= thlist_signal(&barrier
->waiting
, 0, &barrier
->owner
);
3076 return n
? UINT2NUM(n
) : Qfalse
;
3079 /* variables for recursive traversals */
3080 static ID recursive_key
;
3083 recursive_check(VALUE hash
, VALUE obj
)
3085 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3089 VALUE list
= rb_hash_aref(hash
, ID2SYM(rb_frame_this_func()));
3091 if (NIL_P(list
) || TYPE(list
) != T_HASH
)
3093 if (NIL_P(rb_hash_lookup(list
, obj
)))
3100 recursive_push(VALUE hash
, VALUE obj
)
3104 sym
= ID2SYM(rb_frame_this_func());
3105 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3106 hash
= rb_hash_new();
3107 rb_thread_local_aset(rb_thread_current(), recursive_key
, hash
);
3111 list
= rb_hash_aref(hash
, sym
);
3113 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3114 list
= rb_hash_new();
3115 rb_hash_aset(hash
, sym
, list
);
3117 rb_hash_aset(list
, obj
, Qtrue
);
3122 recursive_pop(VALUE hash
, VALUE obj
)
3126 sym
= ID2SYM(rb_frame_this_func());
3127 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3130 symname
= rb_inspect(sym
);
3131 thrname
= rb_inspect(rb_thread_current());
3133 rb_raise(rb_eTypeError
, "invalid inspect_tbl hash for %s in %s",
3134 StringValuePtr(symname
), StringValuePtr(thrname
));
3136 list
= rb_hash_aref(hash
, sym
);
3137 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3138 VALUE symname
= rb_inspect(sym
);
3139 VALUE thrname
= rb_inspect(rb_thread_current());
3140 rb_raise(rb_eTypeError
, "invalid inspect_tbl list for %s in %s",
3141 StringValuePtr(symname
), StringValuePtr(thrname
));
3143 rb_hash_delete(list
, obj
);
3147 rb_exec_recursive(VALUE (*func
) (VALUE
, VALUE
, int), VALUE obj
, VALUE arg
)
3149 VALUE hash
= rb_thread_local_aref(rb_thread_current(), recursive_key
);
3150 VALUE objid
= rb_obj_id(obj
);
3152 if (recursive_check(hash
, objid
)) {
3153 return (*func
) (obj
, arg
, Qtrue
);
3156 VALUE result
= Qundef
;
3159 hash
= recursive_push(hash
, objid
);
3161 if ((state
= EXEC_TAG()) == 0) {
3162 result
= (*func
) (obj
, arg
, Qfalse
);
3165 recursive_pop(hash
, objid
);
3174 static rb_event_hook_t
*
3175 alloc_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3177 rb_event_hook_t
*hook
= ALLOC(rb_event_hook_t
);
3179 hook
->flag
= events
;
3185 thread_reset_event_flags(rb_thread_t
*th
)
3187 rb_event_hook_t
*hook
= th
->event_hooks
;
3188 rb_event_flag_t flag
= th
->event_flags
& RUBY_EVENT_VM
;
3197 rb_thread_add_event_hook(rb_thread_t
*th
,
3198 rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3200 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3201 hook
->next
= th
->event_hooks
;
3202 th
->event_hooks
= hook
;
3203 thread_reset_event_flags(th
);
3207 set_threads_event_flags_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3211 GetThreadPtr(thval
, th
);
3214 th
->event_flags
|= RUBY_EVENT_VM
;
3217 th
->event_flags
&= (~RUBY_EVENT_VM
);
3223 set_threads_event_flags(int flag
)
3225 st_foreach(GET_VM()->living_threads
, set_threads_event_flags_i
, (st_data_t
) flag
);
3229 rb_add_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3231 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3232 rb_vm_t
*vm
= GET_VM();
3234 hook
->next
= vm
->event_hooks
;
3235 vm
->event_hooks
= hook
;
3237 set_threads_event_flags(1);
3241 remove_event_hook(rb_event_hook_t
**root
, rb_event_hook_func_t func
)
3243 rb_event_hook_t
*prev
= NULL
, *hook
= *root
, *next
;
3247 if (func
== 0 || hook
->func
== func
) {
3249 prev
->next
= hook
->next
;
3265 rb_thread_remove_event_hook(rb_thread_t
*th
, rb_event_hook_func_t func
)
3267 int ret
= remove_event_hook(&th
->event_hooks
, func
);
3268 thread_reset_event_flags(th
);
3273 rb_remove_event_hook(rb_event_hook_func_t func
)
3275 rb_vm_t
*vm
= GET_VM();
3276 rb_event_hook_t
*hook
= vm
->event_hooks
;
3277 int ret
= remove_event_hook(&vm
->event_hooks
, func
);
3279 if (hook
!= NULL
&& vm
->event_hooks
== NULL
) {
3280 set_threads_event_flags(0);
3287 clear_trace_func_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3290 GetThreadPtr((VALUE
)key
, th
);
3291 rb_thread_remove_event_hook(th
, 0);
3296 rb_clear_trace_func(void)
3298 st_foreach(GET_VM()->living_threads
, clear_trace_func_i
, (st_data_t
) 0);
3299 rb_remove_event_hook(0);
3302 static void call_trace_func(rb_event_flag_t
, VALUE data
, VALUE self
, ID id
, VALUE klass
);
3306 * set_trace_func(proc) => proc
3307 * set_trace_func(nil) => nil
3309 * Establishes _proc_ as the handler for tracing, or disables
3310 * tracing if the parameter is +nil+. _proc_ takes up
3311 * to six parameters: an event name, a filename, a line number, an
3312 * object id, a binding, and the name of a class. _proc_ is
3313 * invoked whenever an event occurs. Events are: <code>c-call</code>
3314 * (call a C-language routine), <code>c-return</code> (return from a
3315 * C-language routine), <code>call</code> (call a Ruby method),
3316 * <code>class</code> (start a class or module definition),
3317 * <code>end</code> (finish a class or module definition),
3318 * <code>line</code> (execute code on a new line), <code>raise</code>
3319 * (raise an exception), and <code>return</code> (return from a Ruby
3320 * method). Tracing is disabled within the context of _proc_.
3329 * set_trace_func proc { |event, file, line, id, binding, classname|
3330 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3335 * line prog.rb:11 false
3336 * c-call prog.rb:11 new Class
3337 * c-call prog.rb:11 initialize Object
3338 * c-return prog.rb:11 initialize Object
3339 * c-return prog.rb:11 new Class
3340 * line prog.rb:12 false
3341 * call prog.rb:2 test Test
3342 * line prog.rb:3 test Test
3343 * line prog.rb:4 test Test
3344 * return prog.rb:4 test Test
3348 set_trace_func(VALUE obj
, VALUE trace
)
3350 rb_remove_event_hook(call_trace_func
);
3356 if (!rb_obj_is_proc(trace
)) {
3357 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3360 rb_add_event_hook(call_trace_func
, RUBY_EVENT_ALL
, trace
);
3365 thread_add_trace_func(rb_thread_t
*th
, VALUE trace
)
3367 if (!rb_obj_is_proc(trace
)) {
3368 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3371 rb_thread_add_event_hook(th
, call_trace_func
, RUBY_EVENT_ALL
, trace
);
3375 thread_add_trace_func_m(VALUE obj
, VALUE trace
)
3378 GetThreadPtr(obj
, th
);
3379 thread_add_trace_func(th
, trace
);
3384 thread_set_trace_func_m(VALUE obj
, VALUE trace
)
3387 GetThreadPtr(obj
, th
);
3388 rb_thread_remove_event_hook(th
, call_trace_func
);
3393 thread_add_trace_func(th
, trace
);
3398 get_event_name(rb_event_flag_t event
)
3401 case RUBY_EVENT_LINE
:
3403 case RUBY_EVENT_CLASS
:
3405 case RUBY_EVENT_END
:
3407 case RUBY_EVENT_CALL
:
3409 case RUBY_EVENT_RETURN
:
3411 case RUBY_EVENT_C_CALL
:
3413 case RUBY_EVENT_C_RETURN
:
3415 case RUBY_EVENT_RAISE
:
3422 VALUE
ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
);
3424 struct call_trace_func_args
{
3425 rb_event_flag_t event
;
3433 call_trace_proc(VALUE args
, int tracing
)
3435 struct call_trace_func_args
*p
= (struct call_trace_func_args
*)args
;
3436 VALUE eventname
= rb_str_new2(get_event_name(p
->event
));
3437 VALUE filename
= rb_str_new2(rb_sourcefile());
3439 int line
= rb_sourceline();
3443 if (p
->event
== RUBY_EVENT_C_CALL
||
3444 p
->event
== RUBY_EVENT_C_RETURN
) {
3449 rb_thread_method_id_and_class(GET_THREAD(), &id
, &klass
);
3451 if (id
== ID_ALLOCATOR
)
3454 if (TYPE(klass
) == T_ICLASS
) {
3455 klass
= RBASIC(klass
)->klass
;
3457 else if (FL_TEST(klass
, FL_SINGLETON
)) {
3458 klass
= rb_iv_get(klass
, "__attached__");
3462 argv
[0] = eventname
;
3464 argv
[2] = INT2FIX(line
);
3465 argv
[3] = id
? ID2SYM(id
) : Qnil
;
3466 argv
[4] = p
->self
? rb_binding_new() : Qnil
;
3467 argv
[5] = klass
? klass
: Qnil
;
3469 return rb_proc_call_with_block(p
->proc
, 6, argv
, Qnil
);
3473 call_trace_func(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3475 struct call_trace_func_args args
;
3482 ruby_suppress_tracing(call_trace_proc
, (VALUE
)&args
, Qfalse
);
3486 ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
)
3488 rb_thread_t
*th
= GET_THREAD();
3489 int state
, raised
, tracing
;
3490 VALUE result
= Qnil
;
3492 if ((tracing
= th
->tracing
) != 0 && !always
) {
3499 raised
= rb_thread_reset_raised(th
);
3502 if ((state
= EXEC_TAG()) == 0) {
3503 result
= (*func
)(arg
, tracing
);
3507 rb_thread_set_raised(th
);
3511 th
->tracing
= tracing
;
3520 * +Thread+ encapsulates the behavior of a thread of
3521 * execution, including the main thread of the Ruby script.
3523 * In the descriptions of the methods in this class, the parameter _sym_
3524 * refers to a symbol, which is either a quoted string or a
3525 * +Symbol+ (such as <code>:name</code>).
3535 rb_define_singleton_method(rb_cThread
, "new", thread_s_new
, -1);
3536 rb_define_singleton_method(rb_cThread
, "start", thread_start
, -2);
3537 rb_define_singleton_method(rb_cThread
, "fork", thread_start
, -2);
3538 rb_define_singleton_method(rb_cThread
, "main", rb_thread_s_main
, 0);
3539 rb_define_singleton_method(rb_cThread
, "current", thread_s_current
, 0);
3540 rb_define_singleton_method(rb_cThread
, "stop", rb_thread_stop
, 0);
3541 rb_define_singleton_method(rb_cThread
, "kill", rb_thread_s_kill
, 1);
3542 rb_define_singleton_method(rb_cThread
, "exit", rb_thread_exit
, 0);
3543 rb_define_singleton_method(rb_cThread
, "pass", thread_s_pass
, 0);
3544 rb_define_singleton_method(rb_cThread
, "list", rb_thread_list
, 0);
3545 rb_define_singleton_method(rb_cThread
, "abort_on_exception", rb_thread_s_abort_exc
, 0);
3546 rb_define_singleton_method(rb_cThread
, "abort_on_exception=", rb_thread_s_abort_exc_set
, 1);
3547 #if THREAD_DEBUG < 0
3548 rb_define_singleton_method(rb_cThread
, "DEBUG", rb_thread_s_debug
, 0);
3549 rb_define_singleton_method(rb_cThread
, "DEBUG=", rb_thread_s_debug_set
, 1);
3552 rb_define_method(rb_cThread
, "initialize", thread_initialize
, -2);
3553 rb_define_method(rb_cThread
, "raise", thread_raise_m
, -1);
3554 rb_define_method(rb_cThread
, "join", thread_join_m
, -1);
3555 rb_define_method(rb_cThread
, "value", thread_value
, 0);
3556 rb_define_method(rb_cThread
, "kill", rb_thread_kill
, 0);
3557 rb_define_method(rb_cThread
, "terminate", rb_thread_kill
, 0);
3558 rb_define_method(rb_cThread
, "exit", rb_thread_kill
, 0);
3559 rb_define_method(rb_cThread
, "run", rb_thread_run
, 0);
3560 rb_define_method(rb_cThread
, "wakeup", rb_thread_wakeup
, 0);
3561 rb_define_method(rb_cThread
, "[]", rb_thread_aref
, 1);
3562 rb_define_method(rb_cThread
, "[]=", rb_thread_aset
, 2);
3563 rb_define_method(rb_cThread
, "key?", rb_thread_key_p
, 1);
3564 rb_define_method(rb_cThread
, "keys", rb_thread_keys
, 0);
3565 rb_define_method(rb_cThread
, "priority", rb_thread_priority
, 0);
3566 rb_define_method(rb_cThread
, "priority=", rb_thread_priority_set
, 1);
3567 rb_define_method(rb_cThread
, "status", rb_thread_status
, 0);
3568 rb_define_method(rb_cThread
, "alive?", rb_thread_alive_p
, 0);
3569 rb_define_method(rb_cThread
, "stop?", rb_thread_stop_p
, 0);
3570 rb_define_method(rb_cThread
, "abort_on_exception", rb_thread_abort_exc
, 0);
3571 rb_define_method(rb_cThread
, "abort_on_exception=", rb_thread_abort_exc_set
, 1);
3572 rb_define_method(rb_cThread
, "safe_level", rb_thread_safe_level
, 0);
3573 rb_define_method(rb_cThread
, "group", rb_thread_group
, 0);
3575 rb_define_method(rb_cThread
, "inspect", rb_thread_inspect
, 0);
3577 cThGroup
= rb_define_class("ThreadGroup", rb_cObject
);
3578 rb_define_alloc_func(cThGroup
, thgroup_s_alloc
);
3579 rb_define_method(cThGroup
, "list", thgroup_list
, 0);
3580 rb_define_method(cThGroup
, "enclose", thgroup_enclose
, 0);
3581 rb_define_method(cThGroup
, "enclosed?", thgroup_enclosed_p
, 0);
3582 rb_define_method(cThGroup
, "add", thgroup_add
, 1);
3585 rb_thread_t
*th
= GET_THREAD();
3586 th
->thgroup
= th
->vm
->thgroup_default
= rb_obj_alloc(cThGroup
);
3587 rb_define_const(cThGroup
, "Default", th
->thgroup
);
3590 rb_cMutex
= rb_define_class("Mutex", rb_cObject
);
3591 rb_define_alloc_func(rb_cMutex
, mutex_alloc
);
3592 rb_define_method(rb_cMutex
, "initialize", mutex_initialize
, 0);
3593 rb_define_method(rb_cMutex
, "locked?", rb_mutex_locked_p
, 0);
3594 rb_define_method(rb_cMutex
, "try_lock", rb_mutex_trylock
, 0);
3595 rb_define_method(rb_cMutex
, "lock", rb_mutex_lock
, 0);
3596 rb_define_method(rb_cMutex
, "unlock", rb_mutex_unlock
, 0);
3597 rb_define_method(rb_cMutex
, "sleep", mutex_sleep
, -1);
3599 recursive_key
= rb_intern("__recursive_key__");
3600 rb_eThreadError
= rb_define_class("ThreadError", rb_eStandardError
);
3603 rb_define_global_function("set_trace_func", set_trace_func
, 1);
3604 rb_define_method(rb_cThread
, "set_trace_func", thread_set_trace_func_m
, 1);
3605 rb_define_method(rb_cThread
, "add_trace_func", thread_add_trace_func_m
, 1);
3607 /* init thread core */
3608 Init_native_thread();
3610 /* main thread setting */
3612 /* acquire global interpreter lock */
3613 rb_thread_lock_t
*lp
= &GET_THREAD()->vm
->global_vm_lock
;
3614 native_mutex_initialize(lp
);
3615 native_mutex_lock(lp
);
3616 native_mutex_initialize(&GET_THREAD()->interrupt_lock
);
3620 rb_thread_create_timer_thread();
3622 (void)native_mutex_trylock
;
3623 (void)ruby_thread_set_native
;
3627 ruby_native_thread_p(void)
3629 rb_thread_t
*th
= ruby_thread_from_native();
3631 return th
? Qtrue
: Qfalse
;
3635 check_deadlock_i(st_data_t key
, st_data_t val
, int *found
)
3639 GetThreadPtr(thval
, th
);
3641 if (th
->status
!= THREAD_STOPPED_FOREVER
|| RUBY_VM_INTERRUPTED(th
) || th
->transition_for_lock
) {
3644 else if (th
->locking_mutex
) {
3646 GetMutexPtr(th
->locking_mutex
, mutex
);
3648 native_mutex_lock(&mutex
->lock
);
3649 if (mutex
->th
== th
|| (!mutex
->th
&& mutex
->cond_notified
)) {
3652 native_mutex_unlock(&mutex
->lock
);
3655 return (*found
) ? ST_STOP
: ST_CONTINUE
;
#if 0 /* for debug */
/*
 * Debug-only st_foreach callback: dumps each living thread's status,
 * interrupt flag, lock-transition flag, and (if blocked on a mutex)
 * the mutex owner and notification state. Compiled out by default.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
        mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_notified);
        native_mutex_unlock(&mutex->lock);
    }
    else {
        puts("");
    }

    return ST_CONTINUE;
}
#endif
3682 rb_check_deadlock(rb_vm_t
*vm
)
3686 if (vm_living_thread_num(vm
) > vm
->sleeper
) return;
3687 if (vm_living_thread_num(vm
) < vm
->sleeper
) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3689 st_foreach(vm
->living_threads
, check_deadlock_i
, (st_data_t
)&found
);
3693 argv
[0] = rb_eFatal
;
3694 argv
[1] = rb_str_new2("deadlock detected");
3695 #if 0 /* for debug */
3696 printf("%d %d %p %p\n", vm
->living_threads
->num_entries
, vm
->sleeper
, GET_THREAD(), vm
->main_thread
);
3697 st_foreach(vm
->living_threads
, debug_i
, (st_data_t
)0);
3699 rb_thread_raise(2, argv
, vm
->main_thread
);
3704 update_coverage(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3706 VALUE coverage
= GET_THREAD()->cfp
->iseq
->coverage
;
3707 if (coverage
&& RBASIC(coverage
)->klass
== 0) {
3708 long line
= rb_sourceline() - 1;
3710 if (RARRAY_PTR(coverage
)[line
] == Qnil
) {
3713 count
= FIX2LONG(RARRAY_PTR(coverage
)[line
]) + 1;
3714 if (POSFIXABLE(count
)) {
3715 RARRAY_PTR(coverage
)[line
] = LONG2FIX(count
);
3721 rb_get_coverages(void)
3723 return GET_VM()->coverages
;
3727 rb_set_coverages(VALUE coverages
)
3729 GET_VM()->coverages
= coverages
;
3730 rb_add_event_hook(update_coverage
, RUBY_EVENT_COVERAGE
, Qnil
);
3734 rb_reset_coverages(void)
3736 GET_VM()->coverages
= Qfalse
;
3737 rb_remove_event_hook(update_coverage
);