1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
47 #include "eval_intern.h"
52 #define THREAD_DEBUG 0
58 static void sleep_timeval(rb_thread_t
*th
, struct timeval time
);
59 static void sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
);
60 static void sleep_forever(rb_thread_t
*th
, int nodeadlock
);
61 static double timeofday(void);
62 struct timeval
rb_time_interval(VALUE
);
63 static int rb_thread_dead(rb_thread_t
*th
);
65 static void rb_check_deadlock(rb_vm_t
*vm
);
67 void rb_signal_exec(rb_thread_t
*th
, int sig
);
68 void rb_disable_interrupt(void);
70 static const VALUE eKillSignal
= INT2FIX(0);
71 static const VALUE eTerminateSignal
= INT2FIX(1);
72 static volatile int system_working
= 1;
75 st_delete_wrap(st_table
*table
, st_data_t key
)
77 st_delete(table
, &key
, 0);
80 /********************************************************************************/
82 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
84 static void set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
85 struct rb_unblock_callback
*old
);
86 static void reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
);
88 #define GVL_UNLOCK_BEGIN() do { \
89 rb_thread_t *_th_stored = GET_THREAD(); \
90 rb_gc_save_machine_context(_th_stored); \
91 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
93 #define GVL_UNLOCK_END() \
94 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
95 rb_thread_set_current(_th_stored); \
98 #define BLOCKING_REGION_CORE(exec) do { \
99 GVL_UNLOCK_BEGIN(); {\
105 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
106 rb_thread_t *__th = GET_THREAD(); \
107 enum rb_thread_status __prev_status = __th->status; \
108 struct rb_unblock_callback __oldubf; \
109 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
110 __th->status = THREAD_STOPPED; \
111 thread_debug("enter blocking region (%p)\n", __th); \
112 BLOCKING_REGION_CORE(exec); \
113 thread_debug("leave blocking region (%p)\n", __th); \
114 remove_signal_thread_list(__th); \
115 reset_unblock_function(__th, &__oldubf); \
116 if (__th->status == THREAD_STOPPED) { \
117 __th->status = __prev_status; \
119 RUBY_VM_CHECK_INTS(); \
123 #ifdef HAVE_VA_ARGS_MACRO
124 void rb_thread_debug(const char *file
, int line
, const char *fmt
, ...);
125 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
126 #define POSITION_FORMAT "%s:%d:"
127 #define POSITION_ARGS ,file, line
129 void rb_thread_debug(const char *fmt
, ...);
130 #define thread_debug rb_thread_debug
131 #define POSITION_FORMAT
132 #define POSITION_ARGS
135 # if THREAD_DEBUG < 0
136 static int rb_thread_debug_enabled
;
139 rb_thread_s_debug(void)
141 return INT2NUM(rb_thread_debug_enabled
);
145 rb_thread_s_debug_set(VALUE self
, VALUE val
)
147 rb_thread_debug_enabled
= RTEST(val
);
151 # define rb_thread_debug_enabled THREAD_DEBUG
154 #define thread_debug if(0)printf
158 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
160 NOINLINE(static int thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
,
161 VALUE
*register_stack_start
));
162 static void timer_thread_function(void *);
165 #include "thread_win32.c"
167 #define DEBUG_OUT() \
168 WaitForSingleObject(&debug_mutex, INFINITE); \
169 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
171 ReleaseMutex(&debug_mutex);
173 #elif defined(HAVE_PTHREAD_H)
174 #include "thread_pthread.c"
176 #define DEBUG_OUT() \
177 pthread_mutex_lock(&debug_mutex); \
178 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
180 pthread_mutex_unlock(&debug_mutex);
183 #error "unsupported thread type"
187 static int debug_mutex_initialized
= 1;
188 static rb_thread_lock_t debug_mutex
;
192 #ifdef HAVE_VA_ARGS_MACRO
193 const char *file
, int line
,
195 const char *fmt
, ...)
200 if (!rb_thread_debug_enabled
) return;
202 if (debug_mutex_initialized
== 1) {
203 debug_mutex_initialized
= 0;
204 native_mutex_initialize(&debug_mutex
);
208 vsnprintf(buf
, BUFSIZ
, fmt
, args
);
217 set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
218 struct rb_unblock_callback
*old
)
221 RUBY_VM_CHECK_INTS(); /* check signal or so */
222 native_mutex_lock(&th
->interrupt_lock
);
223 if (th
->interrupt_flag
) {
224 native_mutex_unlock(&th
->interrupt_lock
);
228 if (old
) *old
= th
->unblock
;
229 th
->unblock
.func
= func
;
230 th
->unblock
.arg
= arg
;
232 native_mutex_unlock(&th
->interrupt_lock
);
236 reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
)
238 native_mutex_lock(&th
->interrupt_lock
);
240 native_mutex_unlock(&th
->interrupt_lock
);
244 rb_thread_interrupt(rb_thread_t
*th
)
246 native_mutex_lock(&th
->interrupt_lock
);
247 RUBY_VM_SET_INTERRUPT(th
);
248 if (th
->unblock
.func
) {
249 (th
->unblock
.func
)(th
->unblock
.arg
);
254 native_mutex_unlock(&th
->interrupt_lock
);
259 terminate_i(st_data_t key
, st_data_t val
, rb_thread_t
*main_thread
)
263 GetThreadPtr(thval
, th
);
265 if (th
!= main_thread
) {
266 thread_debug("terminate_i: %p\n", th
);
267 rb_thread_interrupt(th
);
268 th
->thrown_errinfo
= eTerminateSignal
;
269 th
->status
= THREAD_TO_KILL
;
272 thread_debug("terminate_i: main thread (%p)\n", th
);
277 typedef struct rb_mutex_struct
279 rb_thread_lock_t lock
;
280 rb_thread_cond_t cond
;
281 struct rb_thread_struct
volatile *th
;
282 volatile int cond_waiting
, cond_notified
;
283 struct rb_mutex_struct
*next_mutex
;
286 static void rb_mutex_unlock_all(mutex_t
*mutex
);
289 rb_thread_terminate_all(void)
291 rb_thread_t
*th
= GET_THREAD(); /* main thread */
292 rb_vm_t
*vm
= th
->vm
;
293 if (vm
->main_thread
!= th
) {
294 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm
->main_thread
, th
);
297 /* unlock all locking mutexes */
298 if (th
->keeping_mutexes
) {
299 rb_mutex_unlock_all(th
->keeping_mutexes
);
302 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th
);
303 st_foreach(vm
->living_threads
, terminate_i
, (st_data_t
)th
);
305 while (!rb_thread_alone()) {
307 if (EXEC_TAG() == 0) {
308 rb_thread_schedule();
311 /* ignore exception */
319 thread_cleanup_func_before_exec(void *th_ptr
)
321 rb_thread_t
*th
= th_ptr
;
322 th
->status
= THREAD_KILLED
;
323 th
->machine_stack_start
= th
->machine_stack_end
= 0;
325 th
->machine_register_stack_start
= th
->machine_register_stack_end
= 0;
330 thread_cleanup_func(void *th_ptr
)
332 rb_thread_t
*th
= th_ptr
;
333 thread_cleanup_func_before_exec(th_ptr
);
334 native_thread_destroy(th
);
337 extern void ruby_error_print(void);
338 static VALUE
rb_thread_raise(int, VALUE
*, rb_thread_t
*);
339 void rb_thread_recycle_stack_release(VALUE
*);
342 ruby_thread_init_stack(rb_thread_t
*th
)
344 native_thread_init_stack(th
);
348 thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
, VALUE
*register_stack_start
)
351 VALUE args
= th
->first_args
;
353 rb_thread_t
*join_th
;
354 rb_thread_t
*main_th
;
355 VALUE errinfo
= Qnil
;
357 th
->machine_stack_start
= stack_start
;
359 th
->machine_register_stack_start
= register_stack_start
;
361 thread_debug("thread start: %p\n", th
);
363 native_mutex_lock(&th
->vm
->global_vm_lock
);
365 thread_debug("thread start (get lock): %p\n", th
);
366 rb_thread_set_current(th
);
369 if ((state
= EXEC_TAG()) == 0) {
370 SAVE_ROOT_JMPBUF(th
, {
371 if (th
->first_proc
) {
372 GetProcPtr(th
->first_proc
, proc
);
374 th
->local_lfp
= proc
->block
.lfp
;
375 th
->local_svar
= Qnil
;
376 th
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
,
377 RARRAY_LEN(args
), RARRAY_PTR(args
), 0);
380 th
->value
= (*th
->first_func
)((void *)th
->first_args
);
385 errinfo
= th
->errinfo
;
386 if (NIL_P(errinfo
)) errinfo
= rb_errinfo();
387 if (state
== TAG_FATAL
) {
388 /* fatal error within this thread, need to stop whole script */
390 else if (rb_obj_is_kind_of(errinfo
, rb_eSystemExit
)) {
391 if (th
->safe_level
>= 4) {
392 th
->errinfo
= rb_exc_new3(rb_eSecurityError
,
393 rb_sprintf("Insecure exit at level %d", th
->safe_level
));
397 else if (th
->safe_level
< 4 &&
398 (th
->vm
->thread_abort_on_exception
||
399 th
->abort_on_exception
|| RTEST(ruby_debug
))) {
400 /* exit on main_thread */
408 th
->status
= THREAD_KILLED
;
409 thread_debug("thread end: %p\n", th
);
411 main_th
= th
->vm
->main_thread
;
413 if (TYPE(errinfo
) == T_OBJECT
) {
414 /* treat with normal error object */
415 rb_thread_raise(1, &errinfo
, main_th
);
420 /* locking_mutex must be Qfalse */
421 if (th
->locking_mutex
!= Qfalse
) {
422 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE
")",
423 th
, th
->locking_mutex
);
426 /* unlock all locking mutexes */
427 if (th
->keeping_mutexes
) {
428 rb_mutex_unlock_all(th
->keeping_mutexes
);
429 th
->keeping_mutexes
= NULL
;
432 /* delete self from living_threads */
433 st_delete_wrap(th
->vm
->living_threads
, th
->self
);
435 /* wake up joinning threads */
436 join_th
= th
->join_list_head
;
438 if (join_th
== main_th
) errinfo
= Qnil
;
439 rb_thread_interrupt(join_th
);
440 switch (join_th
->status
) {
441 case THREAD_STOPPED
: case THREAD_STOPPED_FOREVER
:
442 join_th
->status
= THREAD_RUNNABLE
;
445 join_th
= join_th
->join_list_next
;
447 if (th
!= main_th
) rb_check_deadlock(th
->vm
);
449 if (!th
->root_fiber
) {
450 rb_thread_recycle_stack_release(th
->stack
);
454 thread_cleanup_func(th
);
455 native_mutex_unlock(&th
->vm
->global_vm_lock
);
461 thread_create_core(VALUE thval
, VALUE args
, VALUE (*fn
)(ANYARGS
))
465 if (OBJ_FROZEN(GET_THREAD()->thgroup
)) {
466 rb_raise(rb_eThreadError
,
467 "can't start a new thread (frozen ThreadGroup)");
469 GetThreadPtr(thval
, th
);
471 /* setup thread environment */
473 th
->first_proc
= fn
? Qfalse
: rb_block_proc();
474 th
->first_args
= args
; /* GC: shouldn't put before above line */
476 th
->priority
= GET_THREAD()->priority
;
477 th
->thgroup
= GET_THREAD()->thgroup
;
479 native_mutex_initialize(&th
->interrupt_lock
);
481 st_insert(th
->vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
482 native_thread_create(th
);
487 thread_s_new(int argc
, VALUE
*argv
, VALUE klass
)
490 VALUE thread
= rb_thread_alloc(klass
);
491 rb_obj_call_init(thread
, argc
, argv
);
492 GetThreadPtr(thread
, th
);
493 if (!th
->first_args
) {
494 rb_raise(rb_eThreadError
, "uninitialized thread - check `%s#initialize'",
495 rb_class2name(klass
));
502 * Thread.start([args]*) {|args| block } => thread
503 * Thread.fork([args]*) {|args| block } => thread
505 * Basically the same as <code>Thread::new</code>. However, if class
506 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
507 * subclass will not invoke the subclass's <code>initialize</code> method.
511 thread_start(VALUE klass
, VALUE args
)
513 return thread_create_core(rb_thread_alloc(klass
), args
, 0);
517 thread_initialize(VALUE thread
, VALUE args
)
520 if (!rb_block_given_p()) {
521 rb_raise(rb_eThreadError
, "must be called with a block");
523 GetThreadPtr(thread
, th
);
524 if (th
->first_args
) {
525 VALUE
rb_proc_location(VALUE self
);
526 VALUE proc
= th
->first_proc
, line
, loc
;
528 if (!proc
|| !RTEST(loc
= rb_proc_location(proc
))) {
529 rb_raise(rb_eThreadError
, "already initialized thread");
531 file
= RSTRING_PTR(RARRAY_PTR(loc
)[0]);
532 if (NIL_P(line
= RARRAY_PTR(loc
)[1])) {
533 rb_raise(rb_eThreadError
, "already initialized thread - %s",
536 rb_raise(rb_eThreadError
, "already initialized thread - %s:%d",
537 file
, NUM2INT(line
));
539 return thread_create_core(thread
, args
, 0);
543 rb_thread_create(VALUE (*fn
)(ANYARGS
), void *arg
)
545 return thread_create_core(rb_thread_alloc(rb_cThread
), (VALUE
)arg
, fn
);
549 /* +infty, for this purpose */
550 #define DELAY_INFTY 1E30
553 rb_thread_t
*target
, *waiting
;
559 remove_from_join_list(VALUE arg
)
561 struct join_arg
*p
= (struct join_arg
*)arg
;
562 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
564 if (target_th
->status
!= THREAD_KILLED
) {
565 rb_thread_t
**pth
= &target_th
->join_list_head
;
569 *pth
= th
->join_list_next
;
572 pth
= &(*pth
)->join_list_next
;
580 thread_join_sleep(VALUE arg
)
582 struct join_arg
*p
= (struct join_arg
*)arg
;
583 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
584 double now
, limit
= p
->limit
;
586 while (target_th
->status
!= THREAD_KILLED
) {
588 sleep_forever(th
, 1);
593 thread_debug("thread_join: timeout (thid: %p)\n",
594 (void *)target_th
->thread_id
);
597 sleep_wait_for_interrupt(th
, limit
- now
);
599 thread_debug("thread_join: interrupted (thid: %p)\n",
600 (void *)target_th
->thread_id
);
606 thread_join(rb_thread_t
*target_th
, double delay
)
608 rb_thread_t
*th
= GET_THREAD();
611 arg
.target
= target_th
;
613 arg
.limit
= timeofday() + delay
;
614 arg
.forever
= delay
== DELAY_INFTY
;
616 thread_debug("thread_join (thid: %p)\n", (void *)target_th
->thread_id
);
618 if (target_th
->status
!= THREAD_KILLED
) {
619 th
->join_list_next
= target_th
->join_list_head
;
620 target_th
->join_list_head
= th
;
621 if (!rb_ensure(thread_join_sleep
, (VALUE
)&arg
,
622 remove_from_join_list
, (VALUE
)&arg
)) {
627 thread_debug("thread_join: success (thid: %p)\n",
628 (void *)target_th
->thread_id
);
630 if (target_th
->errinfo
!= Qnil
) {
631 VALUE err
= target_th
->errinfo
;
636 else if (TYPE(target_th
->errinfo
) == T_NODE
) {
637 rb_exc_raise(vm_make_jump_tag_but_local_jump(
638 GET_THROWOBJ_STATE(err
), GET_THROWOBJ_VAL(err
)));
641 /* normal exception */
645 return target_th
->self
;
651 * thr.join(limit) => thr
653 * The calling thread will suspend execution and run <i>thr</i>. Does not
654 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
655 * the time limit expires, <code>nil</code> will be returned, otherwise
656 * <i>thr</i> is returned.
658 * Any threads not joined will be killed when the main program exits. If
659 * <i>thr</i> had previously raised an exception and the
660 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
661 * (so the exception has not yet been processed) it will be processed at this
664 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
665 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
666 * x.join # Let x thread finish, a will be killed on exit.
672 * The following example illustrates the <i>limit</i> parameter.
674 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
675 * puts "Waiting" until y.join(0.15)
689 thread_join_m(int argc
, VALUE
*argv
, VALUE self
)
691 rb_thread_t
*target_th
;
692 double delay
= DELAY_INFTY
;
695 GetThreadPtr(self
, target_th
);
697 rb_scan_args(argc
, argv
, "01", &limit
);
699 delay
= rb_num2dbl(limit
);
702 return thread_join(target_th
, delay
);
709 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
712 * a = Thread.new { 2 + 2 }
717 thread_value(VALUE self
)
720 GetThreadPtr(self
, th
);
721 thread_join(th
, DELAY_INFTY
);
/* Convert a duration in seconds (double) to a struct timeval.
 * Truncates toward zero, then normalizes a negative microsecond part
 * (which occurs for negative fractional inputs) into the canonical
 * 0 <= tv_usec < 1e6 form. */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
	time.tv_usec += (long)1e6;
	time.tv_sec -= 1;
    }
    return time;
}
744 sleep_forever(rb_thread_t
*th
, int deadlockable
)
746 enum rb_thread_status prev_status
= th
->status
;
748 th
->status
= deadlockable
? THREAD_STOPPED_FOREVER
: THREAD_STOPPED
;
752 rb_check_deadlock(th
->vm
);
758 RUBY_VM_CHECK_INTS();
759 } while (th
->status
== THREAD_STOPPED_FOREVER
);
760 th
->status
= prev_status
;
/* Fill *tp with the current time for relative-timeout arithmetic.
 * Prefers the monotonic clock (immune to wall-clock adjustment) when
 * available, converting nanoseconds to microseconds; otherwise falls back
 * to gettimeofday(). */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;
    }
    else
#endif
    {
	gettimeofday(tp, NULL);
    }
}
780 sleep_timeval(rb_thread_t
*th
, struct timeval tv
)
782 struct timeval to
, tvn
;
783 enum rb_thread_status prev_status
= th
->status
;
786 to
.tv_sec
+= tv
.tv_sec
;
787 if ((to
.tv_usec
+= tv
.tv_usec
) >= 1000000) {
789 to
.tv_usec
-= 1000000;
792 th
->status
= THREAD_STOPPED
;
794 native_sleep(th
, &tv
);
795 RUBY_VM_CHECK_INTS();
797 if (to
.tv_sec
< tvn
.tv_sec
) break;
798 if (to
.tv_sec
== tvn
.tv_sec
&& to
.tv_usec
<= tvn
.tv_usec
) break;
799 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
800 (long)to
.tv_sec
, to
.tv_usec
,
801 (long)tvn
.tv_sec
, tvn
.tv_usec
);
802 tv
.tv_sec
= to
.tv_sec
- tvn
.tv_sec
;
803 if ((tv
.tv_usec
= to
.tv_usec
- tvn
.tv_usec
) < 0) {
805 tv
.tv_usec
+= 1000000;
807 } while (th
->status
== THREAD_STOPPED
);
808 th
->status
= prev_status
;
/* Sleep the current thread indefinitely (not deadlock-detectable). */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/* Sleep the current thread indefinitely, participating in VM deadlock
 * detection (used by Thread.stop). */
void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
/* Current time as a double (seconds).  Uses the monotonic clock when
 * available so join/sleep deadlines are immune to wall-clock changes.
 * NOTE(review): the function header was dropped by the extraction;
 * the prototype at original line 61 declares `static double timeofday(void)`. */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
	return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    }
    else
#endif
    {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
843 sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
)
845 sleep_timeval(th
, double2timeval(sleepsec
));
849 sleep_for_polling(rb_thread_t
*th
)
853 time
.tv_usec
= 100 * 1000; /* 0.1 sec */
854 sleep_timeval(th
, time
);
858 rb_thread_wait_for(struct timeval time
)
860 rb_thread_t
*th
= GET_THREAD();
861 sleep_timeval(th
, time
);
865 rb_thread_polling(void)
867 RUBY_VM_CHECK_INTS();
868 if (!rb_thread_alone()) {
869 rb_thread_t
*th
= GET_THREAD();
870 sleep_for_polling(th
);
874 struct timeval
rb_time_timeval();
/* Public API: sleep the current thread for `sec` whole seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
883 rb_thread_schedule(void)
885 thread_debug("rb_thread_schedule\n");
886 if (!rb_thread_alone()) {
887 rb_thread_t
*th
= GET_THREAD();
889 thread_debug("rb_thread_schedule/switch start\n");
891 rb_gc_save_machine_context(th
);
892 native_mutex_unlock(&th
->vm
->global_vm_lock
);
894 native_thread_yield();
896 native_mutex_lock(&th
->vm
->global_vm_lock
);
898 rb_thread_set_current(th
);
899 thread_debug("rb_thread_schedule/switch done\n");
901 RUBY_VM_CHECK_INTS();
905 int rb_thread_critical
; /* TODO: dummy variable */
908 rb_thread_blocking_region(
909 rb_blocking_function_t
*func
, void *data1
,
910 rb_unblock_function_t
*ubf
, void *data2
)
913 rb_thread_t
*th
= GET_THREAD();
915 if (ubf
== RB_UBF_DFL
) {
931 * Invokes the thread scheduler to pass execution to another thread.
933 * a = Thread.new { print "a"; Thread.pass;
934 * print "b"; Thread.pass;
936 * b = Thread.new { print "x"; Thread.pass;
937 * print "y"; Thread.pass;
948 thread_s_pass(VALUE klass
)
950 rb_thread_schedule();
959 rb_thread_execute_interrupts(rb_thread_t
*th
)
961 if (th
->raised_flag
) return;
963 while (th
->interrupt_flag
) {
964 enum rb_thread_status status
= th
->status
;
965 int timer_interrupt
= th
->interrupt_flag
& 0x01;
966 int finalizer_interrupt
= th
->interrupt_flag
& 0x04;
968 th
->status
= THREAD_RUNNABLE
;
969 th
->interrupt_flag
= 0;
971 /* signal handling */
972 if (th
->exec_signal
) {
973 int sig
= th
->exec_signal
;
975 rb_signal_exec(th
, sig
);
978 /* exception from another thread */
979 if (th
->thrown_errinfo
) {
980 VALUE err
= th
->thrown_errinfo
;
981 th
->thrown_errinfo
= 0;
982 thread_debug("rb_thread_execute_interrupts: %ld\n", err
);
984 if (err
== eKillSignal
|| err
== eTerminateSignal
) {
985 th
->errinfo
= INT2FIX(TAG_FATAL
);
986 TH_JUMP_TAG(th
, TAG_FATAL
);
994 if (finalizer_interrupt
) {
995 rb_gc_finalize_deferred();
998 if (timer_interrupt
) {
999 EXEC_EVENT_HOOK(th
, RUBY_EVENT_SWITCH
, th
->cfp
->self
, 0, 0);
1000 rb_thread_schedule();
1007 rb_gc_mark_threads(void)
1012 /*****************************************************/
1015 rb_thread_ready(rb_thread_t
*th
)
1017 rb_thread_interrupt(th
);
1021 rb_thread_raise(int argc
, VALUE
*argv
, rb_thread_t
*th
)
1026 if (rb_thread_dead(th
)) {
1030 if (th
->thrown_errinfo
!= 0 || th
->raised_flag
) {
1031 rb_thread_schedule();
1035 exc
= rb_make_exception(argc
, argv
);
1036 th
->thrown_errinfo
= exc
;
1037 rb_thread_ready(th
);
1042 rb_thread_signal_raise(void *thptr
, int sig
)
1045 rb_thread_t
*th
= thptr
;
1047 argv
[0] = rb_eSignal
;
1048 argv
[1] = INT2FIX(sig
);
1049 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1053 rb_thread_signal_exit(void *thptr
)
1056 rb_thread_t
*th
= thptr
;
1058 argv
[0] = rb_eSystemExit
;
1059 argv
[1] = rb_str_new2("exit");
1060 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1064 rb_thread_set_raised(rb_thread_t
*th
)
1066 if (th
->raised_flag
& RAISED_EXCEPTION
) {
1069 th
->raised_flag
|= RAISED_EXCEPTION
;
1074 rb_thread_reset_raised(rb_thread_t
*th
)
1076 if (!(th
->raised_flag
& RAISED_EXCEPTION
)) {
1079 th
->raised_flag
&= ~RAISED_EXCEPTION
;
1084 rb_thread_fd_close(int fd
)
1091 * thr.raise(exception)
1093 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1094 * caller does not have to be <i>thr</i>.
1096 * Thread.abort_on_exception = true
1097 * a = Thread.new { sleep(200) }
1100 * <em>produces:</em>
1102 * prog.rb:3: Gotcha (RuntimeError)
1103 * from prog.rb:2:in `initialize'
1104 * from prog.rb:2:in `new'
1109 thread_raise_m(int argc
, VALUE
*argv
, VALUE self
)
1112 GetThreadPtr(self
, th
);
1113 rb_thread_raise(argc
, argv
, th
);
1120 * thr.exit => thr or nil
1121 * thr.kill => thr or nil
1122 * thr.terminate => thr or nil
1124 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1125 * is already marked to be killed, <code>exit</code> returns the
1126 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1131 rb_thread_kill(VALUE thread
)
1135 GetThreadPtr(thread
, th
);
1137 if (th
!= GET_THREAD() && th
->safe_level
< 4) {
1140 if (th
->status
== THREAD_TO_KILL
|| th
->status
== THREAD_KILLED
) {
1143 if (th
== th
->vm
->main_thread
) {
1144 rb_exit(EXIT_SUCCESS
);
1147 thread_debug("rb_thread_kill: %p (%p)\n", th
, (void *)th
->thread_id
);
1149 rb_thread_interrupt(th
);
1150 th
->thrown_errinfo
= eKillSignal
;
1151 th
->status
= THREAD_TO_KILL
;
1159 * Thread.kill(thread) => thread
1161 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1164 * a = Thread.new { loop { count += 1 } }
1166 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1168 * a.alive? #=> false
1172 rb_thread_s_kill(VALUE obj
, VALUE th
)
1174 return rb_thread_kill(th
);
1180 * Thread.exit => thread
1182 * Terminates the currently running thread and schedules another thread to be
1183 * run. If this thread is already marked to be killed, <code>exit</code>
1184 * returns the <code>Thread</code>. If this is the main thread, or the last
1185 * thread, exit the process.
1189 rb_thread_exit(void)
1191 return rb_thread_kill(GET_THREAD()->self
);
1199 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1200 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1202 * c = Thread.new { Thread.stop; puts "hey!" }
1205 * <em>produces:</em>
1211 rb_thread_wakeup(VALUE thread
)
1214 GetThreadPtr(thread
, th
);
1216 if (th
->status
== THREAD_KILLED
) {
1217 rb_raise(rb_eThreadError
, "killed thread");
1219 rb_thread_ready(th
);
1220 if (th
->status
!= THREAD_TO_KILL
) {
1221 th
->status
= THREAD_RUNNABLE
;
1231 * Wakes up <i>thr</i>, making it eligible for scheduling.
1233 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1239 * <em>produces:</em>
1247 rb_thread_run(VALUE thread
)
1249 rb_thread_wakeup(thread
);
1250 rb_thread_schedule();
1257 * Thread.stop => nil
1259 * Stops execution of the current thread, putting it into a ``sleep'' state,
1260 * and schedules execution of another thread.
1262 * a = Thread.new { print "a"; Thread.stop; print "c" }
1268 * <em>produces:</em>
1274 rb_thread_stop(void)
1276 if (rb_thread_alone()) {
1277 rb_raise(rb_eThreadError
,
1278 "stopping only thread\n\tnote: use sleep to stop forever");
1280 rb_thread_sleep_deadly();
1285 thread_list_i(st_data_t key
, st_data_t val
, void *data
)
1287 VALUE ary
= (VALUE
)data
;
1289 GetThreadPtr((VALUE
)key
, th
);
1291 switch (th
->status
) {
1292 case THREAD_RUNNABLE
:
1293 case THREAD_STOPPED
:
1294 case THREAD_STOPPED_FOREVER
:
1295 case THREAD_TO_KILL
:
1296 rb_ary_push(ary
, th
->self
);
1303 /********************************************************************/
1307 * Thread.list => array
1309 * Returns an array of <code>Thread</code> objects for all threads that are
1310 * either runnable or stopped.
1312 * Thread.new { sleep(200) }
1313 * Thread.new { 1000000.times {|i| i*i } }
1314 * Thread.new { Thread.stop }
1315 * Thread.list.each {|t| p t}
1317 * <em>produces:</em>
1319 * #<Thread:0x401b3e84 sleep>
1320 * #<Thread:0x401b3f38 run>
1321 * #<Thread:0x401b3fb0 sleep>
1322 * #<Thread:0x401bdf4c run>
1326 rb_thread_list(void)
1328 VALUE ary
= rb_ary_new();
1329 st_foreach(GET_THREAD()->vm
->living_threads
, thread_list_i
, ary
);
1334 rb_thread_current(void)
1336 return GET_THREAD()->self
;
1341 * Thread.current => thread
1343 * Returns the currently executing thread.
1345 * Thread.current #=> #<Thread:0x401bdf4c run>
1349 thread_s_current(VALUE klass
)
1351 return rb_thread_current();
1355 rb_thread_main(void)
1357 return GET_THREAD()->vm
->main_thread
->self
;
1361 rb_thread_s_main(VALUE klass
)
1363 return rb_thread_main();
1369 * Thread.abort_on_exception => true or false
1371 * Returns the status of the global ``abort on exception'' condition. The
1372 * default is <code>false</code>. When set to <code>true</code>, or if the
1373 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1374 * command line option <code>-d</code> was specified) all threads will abort
1375 * (the process will <code>exit(0)</code>) if an exception is raised in any
1376 * thread. See also <code>Thread::abort_on_exception=</code>.
1380 rb_thread_s_abort_exc(void)
1382 return GET_THREAD()->vm
->thread_abort_on_exception
? Qtrue
: Qfalse
;
1388 * Thread.abort_on_exception= boolean => true or false
1390 * When set to <code>true</code>, all threads will abort if an exception is
1391 * raised. Returns the new state.
1393 * Thread.abort_on_exception = true
1394 * t1 = Thread.new do
1395 * puts "In new thread"
1396 * raise "Exception from thread"
1399 * puts "not reached"
1401 * <em>produces:</em>
1404 * prog.rb:4: Exception from thread (RuntimeError)
1405 * from prog.rb:2:in `initialize'
1406 * from prog.rb:2:in `new'
1411 rb_thread_s_abort_exc_set(VALUE self
, VALUE val
)
1414 GET_THREAD()->vm
->thread_abort_on_exception
= RTEST(val
);
1421 * thr.abort_on_exception => true or false
1423 * Returns the status of the thread-local ``abort on exception'' condition for
1424 * <i>thr</i>. The default is <code>false</code>. See also
1425 * <code>Thread::abort_on_exception=</code>.
1429 rb_thread_abort_exc(VALUE thread
)
1432 GetThreadPtr(thread
, th
);
1433 return th
->abort_on_exception
? Qtrue
: Qfalse
;
1439 * thr.abort_on_exception= boolean => true or false
1441 * When set to <code>true</code>, causes all threads (including the main
1442 * program) to abort if an exception is raised in <i>thr</i>. The process will
1443 * effectively <code>exit(0)</code>.
1447 rb_thread_abort_exc_set(VALUE thread
, VALUE val
)
1452 GetThreadPtr(thread
, th
);
1453 th
->abort_on_exception
= RTEST(val
);
1460 * thr.group => thgrp or nil
1462 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1463 * the thread is not a member of any group.
1465 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1469 rb_thread_group(VALUE thread
)
1473 GetThreadPtr(thread
, th
);
1474 group
= th
->thgroup
;
1483 thread_status_name(enum rb_thread_status status
)
1486 case THREAD_RUNNABLE
:
1488 case THREAD_STOPPED
:
1489 case THREAD_STOPPED_FOREVER
:
1491 case THREAD_TO_KILL
:
1501 rb_thread_dead(rb_thread_t
*th
)
1503 return th
->status
== THREAD_KILLED
;
1509 * thr.status => string, false or nil
1511 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1512 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1513 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1514 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1515 * terminated with an exception.
1517 * a = Thread.new { raise("die now") }
1518 * b = Thread.new { Thread.stop }
1519 * c = Thread.new { Thread.exit }
1520 * d = Thread.new { sleep }
1521 * d.kill #=> #<Thread:0x401b3678 aborting>
1523 * b.status #=> "sleep"
1524 * c.status #=> false
1525 * d.status #=> "aborting"
1526 * Thread.current.status #=> "run"
1530 rb_thread_status(VALUE thread
)
1533 GetThreadPtr(thread
, th
);
1535 if (rb_thread_dead(th
)) {
1536 if (!NIL_P(th
->errinfo
) && !FIXNUM_P(th
->errinfo
)
1542 return rb_str_new2(thread_status_name(th
->status
));
1548 * thr.alive? => true or false
1550 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1552 * thr = Thread.new { }
1553 * thr.join #=> #<Thread:0x401b3fb0 dead>
1554 * Thread.current.alive? #=> true
1555 * thr.alive? #=> false
1559 rb_thread_alive_p(VALUE thread
)
1562 GetThreadPtr(thread
, th
);
1564 if (rb_thread_dead(th
))
1571 * thr.stop? => true or false
1573 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1575 * a = Thread.new { Thread.stop }
1576 * b = Thread.current
1582 rb_thread_stop_p(VALUE thread
)
1585 GetThreadPtr(thread
, th
);
1587 if (rb_thread_dead(th
))
1589 if (th
->status
== THREAD_STOPPED
|| th
->status
== THREAD_STOPPED_FOREVER
)
1596 * thr.safe_level => integer
1598 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1599 * levels can help when implementing sandboxes which run insecure code.
1601 * thr = Thread.new { $SAFE = 3; sleep }
1602 * Thread.current.safe_level #=> 0
1603 * thr.safe_level #=> 3
1607 rb_thread_safe_level(VALUE thread
)
1610 GetThreadPtr(thread
, th
);
1612 return INT2NUM(th
->safe_level
);
1617 * thr.inspect => string
1619 * Dump the name, id, and status of _thr_ to a string.
1623 rb_thread_inspect(VALUE thread
)
1625 const char *cname
= rb_obj_classname(thread
);
1630 GetThreadPtr(thread
, th
);
1631 status
= thread_status_name(th
->status
);
1632 str
= rb_sprintf("#<%s:%p %s>", cname
, (void *)thread
, status
);
1633 OBJ_INFECT(str
, thread
);
1639 rb_thread_local_aref(VALUE thread
, ID id
)
1644 GetThreadPtr(thread
, th
);
1645 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1646 rb_raise(rb_eSecurityError
, "Insecure: thread locals");
1648 if (!th
->local_storage
) {
1651 if (st_lookup(th
->local_storage
, id
, &val
)) {
1659 * thr[sym] => obj or nil
1661 * Attribute Reference---Returns the value of a thread-local variable, using
1662 * either a symbol or a string name. If the specified variable does not exist,
1663 * returns <code>nil</code>.
1665 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1666 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1667 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1668 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1670 * <em>produces:</em>
1672 * #<Thread:0x401b3b3c sleep>: C
1673 * #<Thread:0x401b3bc8 sleep>: B
1674 * #<Thread:0x401b3c68 sleep>: A
1675 * #<Thread:0x401bdf4c run>:
1679 rb_thread_aref(VALUE thread
, VALUE id
)
1681 return rb_thread_local_aref(thread
, rb_to_id(id
));
1685 rb_thread_local_aset(VALUE thread
, ID id
, VALUE val
)
1688 GetThreadPtr(thread
, th
);
1690 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1691 rb_raise(rb_eSecurityError
, "Insecure: can't modify thread locals");
1693 if (OBJ_FROZEN(thread
)) {
1694 rb_error_frozen("thread locals");
1696 if (!th
->local_storage
) {
1697 th
->local_storage
= st_init_numtable();
1700 st_delete_wrap(th
->local_storage
, id
);
1703 st_insert(th
->local_storage
, id
, val
);
1709 * thr[sym] = obj => obj
1711 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1712 * using either a symbol or a string. See also <code>Thread#[]</code>.
1716 rb_thread_aset(VALUE self
, ID id
, VALUE val
)
1718 return rb_thread_local_aset(self
, rb_to_id(id
), val
);
1723 * thr.key?(sym) => true or false
1725 * Returns <code>true</code> if the given string (or symbol) exists as a
1726 * thread-local variable.
1728 * me = Thread.current
1730 * me.key?(:oliver) #=> true
1731 * me.key?(:stanley) #=> false
1735 rb_thread_key_p(VALUE self
, VALUE key
)
1738 ID id
= rb_to_id(key
);
1740 GetThreadPtr(self
, th
);
1742 if (!th
->local_storage
) {
1745 if (st_lookup(th
->local_storage
, id
, 0)) {
1752 thread_keys_i(ID key
, VALUE value
, VALUE ary
)
1754 rb_ary_push(ary
, ID2SYM(key
));
1759 vm_living_thread_num(rb_vm_t
*vm
)
1761 return vm
->living_threads
->num_entries
;
1768 if (GET_THREAD()->vm
->living_threads
) {
1769 num
= vm_living_thread_num(GET_THREAD()->vm
);
1770 thread_debug("rb_thread_alone: %d\n", num
);
1779 * Returns an an array of the names of the thread-local variables (as Symbols).
1781 * thr = Thread.new do
1782 * Thread.current[:cat] = 'meow'
1783 * Thread.current["dog"] = 'woof'
1785 * thr.join #=> #<Thread:0x401b3f10 dead>
1786 * thr.keys #=> [:dog, :cat]
1790 rb_thread_keys(VALUE self
)
1793 VALUE ary
= rb_ary_new();
1794 GetThreadPtr(self
, th
);
1796 if (th
->local_storage
) {
1797 st_foreach(th
->local_storage
, thread_keys_i
, ary
);
1804 * thr.priority => integer
1806 * Returns the priority of <i>thr</i>. Default is inherited from the
1807 * current thread which creating the new thread, or zero for the
1808 * initial main thread; higher-priority threads will run before
1809 * lower-priority threads.
1811 * Thread.current.priority #=> 0
1815 rb_thread_priority(VALUE thread
)
1818 GetThreadPtr(thread
, th
);
1819 return INT2NUM(th
->priority
);
1825 * thr.priority= integer => thr
1827 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1828 * will run before lower-priority threads.
1830 * count1 = count2 = 0
1832 * loop { count1 += 1 }
1837 * loop { count2 += 1 }
1846 rb_thread_priority_set(VALUE thread
, VALUE prio
)
1849 GetThreadPtr(thread
, th
);
1853 th
->priority
= NUM2INT(prio
);
1854 native_thread_apply_priority(th
);
1860 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1862 rb_fd_init(volatile rb_fdset_t
*fds
)
1865 fds
->fdset
= ALLOC(fd_set
);
1866 FD_ZERO(fds
->fdset
);
1870 rb_fd_term(rb_fdset_t
*fds
)
1872 if (fds
->fdset
) xfree(fds
->fdset
);
1878 rb_fd_zero(rb_fdset_t
*fds
)
1881 MEMZERO(fds
->fdset
, fd_mask
, howmany(fds
->maxfd
, NFDBITS
));
1882 FD_ZERO(fds
->fdset
);
1887 rb_fd_resize(int n
, rb_fdset_t
*fds
)
1889 int m
= howmany(n
+ 1, NFDBITS
) * sizeof(fd_mask
);
1890 int o
= howmany(fds
->maxfd
, NFDBITS
) * sizeof(fd_mask
);
1892 if (m
< sizeof(fd_set
)) m
= sizeof(fd_set
);
1893 if (o
< sizeof(fd_set
)) o
= sizeof(fd_set
);
1896 fds
->fdset
= realloc(fds
->fdset
, m
);
1897 memset((char *)fds
->fdset
+ o
, 0, m
- o
);
1899 if (n
>= fds
->maxfd
) fds
->maxfd
= n
+ 1;
1903 rb_fd_set(int n
, rb_fdset_t
*fds
)
1905 rb_fd_resize(n
, fds
);
1906 FD_SET(n
, fds
->fdset
);
1910 rb_fd_clr(int n
, rb_fdset_t
*fds
)
1912 if (n
>= fds
->maxfd
) return;
1913 FD_CLR(n
, fds
->fdset
);
1917 rb_fd_isset(int n
, const rb_fdset_t
*fds
)
1919 if (n
>= fds
->maxfd
) return 0;
1920 return FD_ISSET(n
, fds
->fdset
) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1924 rb_fd_copy(rb_fdset_t
*dst
, const fd_set
*src
, int max
)
1926 int size
= howmany(max
, NFDBITS
) * sizeof(fd_mask
);
1928 if (size
< sizeof(fd_set
)) size
= sizeof(fd_set
);
1930 dst
->fdset
= realloc(dst
->fdset
, size
);
1931 memcpy(dst
->fdset
, src
, size
);
1935 rb_fd_select(int n
, rb_fdset_t
*readfds
, rb_fdset_t
*writefds
, rb_fdset_t
*exceptfds
, struct timeval
*timeout
)
1937 fd_set
*r
= NULL
, *w
= NULL
, *e
= NULL
;
1939 rb_fd_resize(n
- 1, readfds
);
1940 r
= rb_fd_ptr(readfds
);
1943 rb_fd_resize(n
- 1, writefds
);
1944 w
= rb_fd_ptr(writefds
);
1947 rb_fd_resize(n
- 1, exceptfds
);
1948 e
= rb_fd_ptr(exceptfds
);
1950 return select(n
, r
, w
, e
, timeout
);
1958 #define FD_ZERO(f) rb_fd_zero(f)
1959 #define FD_SET(i, f) rb_fd_set(i, f)
1960 #define FD_CLR(i, f) rb_fd_clr(i, f)
1961 #define FD_ISSET(i, f) rb_fd_isset(i, f)
1965 #if defined(__CYGWIN__) || defined(_WIN32)
1967 cmp_tv(const struct timeval
*a
, const struct timeval
*b
)
1969 long d
= (a
->tv_sec
- b
->tv_sec
);
1970 return (d
!= 0) ? d
: (a
->tv_usec
- b
->tv_usec
);
1974 subtract_tv(struct timeval
*rest
, const struct timeval
*wait
)
1976 while (rest
->tv_usec
< wait
->tv_usec
) {
1977 if (rest
->tv_sec
<= wait
->tv_sec
) {
1981 rest
->tv_usec
+= 1000 * 1000;
1983 rest
->tv_sec
-= wait
->tv_sec
;
1984 rest
->tv_usec
-= wait
->tv_usec
;
1990 do_select(int n
, fd_set
*read
, fd_set
*write
, fd_set
*except
,
1991 struct timeval
*timeout
)
1994 fd_set orig_read
, orig_write
, orig_except
;
1998 struct timeval wait_rest
;
1999 # if defined(__CYGWIN__) || defined(_WIN32)
2000 struct timeval start_time
;
2004 # if defined(__CYGWIN__) || defined(_WIN32)
2005 gettimeofday(&start_time
, NULL
);
2006 limit
= (double)start_time
.tv_sec
+ (double)start_time
.tv_usec
*1e-6;
2008 limit
= timeofday();
2010 limit
+= (double)timeout
->tv_sec
+(double)timeout
->tv_usec
*1e-6;
2011 wait_rest
= *timeout
;
2012 timeout
= &wait_rest
;
2016 if (read
) orig_read
= *read
;
2017 if (write
) orig_write
= *write
;
2018 if (except
) orig_except
= *except
;
2023 #if defined(__CYGWIN__) || defined(_WIN32)
2026 /* polling duration: 100ms */
2027 struct timeval wait_100ms
, *wait
;
2028 wait_100ms
.tv_sec
= 0;
2029 wait_100ms
.tv_usec
= 100 * 1000; /* 100 ms */
2032 wait
= (timeout
== 0 || cmp_tv(&wait_100ms
, timeout
) > 0) ? &wait_100ms
: timeout
;
2035 result
= select(n
, read
, write
, except
, wait
);
2036 if (result
< 0) lerrno
= errno
;
2037 if (result
!= 0) break;
2039 if (read
) *read
= orig_read
;
2040 if (write
) *write
= orig_write
;
2041 if (except
) *except
= orig_except
;
2044 struct timeval elapsed
;
2045 gettimeofday(&elapsed
, NULL
);
2046 subtract_tv(&elapsed
, &start_time
);
2047 if (!subtract_tv(timeout
, &elapsed
)) {
2051 if (cmp_tv(&wait_100ms
, timeout
) < 0) wait
= timeout
;
2053 } while (__th
->interrupt_flag
== 0);
2055 } while (result
== 0 && !finish
);
2059 result
= select(n
, read
, write
, except
, timeout
);
2060 if (result
< 0) lerrno
= errno
;
2061 }, ubf_select
, GET_THREAD());
2072 if (read
) *read
= orig_read
;
2073 if (write
) *write
= orig_write
;
2074 if (except
) *except
= orig_except
;
2077 double d
= limit
- timeofday();
2079 wait_rest
.tv_sec
= (unsigned int)d
;
2080 wait_rest
.tv_usec
= (long)((d
-(double)wait_rest
.tv_sec
)*1e6
);
2081 if (wait_rest
.tv_sec
< 0) wait_rest
.tv_sec
= 0;
2082 if (wait_rest
.tv_usec
< 0) wait_rest
.tv_usec
= 0;
2094 rb_thread_wait_fd_rw(int fd
, int read
)
2097 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd
, read
? "read" : "write");
2100 rb_raise(rb_eIOError
, "closed stream");
2102 while (result
<= 0) {
2108 result
= do_select(fd
+ 1, rb_fd_ptr(&set
), 0, 0, 0);
2111 result
= do_select(fd
+ 1, 0, rb_fd_ptr(&set
), 0, 0);
2121 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd
, read
? "read" : "write");
2125 rb_thread_wait_fd(int fd
)
2127 rb_thread_wait_fd_rw(fd
, 1);
2131 rb_thread_fd_writable(int fd
)
2133 rb_thread_wait_fd_rw(fd
, 0);
2138 rb_thread_select(int max
, fd_set
* read
, fd_set
* write
, fd_set
* except
,
2139 struct timeval
*timeout
)
2141 if (!read
&& !write
&& !except
) {
2143 rb_thread_sleep_forever();
2146 rb_thread_wait_for(*timeout
);
2150 return do_select(max
, read
, write
, except
, timeout
);
2159 #ifdef USE_CONSERVATIVE_STACK_END
2161 rb_gc_set_stack_end(VALUE
**stack_end_p
)
2164 *stack_end_p
= &stack_end
;
2169 rb_gc_save_machine_context(rb_thread_t
*th
)
2171 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
2172 FLUSH_REGISTER_WINDOWS
;
2174 th
->machine_register_stack_end
= rb_ia64_bsp();
2176 setjmp(th
->machine_regs
);
2183 int rb_get_next_signal(rb_vm_t
*vm
);
2186 timer_thread_function(void *arg
)
2188 rb_vm_t
*vm
= arg
; /* TODO: fix me for Multi-VM */
2190 /* for time slice */
2191 RUBY_VM_SET_TIMER_INTERRUPT(vm
->running_thread
);
2194 if (vm
->buffered_signal_size
&& vm
->main_thread
->exec_signal
== 0) {
2195 rb_thread_t
*mth
= vm
->main_thread
;
2196 enum rb_thread_status prev_status
= mth
->status
;
2197 mth
->exec_signal
= rb_get_next_signal(vm
);
2198 thread_debug("main_thread: %s\n", thread_status_name(prev_status
));
2199 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2200 (long)vm
->buffered_signal_size
, vm
->main_thread
->exec_signal
);
2201 if (mth
->status
!= THREAD_KILLED
) mth
->status
= THREAD_RUNNABLE
;
2202 rb_thread_interrupt(mth
);
2203 mth
->status
= prev_status
;
2207 /* prove profiler */
2208 if (vm
->prove_profile
.enable
) {
2209 rb_thread_t
*th
= vm
->running_thread
;
2211 if (vm
->during_gc
) {
2212 /* GC prove profiling */
2219 rb_thread_stop_timer_thread(void)
2221 if (timer_thread_id
) {
2223 native_thread_join(timer_thread_id
);
2224 timer_thread_id
= 0;
2229 rb_thread_reset_timer_thread(void)
2231 timer_thread_id
= 0;
2235 rb_thread_start_timer_thread(void)
2237 rb_thread_create_timer_thread();
2241 clear_coverage_i(st_data_t key
, st_data_t val
, st_data_t dummy
)
2244 VALUE lines
= (VALUE
)val
;
2246 for (i
= 0; i
< RARRAY_LEN(lines
); i
++) {
2247 if (RARRAY_PTR(lines
)[i
] != Qnil
) {
2248 RARRAY_PTR(lines
)[i
] = INT2FIX(0);
2255 clear_coverage(void)
2257 extern VALUE
rb_get_coverages(void);
2258 VALUE coverages
= rb_get_coverages();
2259 if (RTEST(coverages
)) {
2260 st_foreach(RHASH_TBL(coverages
), clear_coverage_i
, 0);
2265 terminate_atfork_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2269 GetThreadPtr(thval
, th
);
2271 if (th
!= current_th
) {
2272 thread_cleanup_func(th
);
2278 rb_thread_atfork(void)
2280 rb_thread_t
*th
= GET_THREAD();
2281 rb_vm_t
*vm
= th
->vm
;
2282 VALUE thval
= th
->self
;
2283 vm
->main_thread
= th
;
2285 st_foreach(vm
->living_threads
, terminate_atfork_i
, (st_data_t
)th
);
2286 st_clear(vm
->living_threads
);
2287 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2290 rb_reset_random_seed();
2294 terminate_atfork_before_exec_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2298 GetThreadPtr(thval
, th
);
2300 if (th
!= current_th
) {
2301 thread_cleanup_func_before_exec(th
);
2307 rb_thread_atfork_before_exec(void)
2309 rb_thread_t
*th
= GET_THREAD();
2310 rb_vm_t
*vm
= th
->vm
;
2311 VALUE thval
= th
->self
;
2312 vm
->main_thread
= th
;
2314 st_foreach(vm
->living_threads
, terminate_atfork_before_exec_i
, (st_data_t
)th
);
2315 st_clear(vm
->living_threads
);
2316 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2327 * Document-class: ThreadGroup
2329 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2330 * threads as a group. A <code>Thread</code> can belong to only one
2331 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2332 * remove it from any previous group.
2334 * Newly created threads belong to the same group as the thread from which they
2338 static VALUE
thgroup_s_alloc(VALUE
);
2340 thgroup_s_alloc(VALUE klass
)
2343 struct thgroup
*data
;
2345 group
= Data_Make_Struct(klass
, struct thgroup
, 0, -1, data
);
2347 data
->group
= group
;
2352 struct thgroup_list_params
{
2358 thgroup_list_i(st_data_t key
, st_data_t val
, st_data_t data
)
2360 VALUE thread
= (VALUE
)key
;
2361 VALUE ary
= ((struct thgroup_list_params
*)data
)->ary
;
2362 VALUE group
= ((struct thgroup_list_params
*)data
)->group
;
2364 GetThreadPtr(thread
, th
);
2366 if (th
->thgroup
== group
) {
2367 rb_ary_push(ary
, thread
);
2374 * thgrp.list => array
2376 * Returns an array of all existing <code>Thread</code> objects that belong to
2379 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2383 thgroup_list(VALUE group
)
2385 VALUE ary
= rb_ary_new();
2386 struct thgroup_list_params param
;
2389 param
.group
= group
;
2390 st_foreach(GET_THREAD()->vm
->living_threads
, thgroup_list_i
, (st_data_t
) & param
);
2397 * thgrp.enclose => thgrp
2399 * Prevents threads from being added to or removed from the receiving
2400 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2401 * <code>ThreadGroup</code>.
2403 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2404 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2405 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2408 * <em>produces:</em>
2410 * ThreadError: can't move from the enclosed thread group
2414 thgroup_enclose(VALUE group
)
2416 struct thgroup
*data
;
2418 Data_Get_Struct(group
, struct thgroup
, data
);
2427 * thgrp.enclosed? => true or false
2429 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2430 * ThreadGroup#enclose.
2434 thgroup_enclosed_p(VALUE group
)
2436 struct thgroup
*data
;
2438 Data_Get_Struct(group
, struct thgroup
, data
);
2447 * thgrp.add(thread) => thgrp
2449 * Adds the given <em>thread</em> to this group, removing it from any other
2450 * group to which it may have previously belonged.
2452 * puts "Initial group is #{ThreadGroup::Default.list}"
2453 * tg = ThreadGroup.new
2454 * t1 = Thread.new { sleep }
2455 * t2 = Thread.new { sleep }
2456 * puts "t1 is #{t1}"
2457 * puts "t2 is #{t2}"
2459 * puts "Initial group now #{ThreadGroup::Default.list}"
2460 * puts "tg group now #{tg.list}"
2462 * <em>produces:</em>
2464 * Initial group is #<Thread:0x401bdf4c>
2465 * t1 is #<Thread:0x401b3c90>
2466 * t2 is #<Thread:0x401b3c18>
2467 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2468 * tg group now #<Thread:0x401b3c90>
2472 thgroup_add(VALUE group
, VALUE thread
)
2475 struct thgroup
*data
;
2478 GetThreadPtr(thread
, th
);
2480 if (OBJ_FROZEN(group
)) {
2481 rb_raise(rb_eThreadError
, "can't move to the frozen thread group");
2483 Data_Get_Struct(group
, struct thgroup
, data
);
2484 if (data
->enclosed
) {
2485 rb_raise(rb_eThreadError
, "can't move to the enclosed thread group");
2492 if (OBJ_FROZEN(th
->thgroup
)) {
2493 rb_raise(rb_eThreadError
, "can't move from the frozen thread group");
2495 Data_Get_Struct(th
->thgroup
, struct thgroup
, data
);
2496 if (data
->enclosed
) {
2497 rb_raise(rb_eThreadError
,
2498 "can't move from the enclosed thread group");
2501 th
->thgroup
= group
;
2507 * Document-class: Mutex
2509 * Mutex implements a simple semaphore that can be used to coordinate access to
2510 * shared data from multiple concurrent threads.
2515 * semaphore = Mutex.new
2518 * semaphore.synchronize {
2519 * # access shared resource
2524 * semaphore.synchronize {
2525 * # access shared resource
2531 #define GetMutexPtr(obj, tobj) \
2532 Data_Get_Struct(obj, mutex_t, tobj)
2534 static const char *mutex_unlock(mutex_t
*mutex
);
2537 mutex_free(void *ptr
)
2540 mutex_t
*mutex
= ptr
;
2542 /* rb_warn("free locked mutex"); */
2543 mutex_unlock(mutex
);
2545 native_mutex_destroy(&mutex
->lock
);
2546 native_cond_destroy(&mutex
->cond
);
2552 mutex_alloc(VALUE klass
)
2557 obj
= Data_Make_Struct(klass
, mutex_t
, NULL
, mutex_free
, mutex
);
2558 native_mutex_initialize(&mutex
->lock
);
2559 native_cond_initialize(&mutex
->cond
);
2565 * Mutex.new => mutex
2567 * Creates a new Mutex
2570 mutex_initialize(VALUE self
)
2578 return mutex_alloc(rb_cMutex
);
2583 * mutex.locked? => true or false
2585 * Returns +true+ if this lock is currently held by some thread.
2588 rb_mutex_locked_p(VALUE self
)
2591 GetMutexPtr(self
, mutex
);
2592 return mutex
->th
? Qtrue
: Qfalse
;
2596 mutex_locked(rb_thread_t
*th
, VALUE self
)
2599 GetMutexPtr(self
, mutex
);
2601 if (th
->keeping_mutexes
) {
2602 mutex
->next_mutex
= th
->keeping_mutexes
;
2604 th
->keeping_mutexes
= mutex
;
2609 * mutex.try_lock => true or false
2611 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2615 rb_mutex_trylock(VALUE self
)
2618 VALUE locked
= Qfalse
;
2619 GetMutexPtr(self
, mutex
);
2621 if (mutex
->th
== GET_THREAD()) {
2622 rb_raise(rb_eThreadError
, "deadlock; recursive locking");
2625 native_mutex_lock(&mutex
->lock
);
2626 if (mutex
->th
== 0) {
2627 mutex
->th
= GET_THREAD();
2630 mutex_locked(GET_THREAD(), self
);
2632 native_mutex_unlock(&mutex
->lock
);
2638 lock_func(rb_thread_t
*th
, mutex_t
*mutex
, int last_thread
)
2640 int interrupted
= 0;
2641 #if 0 /* for debug */
2642 native_thread_yield();
2645 native_mutex_lock(&mutex
->lock
);
2646 th
->transition_for_lock
= 0;
2647 while (mutex
->th
|| (mutex
->th
= th
, 0)) {
2653 mutex
->cond_waiting
++;
2654 native_cond_wait(&mutex
->cond
, &mutex
->lock
);
2655 mutex
->cond_notified
--;
2657 if (RUBY_VM_INTERRUPTED(th
)) {
2662 th
->transition_for_lock
= 1;
2663 native_mutex_unlock(&mutex
->lock
);
2665 if (interrupted
== 2) native_thread_yield();
2666 #if 0 /* for debug */
2667 native_thread_yield();
2674 lock_interrupt(void *ptr
)
2676 mutex_t
*mutex
= (mutex_t
*)ptr
;
2677 native_mutex_lock(&mutex
->lock
);
2678 if (mutex
->cond_waiting
> 0) {
2679 native_cond_broadcast(&mutex
->cond
);
2680 mutex
->cond_notified
= mutex
->cond_waiting
;
2681 mutex
->cond_waiting
= 0;
2683 native_mutex_unlock(&mutex
->lock
);
2688 * mutex.lock => true or false
2690 * Attempts to grab the lock and waits if it isn't available.
2691 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2694 rb_mutex_lock(VALUE self
)
2696 if (rb_mutex_trylock(self
) == Qfalse
) {
2698 rb_thread_t
*th
= GET_THREAD();
2699 GetMutexPtr(self
, mutex
);
2701 while (mutex
->th
!= th
) {
2703 enum rb_thread_status prev_status
= th
->status
;
2704 int last_thread
= 0;
2705 struct rb_unblock_callback oldubf
;
2707 set_unblock_function(th
, lock_interrupt
, mutex
, &oldubf
);
2708 th
->status
= THREAD_STOPPED_FOREVER
;
2710 th
->locking_mutex
= self
;
2711 if (vm_living_thread_num(th
->vm
) == th
->vm
->sleeper
) {
2715 th
->transition_for_lock
= 1;
2716 BLOCKING_REGION_CORE({
2717 interrupted
= lock_func(th
, mutex
, last_thread
);
2719 th
->transition_for_lock
= 0;
2720 remove_signal_thread_list(th
);
2721 reset_unblock_function(th
, &oldubf
);
2723 th
->locking_mutex
= Qfalse
;
2724 if (mutex
->th
&& interrupted
== 2) {
2725 rb_check_deadlock(th
->vm
);
2727 if (th
->status
== THREAD_STOPPED_FOREVER
) {
2728 th
->status
= prev_status
;
2732 if (mutex
->th
== th
) mutex_locked(th
, self
);
2735 RUBY_VM_CHECK_INTS();
2743 mutex_unlock(mutex_t
*mutex
)
2745 const char *err
= NULL
;
2746 rb_thread_t
*th
= GET_THREAD();
2749 native_mutex_lock(&mutex
->lock
);
2751 if (mutex
->th
== 0) {
2752 err
= "Attempt to unlock a mutex which is not locked";
2754 else if (mutex
->th
!= GET_THREAD()) {
2755 err
= "Attempt to unlock a mutex which is locked by another thread";
2759 if (mutex
->cond_waiting
> 0) {
2760 /* waiting thread */
2761 native_cond_signal(&mutex
->cond
);
2762 mutex
->cond_waiting
--;
2763 mutex
->cond_notified
++;
2767 native_mutex_unlock(&mutex
->lock
);
2770 th_mutex
= th
->keeping_mutexes
;
2771 if (th_mutex
== mutex
) {
2772 th
->keeping_mutexes
= mutex
->next_mutex
;
2777 tmp_mutex
= th_mutex
->next_mutex
;
2778 if (tmp_mutex
== mutex
) {
2779 th_mutex
->next_mutex
= tmp_mutex
->next_mutex
;
2782 th_mutex
= tmp_mutex
;
2785 mutex
->next_mutex
= NULL
;
2793 * mutex.unlock => self
2795 * Releases the lock.
2796 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2799 rb_mutex_unlock(VALUE self
)
2803 GetMutexPtr(self
, mutex
);
2805 err
= mutex_unlock(mutex
);
2806 if (err
) rb_raise(rb_eThreadError
, err
);
2812 rb_mutex_unlock_all(mutex_t
*mutexes
)
2819 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2821 mutexes
= mutex
->next_mutex
;
2822 err
= mutex_unlock(mutex
);
2823 if (err
) rb_bug("invalid keeping_mutexes: %s", err
);
2828 rb_mutex_sleep_forever(VALUE time
)
2830 rb_thread_sleep_deadly();
2835 rb_mutex_wait_for(VALUE time
)
2837 const struct timeval
*t
= (struct timeval
*)time
;
2838 rb_thread_wait_for(*t
);
2843 rb_mutex_sleep(VALUE self
, VALUE timeout
)
2848 if (!NIL_P(timeout
)) {
2849 t
= rb_time_interval(timeout
);
2851 rb_mutex_unlock(self
);
2853 if (NIL_P(timeout
)) {
2854 rb_ensure(rb_mutex_sleep_forever
, Qnil
, rb_mutex_lock
, self
);
2857 rb_ensure(rb_mutex_wait_for
, (VALUE
)&t
, rb_mutex_lock
, self
);
2859 end
= time(0) - beg
;
2860 return INT2FIX(end
);
2865 * mutex.sleep(timeout = nil) => number
2867 * Releases the lock and sleeps +timeout+ seconds if it is given and
2868 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2869 * the current thread.
2872 mutex_sleep(int argc
, VALUE
*argv
, VALUE self
)
2876 rb_scan_args(argc
, argv
, "01", &timeout
);
2877 return rb_mutex_sleep(self
, timeout
);
2882 * mutex.synchronize { ... } => result of the block
2884 * Obtains a lock, runs the block, and releases the lock when the block
2885 * completes. See the example under +Mutex+.
2889 rb_thread_synchronize(VALUE mutex
, VALUE (*func
)(VALUE arg
), VALUE arg
)
2891 rb_mutex_lock(mutex
);
2892 return rb_ensure(func
, arg
, rb_mutex_unlock
, mutex
);
2896 * Document-class: Barrier
2898 typedef struct rb_thread_list_struct rb_thread_list_t
;
2900 struct rb_thread_list_struct
{
2902 rb_thread_list_t
*next
;
2906 thlist_mark(void *ptr
)
2908 rb_thread_list_t
*q
= ptr
;
2910 for (; q
; q
= q
->next
) {
2911 rb_gc_mark(q
->th
->self
);
2916 thlist_free(void *ptr
)
2918 rb_thread_list_t
*q
= ptr
, *next
;
2920 for (; q
; q
= next
) {
2927 thlist_signal(rb_thread_list_t
**list
, unsigned int maxth
, rb_thread_t
**woken_thread
)
2930 rb_thread_list_t
*q
;
2932 while ((q
= *list
) != NULL
) {
2933 rb_thread_t
*th
= q
->th
;
2937 if (th
->status
!= THREAD_KILLED
) {
2938 rb_thread_ready(th
);
2939 if (!woken
&& woken_thread
) *woken_thread
= th
;
2940 if (++woken
>= maxth
&& maxth
) break;
2948 rb_thread_list_t
*waiting
, **tail
;
2952 barrier_mark(void *ptr
)
2954 rb_barrier_t
*b
= ptr
;
2956 if (b
->owner
) rb_gc_mark(b
->owner
->self
);
2957 thlist_mark(b
->waiting
);
2961 barrier_free(void *ptr
)
2963 rb_barrier_t
*b
= ptr
;
2966 thlist_free(b
->waiting
);
2972 barrier_alloc(VALUE klass
)
2975 rb_barrier_t
*barrier
;
2977 obj
= Data_Make_Struct(klass
, rb_barrier_t
, barrier_mark
, barrier_free
, barrier
);
2978 barrier
->owner
= GET_THREAD();
2979 barrier
->waiting
= 0;
2980 barrier
->tail
= &barrier
->waiting
;
2985 rb_barrier_new(void)
2987 return barrier_alloc(rb_cBarrier
);
2991 rb_barrier_wait(VALUE self
)
2993 rb_barrier_t
*barrier
;
2994 rb_thread_list_t
*q
;
2996 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
2997 if (!barrier
->owner
|| barrier
->owner
->status
== THREAD_KILLED
) {
2999 if (thlist_signal(&barrier
->waiting
, 1, &barrier
->owner
)) return Qfalse
;
3002 else if (barrier
->owner
== GET_THREAD()) {
3006 *barrier
->tail
= q
= ALLOC(rb_thread_list_t
);
3007 q
->th
= GET_THREAD();
3009 barrier
->tail
= &q
->next
;
3010 rb_thread_sleep_forever();
3011 return barrier
->owner
== GET_THREAD() ? Qtrue
: Qfalse
;
3016 rb_barrier_release(VALUE self
)
3018 rb_barrier_t
*barrier
;
3021 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
3022 if (barrier
->owner
!= GET_THREAD()) {
3023 rb_raise(rb_eThreadError
, "not owned");
3025 n
= thlist_signal(&barrier
->waiting
, 0, &barrier
->owner
);
3026 return n
? UINT2NUM(n
) : Qfalse
;
3029 /* variables for recursive traversals */
3030 static ID recursive_key
;
3033 recursive_check(VALUE hash
, VALUE obj
)
3035 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3039 VALUE list
= rb_hash_aref(hash
, ID2SYM(rb_frame_this_func()));
3041 if (NIL_P(list
) || TYPE(list
) != T_HASH
)
3043 if (NIL_P(rb_hash_lookup(list
, obj
)))
3050 recursive_push(VALUE hash
, VALUE obj
)
3054 sym
= ID2SYM(rb_frame_this_func());
3055 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3056 hash
= rb_hash_new();
3057 rb_thread_local_aset(rb_thread_current(), recursive_key
, hash
);
3061 list
= rb_hash_aref(hash
, sym
);
3063 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3064 list
= rb_hash_new();
3065 rb_hash_aset(hash
, sym
, list
);
3067 rb_hash_aset(list
, obj
, Qtrue
);
3072 recursive_pop(VALUE hash
, VALUE obj
)
3076 sym
= ID2SYM(rb_frame_this_func());
3077 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3080 symname
= rb_inspect(sym
);
3081 thrname
= rb_inspect(rb_thread_current());
3083 rb_raise(rb_eTypeError
, "invalid inspect_tbl hash for %s in %s",
3084 StringValuePtr(symname
), StringValuePtr(thrname
));
3086 list
= rb_hash_aref(hash
, sym
);
3087 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3088 VALUE symname
= rb_inspect(sym
);
3089 VALUE thrname
= rb_inspect(rb_thread_current());
3090 rb_raise(rb_eTypeError
, "invalid inspect_tbl list for %s in %s",
3091 StringValuePtr(symname
), StringValuePtr(thrname
));
3093 rb_hash_delete(list
, obj
);
3097 rb_exec_recursive(VALUE (*func
) (VALUE
, VALUE
, int), VALUE obj
, VALUE arg
)
3099 VALUE hash
= rb_thread_local_aref(rb_thread_current(), recursive_key
);
3100 VALUE objid
= rb_obj_id(obj
);
3102 if (recursive_check(hash
, objid
)) {
3103 return (*func
) (obj
, arg
, Qtrue
);
3106 VALUE result
= Qundef
;
3109 hash
= recursive_push(hash
, objid
);
3111 if ((state
= EXEC_TAG()) == 0) {
3112 result
= (*func
) (obj
, arg
, Qfalse
);
3115 recursive_pop(hash
, objid
);
3124 static rb_event_hook_t
*
3125 alloc_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3127 rb_event_hook_t
*hook
= ALLOC(rb_event_hook_t
);
3129 hook
->flag
= events
;
3135 thread_reset_event_flags(rb_thread_t
*th
)
3137 rb_event_hook_t
*hook
= th
->event_hooks
;
3138 rb_event_flag_t flag
= th
->event_flags
& RUBY_EVENT_VM
;
3147 rb_thread_add_event_hook(rb_thread_t
*th
,
3148 rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3150 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3151 hook
->next
= th
->event_hooks
;
3152 th
->event_hooks
= hook
;
3153 thread_reset_event_flags(th
);
3157 set_threads_event_flags_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3161 GetThreadPtr(thval
, th
);
3164 th
->event_flags
|= RUBY_EVENT_VM
;
3167 th
->event_flags
&= (~RUBY_EVENT_VM
);
3173 set_threads_event_flags(int flag
)
3175 st_foreach(GET_VM()->living_threads
, set_threads_event_flags_i
, (st_data_t
) flag
);
3179 rb_add_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3181 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3182 rb_vm_t
*vm
= GET_VM();
3184 hook
->next
= vm
->event_hooks
;
3185 vm
->event_hooks
= hook
;
3187 set_threads_event_flags(1);
3191 remove_event_hook(rb_event_hook_t
**root
, rb_event_hook_func_t func
)
3193 rb_event_hook_t
*prev
= NULL
, *hook
= *root
, *next
;
3197 if (func
== 0 || hook
->func
== func
) {
3199 prev
->next
= hook
->next
;
3215 rb_thread_remove_event_hook(rb_thread_t
*th
, rb_event_hook_func_t func
)
3217 int ret
= remove_event_hook(&th
->event_hooks
, func
);
3218 thread_reset_event_flags(th
);
3223 rb_remove_event_hook(rb_event_hook_func_t func
)
3225 rb_vm_t
*vm
= GET_VM();
3226 rb_event_hook_t
*hook
= vm
->event_hooks
;
3227 int ret
= remove_event_hook(&vm
->event_hooks
, func
);
3229 if (hook
!= NULL
&& vm
->event_hooks
== NULL
) {
3230 set_threads_event_flags(0);
3237 clear_trace_func_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3240 GetThreadPtr((VALUE
)key
, th
);
3241 rb_thread_remove_event_hook(th
, 0);
3246 rb_clear_trace_func(void)
3248 st_foreach(GET_VM()->living_threads
, clear_trace_func_i
, (st_data_t
) 0);
3249 rb_remove_event_hook(0);
3252 static void call_trace_func(rb_event_flag_t
, VALUE data
, VALUE self
, ID id
, VALUE klass
);
3256 * set_trace_func(proc) => proc
3257 * set_trace_func(nil) => nil
3259 * Establishes _proc_ as the handler for tracing, or disables
3260 * tracing if the parameter is +nil+. _proc_ takes up
3261 * to six parameters: an event name, a filename, a line number, an
3262 * object id, a binding, and the name of a class. _proc_ is
3263 * invoked whenever an event occurs. Events are: <code>c-call</code>
3264 * (call a C-language routine), <code>c-return</code> (return from a
3265 * C-language routine), <code>call</code> (call a Ruby method),
3266 * <code>class</code> (start a class or module definition),
3267 * <code>end</code> (finish a class or module definition),
3268 * <code>line</code> (execute code on a new line), <code>raise</code>
3269 * (raise an exception), and <code>return</code> (return from a Ruby
3270 * method). Tracing is disabled within the context of _proc_.
3279 * set_trace_func proc { |event, file, line, id, binding, classname|
3280 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3285 * line prog.rb:11 false
3286 * c-call prog.rb:11 new Class
3287 * c-call prog.rb:11 initialize Object
3288 * c-return prog.rb:11 initialize Object
3289 * c-return prog.rb:11 new Class
3290 * line prog.rb:12 false
3291 * call prog.rb:2 test Test
3292 * line prog.rb:3 test Test
3293 * line prog.rb:4 test Test
3294 * return prog.rb:4 test Test
3298 set_trace_func(VALUE obj
, VALUE trace
)
3300 rb_remove_event_hook(call_trace_func
);
3306 if (!rb_obj_is_proc(trace
)) {
3307 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3310 rb_add_event_hook(call_trace_func
, RUBY_EVENT_ALL
, trace
);
3315 thread_add_trace_func(rb_thread_t
*th
, VALUE trace
)
3317 if (!rb_obj_is_proc(trace
)) {
3318 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3321 rb_thread_add_event_hook(th
, call_trace_func
, RUBY_EVENT_ALL
, trace
);
3325 thread_add_trace_func_m(VALUE obj
, VALUE trace
)
3328 GetThreadPtr(obj
, th
);
3329 thread_add_trace_func(th
, trace
);
3334 thread_set_trace_func_m(VALUE obj
, VALUE trace
)
3337 GetThreadPtr(obj
, th
);
3338 rb_thread_remove_event_hook(th
, call_trace_func
);
3343 thread_add_trace_func(th
, trace
);
3348 get_event_name(rb_event_flag_t event
)
3351 case RUBY_EVENT_LINE
:
3353 case RUBY_EVENT_CLASS
:
3355 case RUBY_EVENT_END
:
3357 case RUBY_EVENT_CALL
:
3359 case RUBY_EVENT_RETURN
:
3361 case RUBY_EVENT_C_CALL
:
3363 case RUBY_EVENT_C_RETURN
:
3365 case RUBY_EVENT_RAISE
:
3372 VALUE
ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
);
3374 struct call_trace_func_args
{
3375 rb_event_flag_t event
;
3383 call_trace_proc(VALUE args
, int tracing
)
3385 struct call_trace_func_args
*p
= (struct call_trace_func_args
*)args
;
3386 VALUE eventname
= rb_str_new2(get_event_name(p
->event
));
3387 VALUE filename
= rb_str_new2(rb_sourcefile());
3389 int line
= rb_sourceline();
3393 if (p
->event
== RUBY_EVENT_C_CALL
||
3394 p
->event
== RUBY_EVENT_C_RETURN
) {
3399 rb_thread_method_id_and_class(GET_THREAD(), &id
, &klass
);
3401 if (id
== ID_ALLOCATOR
)
3404 if (TYPE(klass
) == T_ICLASS
) {
3405 klass
= RBASIC(klass
)->klass
;
3407 else if (FL_TEST(klass
, FL_SINGLETON
)) {
3408 klass
= rb_iv_get(klass
, "__attached__");
3412 argv
[0] = eventname
;
3414 argv
[2] = INT2FIX(line
);
3415 argv
[3] = id
? ID2SYM(id
) : Qnil
;
3416 argv
[4] = p
->self
? rb_binding_new() : Qnil
;
3417 argv
[5] = klass
? klass
: Qnil
;
3419 return rb_proc_call_with_block(p
->proc
, 6, argv
, Qnil
);
3423 call_trace_func(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3425 struct call_trace_func_args args
;
3432 ruby_suppress_tracing(call_trace_proc
, (VALUE
)&args
, Qfalse
);
3436 ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
)
3438 rb_thread_t
*th
= GET_THREAD();
3439 int state
, raised
, tracing
;
3440 VALUE result
= Qnil
;
3442 if ((tracing
= th
->tracing
) != 0 && !always
) {
3449 raised
= rb_thread_reset_raised(th
);
3452 if ((state
= EXEC_TAG()) == 0) {
3453 result
= (*func
)(arg
, tracing
);
3457 rb_thread_set_raised(th
);
3461 th
->tracing
= tracing
;
 *  +Thread+ encapsulates the behavior of a thread of
 *  execution, including the main thread of the Ruby script.
 *
 *  In the descriptions of the methods in this class, the parameter _sym_
 *  refers to a symbol, which is either a quoted string or a
 *  +Symbol+ (such as <code>:name</code>).
3485 rb_define_singleton_method(rb_cThread
, "new", thread_s_new
, -1);
3486 rb_define_singleton_method(rb_cThread
, "start", thread_start
, -2);
3487 rb_define_singleton_method(rb_cThread
, "fork", thread_start
, -2);
3488 rb_define_singleton_method(rb_cThread
, "main", rb_thread_s_main
, 0);
3489 rb_define_singleton_method(rb_cThread
, "current", thread_s_current
, 0);
3490 rb_define_singleton_method(rb_cThread
, "stop", rb_thread_stop
, 0);
3491 rb_define_singleton_method(rb_cThread
, "kill", rb_thread_s_kill
, 1);
3492 rb_define_singleton_method(rb_cThread
, "exit", rb_thread_exit
, 0);
3493 rb_define_singleton_method(rb_cThread
, "pass", thread_s_pass
, 0);
3494 rb_define_singleton_method(rb_cThread
, "list", rb_thread_list
, 0);
3495 rb_define_singleton_method(rb_cThread
, "abort_on_exception", rb_thread_s_abort_exc
, 0);
3496 rb_define_singleton_method(rb_cThread
, "abort_on_exception=", rb_thread_s_abort_exc_set
, 1);
3497 #if THREAD_DEBUG < 0
3498 rb_define_singleton_method(rb_cThread
, "DEBUG", rb_thread_s_debug
, 0);
3499 rb_define_singleton_method(rb_cThread
, "DEBUG=", rb_thread_s_debug_set
, 1);
3502 rb_define_method(rb_cThread
, "initialize", thread_initialize
, -2);
3503 rb_define_method(rb_cThread
, "raise", thread_raise_m
, -1);
3504 rb_define_method(rb_cThread
, "join", thread_join_m
, -1);
3505 rb_define_method(rb_cThread
, "value", thread_value
, 0);
3506 rb_define_method(rb_cThread
, "kill", rb_thread_kill
, 0);
3507 rb_define_method(rb_cThread
, "terminate", rb_thread_kill
, 0);
3508 rb_define_method(rb_cThread
, "exit", rb_thread_kill
, 0);
3509 rb_define_method(rb_cThread
, "run", rb_thread_run
, 0);
3510 rb_define_method(rb_cThread
, "wakeup", rb_thread_wakeup
, 0);
3511 rb_define_method(rb_cThread
, "[]", rb_thread_aref
, 1);
3512 rb_define_method(rb_cThread
, "[]=", rb_thread_aset
, 2);
3513 rb_define_method(rb_cThread
, "key?", rb_thread_key_p
, 1);
3514 rb_define_method(rb_cThread
, "keys", rb_thread_keys
, 0);
3515 rb_define_method(rb_cThread
, "priority", rb_thread_priority
, 0);
3516 rb_define_method(rb_cThread
, "priority=", rb_thread_priority_set
, 1);
3517 rb_define_method(rb_cThread
, "status", rb_thread_status
, 0);
3518 rb_define_method(rb_cThread
, "alive?", rb_thread_alive_p
, 0);
3519 rb_define_method(rb_cThread
, "stop?", rb_thread_stop_p
, 0);
3520 rb_define_method(rb_cThread
, "abort_on_exception", rb_thread_abort_exc
, 0);
3521 rb_define_method(rb_cThread
, "abort_on_exception=", rb_thread_abort_exc_set
, 1);
3522 rb_define_method(rb_cThread
, "safe_level", rb_thread_safe_level
, 0);
3523 rb_define_method(rb_cThread
, "group", rb_thread_group
, 0);
3525 rb_define_method(rb_cThread
, "inspect", rb_thread_inspect
, 0);
3527 cThGroup
= rb_define_class("ThreadGroup", rb_cObject
);
3528 rb_define_alloc_func(cThGroup
, thgroup_s_alloc
);
3529 rb_define_method(cThGroup
, "list", thgroup_list
, 0);
3530 rb_define_method(cThGroup
, "enclose", thgroup_enclose
, 0);
3531 rb_define_method(cThGroup
, "enclosed?", thgroup_enclosed_p
, 0);
3532 rb_define_method(cThGroup
, "add", thgroup_add
, 1);
3535 rb_thread_t
*th
= GET_THREAD();
3536 th
->thgroup
= th
->vm
->thgroup_default
= rb_obj_alloc(cThGroup
);
3537 rb_define_const(cThGroup
, "Default", th
->thgroup
);
3540 rb_cMutex
= rb_define_class("Mutex", rb_cObject
);
3541 rb_define_alloc_func(rb_cMutex
, mutex_alloc
);
3542 rb_define_method(rb_cMutex
, "initialize", mutex_initialize
, 0);
3543 rb_define_method(rb_cMutex
, "locked?", rb_mutex_locked_p
, 0);
3544 rb_define_method(rb_cMutex
, "try_lock", rb_mutex_trylock
, 0);
3545 rb_define_method(rb_cMutex
, "lock", rb_mutex_lock
, 0);
3546 rb_define_method(rb_cMutex
, "unlock", rb_mutex_unlock
, 0);
3547 rb_define_method(rb_cMutex
, "sleep", mutex_sleep
, -1);
3549 recursive_key
= rb_intern("__recursive_key__");
3550 rb_eThreadError
= rb_define_class("ThreadError", rb_eStandardError
);
3553 rb_define_global_function("set_trace_func", set_trace_func
, 1);
3554 rb_define_method(rb_cThread
, "set_trace_func", thread_set_trace_func_m
, 1);
3555 rb_define_method(rb_cThread
, "add_trace_func", thread_add_trace_func_m
, 1);
3557 /* init thread core */
3558 Init_native_thread();
3560 /* main thread setting */
3562 /* acquire global interpreter lock */
3563 rb_thread_lock_t
*lp
= &GET_THREAD()->vm
->global_vm_lock
;
3564 native_mutex_initialize(lp
);
3565 native_mutex_lock(lp
);
3566 native_mutex_initialize(&GET_THREAD()->interrupt_lock
);
3570 rb_thread_create_timer_thread();
3572 (void)native_mutex_trylock
;
3573 (void)ruby_thread_set_native
;
3577 ruby_native_thread_p(void)
3579 rb_thread_t
*th
= ruby_thread_from_native();
3581 return th
? Qtrue
: Qfalse
;
3585 check_deadlock_i(st_data_t key
, st_data_t val
, int *found
)
3589 GetThreadPtr(thval
, th
);
3591 if (th
->status
!= THREAD_STOPPED_FOREVER
|| RUBY_VM_INTERRUPTED(th
) || th
->transition_for_lock
) {
3594 else if (th
->locking_mutex
) {
3596 GetMutexPtr(th
->locking_mutex
, mutex
);
3598 native_mutex_lock(&mutex
->lock
);
3599 if (mutex
->th
== th
|| (!mutex
->th
&& mutex
->cond_notified
)) {
3602 native_mutex_unlock(&mutex
->lock
);
3605 return (*found
) ? ST_STOP
: ST_CONTINUE
;
#if 0 /* for debug */
/*
 * Debug-only st_foreach callback: dump each living thread's status,
 * interrupt flag, lock-transition flag, and (if blocked) the state of
 * the mutex it waits on. Compiled out by default.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
	mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", mutex->th, mutex->cond_notified);
	native_mutex_unlock(&mutex->lock);
    }
    else {
	puts("");
    }

    return ST_CONTINUE;
}
#endif
3632 rb_check_deadlock(rb_vm_t
*vm
)
3636 if (vm_living_thread_num(vm
) > vm
->sleeper
) return;
3637 if (vm_living_thread_num(vm
) < vm
->sleeper
) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3639 st_foreach(vm
->living_threads
, check_deadlock_i
, (st_data_t
)&found
);
3643 argv
[0] = rb_eFatal
;
3644 argv
[1] = rb_str_new2("deadlock detected");
3645 #if 0 /* for debug */
3646 printf("%d %d %p %p\n", vm
->living_threads
->num_entries
, vm
->sleeper
, GET_THREAD(), vm
->main_thread
);
3647 st_foreach(vm
->living_threads
, debug_i
, (st_data_t
)0);
3649 rb_thread_raise(2, argv
, vm
->main_thread
);
3654 update_coverage(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3656 VALUE coverage
= GET_THREAD()->cfp
->iseq
->coverage
;
3657 if (coverage
&& RBASIC(coverage
)->klass
== 0) {
3658 long line
= rb_sourceline() - 1;
3660 if (RARRAY_PTR(coverage
)[line
] == Qnil
) {
3663 count
= FIX2LONG(RARRAY_PTR(coverage
)[line
]) + 1;
3664 if (POSFIXABLE(count
)) {
3665 RARRAY_PTR(coverage
)[line
] = LONG2FIX(count
);
3671 rb_get_coverages(void)
3673 return GET_VM()->coverages
;
3677 rb_set_coverages(VALUE coverages
)
3679 GET_VM()->coverages
= coverages
;
3680 rb_add_event_hook(update_coverage
, RUBY_EVENT_COVERAGE
, Qnil
);
3684 rb_reset_coverages(void)
3686 GET_VM()->coverages
= Qfalse
;
3687 rb_remove_event_hook(update_coverage
);