1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
26 A thread that holds the mutex (GVL: Global VM Lock) can run. On thread
27 scheduling, the running thread releases the GVL. If the running thread
28 attempts a blocking operation, it must release the GVL so that another
29 thread can continue in the meantime. After a blocking operation, the
30 thread must check for interrupts (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
39 All threads run concurrently or in parallel, and exclusive access
40 control is needed to access shared objects. For example, to access a
41 String object or an Array object, a fine-grained lock must be taken every time.
47 #include "eval_intern.h"
51 #ifndef USE_NATIVE_THREAD_PRIORITY
52 #define USE_NATIVE_THREAD_PRIORITY 0
53 #define RUBY_THREAD_PRIORITY_MAX 3
54 #define RUBY_THREAD_PRIORITY_MIN -3
58 #define THREAD_DEBUG 0
64 static void sleep_timeval(rb_thread_t
*th
, struct timeval time
);
65 static void sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
);
66 static void sleep_forever(rb_thread_t
*th
, int nodeadlock
);
67 static double timeofday(void);
68 struct timeval
rb_time_interval(VALUE
);
69 static int rb_thread_dead(rb_thread_t
*th
);
71 static void rb_check_deadlock(rb_vm_t
*vm
);
73 void rb_signal_exec(rb_thread_t
*th
, int sig
);
74 void rb_disable_interrupt(void);
76 static const VALUE eKillSignal
= INT2FIX(0);
77 static const VALUE eTerminateSignal
= INT2FIX(1);
78 static volatile int system_working
= 1;
81 st_delete_wrap(st_table
*table
, st_data_t key
)
83 st_delete(table
, &key
, 0);
86 /********************************************************************************/
88 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
90 static void set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
91 struct rb_unblock_callback
*old
);
92 static void reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
);
94 #define GVL_UNLOCK_BEGIN() do { \
95 rb_thread_t *_th_stored = GET_THREAD(); \
96 rb_gc_save_machine_context(_th_stored); \
97 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
99 #define GVL_UNLOCK_END() \
100 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
101 rb_thread_set_current(_th_stored); \
104 #define BLOCKING_REGION_CORE(exec) do { \
105 GVL_UNLOCK_BEGIN(); {\
111 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
112 rb_thread_t *__th = GET_THREAD(); \
113 enum rb_thread_status __prev_status = __th->status; \
114 struct rb_unblock_callback __oldubf; \
115 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
116 __th->status = THREAD_STOPPED; \
117 thread_debug("enter blocking region (%p)\n", __th); \
118 BLOCKING_REGION_CORE(exec); \
119 thread_debug("leave blocking region (%p)\n", __th); \
120 remove_signal_thread_list(__th); \
121 reset_unblock_function(__th, &__oldubf); \
122 if (__th->status == THREAD_STOPPED) { \
123 __th->status = __prev_status; \
125 RUBY_VM_CHECK_INTS(); \
129 #ifdef HAVE_VA_ARGS_MACRO
130 void rb_thread_debug(const char *file
, int line
, const char *fmt
, ...);
131 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
132 #define POSITION_FORMAT "%s:%d:"
133 #define POSITION_ARGS ,file, line
135 void rb_thread_debug(const char *fmt
, ...);
136 #define thread_debug rb_thread_debug
137 #define POSITION_FORMAT
138 #define POSITION_ARGS
141 # if THREAD_DEBUG < 0
142 static int rb_thread_debug_enabled
;
145 rb_thread_s_debug(void)
147 return INT2NUM(rb_thread_debug_enabled
);
151 rb_thread_s_debug_set(VALUE self
, VALUE val
)
153 rb_thread_debug_enabled
= RTEST(val
);
157 # define rb_thread_debug_enabled THREAD_DEBUG
160 #define thread_debug if(0)printf
164 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
166 NOINLINE(static int thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
,
167 VALUE
*register_stack_start
));
168 static void timer_thread_function(void *);
171 #include "thread_win32.c"
173 #define DEBUG_OUT() \
174 WaitForSingleObject(&debug_mutex, INFINITE); \
175 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
177 ReleaseMutex(&debug_mutex);
179 #elif defined(HAVE_PTHREAD_H)
180 #include "thread_pthread.c"
182 #define DEBUG_OUT() \
183 pthread_mutex_lock(&debug_mutex); \
184 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
186 pthread_mutex_unlock(&debug_mutex);
189 #error "unsupported thread type"
193 static int debug_mutex_initialized
= 1;
194 static rb_thread_lock_t debug_mutex
;
198 #ifdef HAVE_VA_ARGS_MACRO
199 const char *file
, int line
,
201 const char *fmt
, ...)
206 if (!rb_thread_debug_enabled
) return;
208 if (debug_mutex_initialized
== 1) {
209 debug_mutex_initialized
= 0;
210 native_mutex_initialize(&debug_mutex
);
214 vsnprintf(buf
, BUFSIZ
, fmt
, args
);
223 set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
224 struct rb_unblock_callback
*old
)
227 RUBY_VM_CHECK_INTS(); /* check signal or so */
228 native_mutex_lock(&th
->interrupt_lock
);
229 if (th
->interrupt_flag
) {
230 native_mutex_unlock(&th
->interrupt_lock
);
234 if (old
) *old
= th
->unblock
;
235 th
->unblock
.func
= func
;
236 th
->unblock
.arg
= arg
;
238 native_mutex_unlock(&th
->interrupt_lock
);
242 reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
)
244 native_mutex_lock(&th
->interrupt_lock
);
246 native_mutex_unlock(&th
->interrupt_lock
);
250 rb_thread_interrupt(rb_thread_t
*th
)
252 native_mutex_lock(&th
->interrupt_lock
);
253 RUBY_VM_SET_INTERRUPT(th
);
254 if (th
->unblock
.func
) {
255 (th
->unblock
.func
)(th
->unblock
.arg
);
260 native_mutex_unlock(&th
->interrupt_lock
);
265 terminate_i(st_data_t key
, st_data_t val
, rb_thread_t
*main_thread
)
269 GetThreadPtr(thval
, th
);
271 if (th
!= main_thread
) {
272 thread_debug("terminate_i: %p\n", th
);
273 rb_thread_interrupt(th
);
274 th
->thrown_errinfo
= eTerminateSignal
;
275 th
->status
= THREAD_TO_KILL
;
278 thread_debug("terminate_i: main thread (%p)\n", th
);
283 typedef struct rb_mutex_struct
285 rb_thread_lock_t lock
;
286 rb_thread_cond_t cond
;
287 struct rb_thread_struct
volatile *th
;
288 volatile int cond_waiting
, cond_notified
;
289 struct rb_mutex_struct
*next_mutex
;
292 static void rb_mutex_unlock_all(mutex_t
*mutex
);
295 rb_thread_terminate_all(void)
297 rb_thread_t
*th
= GET_THREAD(); /* main thread */
298 rb_vm_t
*vm
= th
->vm
;
299 if (vm
->main_thread
!= th
) {
300 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm
->main_thread
, th
);
303 /* unlock all locking mutexes */
304 if (th
->keeping_mutexes
) {
305 rb_mutex_unlock_all(th
->keeping_mutexes
);
308 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th
);
309 st_foreach(vm
->living_threads
, terminate_i
, (st_data_t
)th
);
311 while (!rb_thread_alone()) {
313 if (EXEC_TAG() == 0) {
314 rb_thread_schedule();
317 /* ignore exception */
325 thread_cleanup_func_before_exec(void *th_ptr
)
327 rb_thread_t
*th
= th_ptr
;
328 th
->status
= THREAD_KILLED
;
329 th
->machine_stack_start
= th
->machine_stack_end
= 0;
331 th
->machine_register_stack_start
= th
->machine_register_stack_end
= 0;
336 thread_cleanup_func(void *th_ptr
)
338 rb_thread_t
*th
= th_ptr
;
339 thread_cleanup_func_before_exec(th_ptr
);
340 native_thread_destroy(th
);
343 extern void ruby_error_print(void);
344 static VALUE
rb_thread_raise(int, VALUE
*, rb_thread_t
*);
345 void rb_thread_recycle_stack_release(VALUE
*);
348 ruby_thread_init_stack(rb_thread_t
*th
)
350 native_thread_init_stack(th
);
354 thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
, VALUE
*register_stack_start
)
357 VALUE args
= th
->first_args
;
359 rb_thread_t
*join_th
;
360 rb_thread_t
*main_th
;
361 VALUE errinfo
= Qnil
;
363 th
->machine_stack_start
= stack_start
;
365 th
->machine_register_stack_start
= register_stack_start
;
367 thread_debug("thread start: %p\n", th
);
369 native_mutex_lock(&th
->vm
->global_vm_lock
);
371 thread_debug("thread start (get lock): %p\n", th
);
372 rb_thread_set_current(th
);
375 if ((state
= EXEC_TAG()) == 0) {
376 SAVE_ROOT_JMPBUF(th
, {
377 if (th
->first_proc
) {
378 GetProcPtr(th
->first_proc
, proc
);
380 th
->local_lfp
= proc
->block
.lfp
;
381 th
->local_svar
= Qnil
;
382 th
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
,
383 RARRAY_LEN(args
), RARRAY_PTR(args
), 0);
386 th
->value
= (*th
->first_func
)((void *)th
->first_args
);
391 errinfo
= th
->errinfo
;
392 if (NIL_P(errinfo
)) errinfo
= rb_errinfo();
393 if (state
== TAG_FATAL
) {
394 /* fatal error within this thread, need to stop whole script */
396 else if (rb_obj_is_kind_of(errinfo
, rb_eSystemExit
)) {
397 if (th
->safe_level
>= 4) {
398 th
->errinfo
= rb_exc_new3(rb_eSecurityError
,
399 rb_sprintf("Insecure exit at level %d", th
->safe_level
));
403 else if (th
->safe_level
< 4 &&
404 (th
->vm
->thread_abort_on_exception
||
405 th
->abort_on_exception
|| RTEST(ruby_debug
))) {
406 /* exit on main_thread */
414 th
->status
= THREAD_KILLED
;
415 thread_debug("thread end: %p\n", th
);
417 main_th
= th
->vm
->main_thread
;
419 if (TYPE(errinfo
) == T_OBJECT
) {
420 /* treat with normal error object */
421 rb_thread_raise(1, &errinfo
, main_th
);
426 /* locking_mutex must be Qfalse */
427 if (th
->locking_mutex
!= Qfalse
) {
428 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE
")",
429 th
, th
->locking_mutex
);
432 /* unlock all locking mutexes */
433 if (th
->keeping_mutexes
) {
434 rb_mutex_unlock_all(th
->keeping_mutexes
);
435 th
->keeping_mutexes
= NULL
;
438 /* delete self from living_threads */
439 st_delete_wrap(th
->vm
->living_threads
, th
->self
);
441 /* wake up joinning threads */
442 join_th
= th
->join_list_head
;
444 if (join_th
== main_th
) errinfo
= Qnil
;
445 rb_thread_interrupt(join_th
);
446 switch (join_th
->status
) {
447 case THREAD_STOPPED
: case THREAD_STOPPED_FOREVER
:
448 join_th
->status
= THREAD_RUNNABLE
;
451 join_th
= join_th
->join_list_next
;
453 if (th
!= main_th
) rb_check_deadlock(th
->vm
);
455 if (!th
->root_fiber
) {
456 rb_thread_recycle_stack_release(th
->stack
);
460 thread_cleanup_func(th
);
461 native_mutex_unlock(&th
->vm
->global_vm_lock
);
467 thread_create_core(VALUE thval
, VALUE args
, VALUE (*fn
)(ANYARGS
))
471 if (OBJ_FROZEN(GET_THREAD()->thgroup
)) {
472 rb_raise(rb_eThreadError
,
473 "can't start a new thread (frozen ThreadGroup)");
475 GetThreadPtr(thval
, th
);
477 /* setup thread environment */
479 th
->first_proc
= fn
? Qfalse
: rb_block_proc();
480 th
->first_args
= args
; /* GC: shouldn't put before above line */
482 th
->priority
= GET_THREAD()->priority
;
483 th
->thgroup
= GET_THREAD()->thgroup
;
485 native_mutex_initialize(&th
->interrupt_lock
);
487 st_insert(th
->vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
488 native_thread_create(th
);
493 thread_s_new(int argc
, VALUE
*argv
, VALUE klass
)
496 VALUE thread
= rb_thread_alloc(klass
);
497 rb_obj_call_init(thread
, argc
, argv
);
498 GetThreadPtr(thread
, th
);
499 if (!th
->first_args
) {
500 rb_raise(rb_eThreadError
, "uninitialized thread - check `%s#initialize'",
501 rb_class2name(klass
));
508 * Thread.start([args]*) {|args| block } => thread
509 * Thread.fork([args]*) {|args| block } => thread
511 * Basically the same as <code>Thread::new</code>. However, if class
512 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
513 * subclass will not invoke the subclass's <code>initialize</code> method.
517 thread_start(VALUE klass
, VALUE args
)
519 return thread_create_core(rb_thread_alloc(klass
), args
, 0);
523 thread_initialize(VALUE thread
, VALUE args
)
526 if (!rb_block_given_p()) {
527 rb_raise(rb_eThreadError
, "must be called with a block");
529 GetThreadPtr(thread
, th
);
530 if (th
->first_args
) {
531 VALUE
rb_proc_location(VALUE self
);
532 VALUE proc
= th
->first_proc
, line
, loc
;
534 if (!proc
|| !RTEST(loc
= rb_proc_location(proc
))) {
535 rb_raise(rb_eThreadError
, "already initialized thread");
537 file
= RSTRING_PTR(RARRAY_PTR(loc
)[0]);
538 if (NIL_P(line
= RARRAY_PTR(loc
)[1])) {
539 rb_raise(rb_eThreadError
, "already initialized thread - %s",
542 rb_raise(rb_eThreadError
, "already initialized thread - %s:%d",
543 file
, NUM2INT(line
));
545 return thread_create_core(thread
, args
, 0);
549 rb_thread_create(VALUE (*fn
)(ANYARGS
), void *arg
)
551 return thread_create_core(rb_thread_alloc(rb_cThread
), (VALUE
)arg
, fn
);
555 /* +infty, for this purpose */
556 #define DELAY_INFTY 1E30
559 rb_thread_t
*target
, *waiting
;
565 remove_from_join_list(VALUE arg
)
567 struct join_arg
*p
= (struct join_arg
*)arg
;
568 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
570 if (target_th
->status
!= THREAD_KILLED
) {
571 rb_thread_t
**pth
= &target_th
->join_list_head
;
575 *pth
= th
->join_list_next
;
578 pth
= &(*pth
)->join_list_next
;
586 thread_join_sleep(VALUE arg
)
588 struct join_arg
*p
= (struct join_arg
*)arg
;
589 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
590 double now
, limit
= p
->limit
;
592 while (target_th
->status
!= THREAD_KILLED
) {
594 sleep_forever(th
, 1);
599 thread_debug("thread_join: timeout (thid: %p)\n",
600 (void *)target_th
->thread_id
);
603 sleep_wait_for_interrupt(th
, limit
- now
);
605 thread_debug("thread_join: interrupted (thid: %p)\n",
606 (void *)target_th
->thread_id
);
612 thread_join(rb_thread_t
*target_th
, double delay
)
614 rb_thread_t
*th
= GET_THREAD();
617 arg
.target
= target_th
;
619 arg
.limit
= timeofday() + delay
;
620 arg
.forever
= delay
== DELAY_INFTY
;
622 thread_debug("thread_join (thid: %p)\n", (void *)target_th
->thread_id
);
624 if (target_th
->status
!= THREAD_KILLED
) {
625 th
->join_list_next
= target_th
->join_list_head
;
626 target_th
->join_list_head
= th
;
627 if (!rb_ensure(thread_join_sleep
, (VALUE
)&arg
,
628 remove_from_join_list
, (VALUE
)&arg
)) {
633 thread_debug("thread_join: success (thid: %p)\n",
634 (void *)target_th
->thread_id
);
636 if (target_th
->errinfo
!= Qnil
) {
637 VALUE err
= target_th
->errinfo
;
642 else if (TYPE(target_th
->errinfo
) == T_NODE
) {
643 rb_exc_raise(vm_make_jump_tag_but_local_jump(
644 GET_THROWOBJ_STATE(err
), GET_THROWOBJ_VAL(err
)));
647 /* normal exception */
651 return target_th
->self
;
657 * thr.join(limit) => thr
659 * The calling thread will suspend execution and run <i>thr</i>. Does not
660 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
661 * the time limit expires, <code>nil</code> will be returned, otherwise
662 * <i>thr</i> is returned.
664 * Any threads not joined will be killed when the main program exits. If
665 * <i>thr</i> had previously raised an exception and the
666 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
667 * (so the exception has not yet been processed) it will be processed at this
670 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
671 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
672 * x.join # Let x thread finish, a will be killed on exit.
678 * The following example illustrates the <i>limit</i> parameter.
680 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
681 * puts "Waiting" until y.join(0.15)
695 thread_join_m(int argc
, VALUE
*argv
, VALUE self
)
697 rb_thread_t
*target_th
;
698 double delay
= DELAY_INFTY
;
701 GetThreadPtr(self
, target_th
);
703 rb_scan_args(argc
, argv
, "01", &limit
);
705 delay
= rb_num2dbl(limit
);
708 return thread_join(target_th
, delay
);
715 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
718 * a = Thread.new { 2 + 2 }
723 thread_value(VALUE self
)
726 GetThreadPtr(self
, th
);
727 thread_join(th
, DELAY_INFTY
);
/*
 * Convert a floating-point second count into a struct timeval.
 * Negative fractional parts are normalized so tv_usec stays in
 * [0, 1e6) (e.g. -0.25 -> {-1, 750000}).
 */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
750 sleep_forever(rb_thread_t
*th
, int deadlockable
)
752 enum rb_thread_status prev_status
= th
->status
;
754 th
->status
= deadlockable
? THREAD_STOPPED_FOREVER
: THREAD_STOPPED
;
758 rb_check_deadlock(th
->vm
);
764 RUBY_VM_CHECK_INTS();
765 } while (th
->status
== THREAD_STOPPED_FOREVER
);
766 th
->status
= prev_status
;
770 getclockofday(struct timeval
*tp
)
772 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
775 if (clock_gettime(CLOCK_MONOTONIC
, &ts
) == 0) {
776 tp
->tv_sec
= ts
.tv_sec
;
777 tp
->tv_usec
= ts
.tv_nsec
/ 1000;
781 gettimeofday(tp
, NULL
);
786 sleep_timeval(rb_thread_t
*th
, struct timeval tv
)
788 struct timeval to
, tvn
;
789 enum rb_thread_status prev_status
= th
->status
;
792 to
.tv_sec
+= tv
.tv_sec
;
793 if ((to
.tv_usec
+= tv
.tv_usec
) >= 1000000) {
795 to
.tv_usec
-= 1000000;
798 th
->status
= THREAD_STOPPED
;
800 native_sleep(th
, &tv
);
801 RUBY_VM_CHECK_INTS();
803 if (to
.tv_sec
< tvn
.tv_sec
) break;
804 if (to
.tv_sec
== tvn
.tv_sec
&& to
.tv_usec
<= tvn
.tv_usec
) break;
805 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
806 (long)to
.tv_sec
, to
.tv_usec
,
807 (long)tvn
.tv_sec
, tvn
.tv_usec
);
808 tv
.tv_sec
= to
.tv_sec
- tvn
.tv_sec
;
809 if ((tv
.tv_usec
= to
.tv_usec
- tvn
.tv_usec
) < 0) {
811 tv
.tv_usec
+= 1000000;
813 } while (th
->status
== THREAD_STOPPED
);
814 th
->status
= prev_status
;
/*
 * Sleep the current thread indefinitely. Not deadlock-detectable
 * (second argument 0 = THREAD_STOPPED, not STOPPED_FOREVER).
 * Fix: use a `(void)` prototype instead of a K&R empty parameter list.
 */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/*
 * Sleep the current thread indefinitely, participating in deadlock
 * detection (second argument 1 = THREAD_STOPPED_FOREVER).
 * Fix: use a `(void)` prototype instead of a K&R empty parameter list.
 */
void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
834 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
837 if (clock_gettime(CLOCK_MONOTONIC
, &tp
) == 0) {
838 return (double)tp
.tv_sec
+ (double)tp
.tv_nsec
* 1e-9;
843 gettimeofday(&tv
, NULL
);
844 return (double)tv
.tv_sec
+ (double)tv
.tv_usec
* 1e-6;
849 sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
)
851 sleep_timeval(th
, double2timeval(sleepsec
));
855 sleep_for_polling(rb_thread_t
*th
)
859 time
.tv_usec
= 100 * 1000; /* 0.1 sec */
860 sleep_timeval(th
, time
);
864 rb_thread_wait_for(struct timeval time
)
866 rb_thread_t
*th
= GET_THREAD();
867 sleep_timeval(th
, time
);
871 rb_thread_polling(void)
873 RUBY_VM_CHECK_INTS();
874 if (!rb_thread_alone()) {
875 rb_thread_t
*th
= GET_THREAD();
876 sleep_for_polling(th
);
881 * CAUTION: This function causes thread switching.
882 * rb_thread_check_ints() check ruby's interrupts.
883 * some interrupt needs thread switching/invoke handlers,
/*
 * CAUTION: may cause thread switching — checks and services Ruby's
 * pending interrupts for the current thread (see comment above).
 */
void
rb_thread_check_ints(void)
{
    RUBY_VM_CHECK_INTS();
}
893 struct timeval
rb_time_timeval();
/* C API: sleep the current thread for `sec` whole seconds. */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
902 rb_thread_schedule(void)
904 thread_debug("rb_thread_schedule\n");
905 if (!rb_thread_alone()) {
906 rb_thread_t
*th
= GET_THREAD();
908 thread_debug("rb_thread_schedule/switch start\n");
910 rb_gc_save_machine_context(th
);
911 native_mutex_unlock(&th
->vm
->global_vm_lock
);
913 native_thread_yield();
915 native_mutex_lock(&th
->vm
->global_vm_lock
);
917 rb_thread_set_current(th
);
918 thread_debug("rb_thread_schedule/switch done\n");
920 RUBY_VM_CHECK_INTS();
924 int rb_thread_critical
; /* TODO: dummy variable */
927 rb_thread_blocking_region(
928 rb_blocking_function_t
*func
, void *data1
,
929 rb_unblock_function_t
*ubf
, void *data2
)
932 rb_thread_t
*th
= GET_THREAD();
934 if (ubf
== RB_UBF_DFL
) {
950 * Invokes the thread scheduler to pass execution to another thread.
952 * a = Thread.new { print "a"; Thread.pass;
953 * print "b"; Thread.pass;
955 * b = Thread.new { print "x"; Thread.pass;
956 * print "y"; Thread.pass;
967 thread_s_pass(VALUE klass
)
969 rb_thread_schedule();
978 rb_thread_execute_interrupts(rb_thread_t
*th
)
980 if (th
->raised_flag
) return;
982 while (th
->interrupt_flag
) {
983 enum rb_thread_status status
= th
->status
;
984 int timer_interrupt
= th
->interrupt_flag
& 0x01;
985 int finalizer_interrupt
= th
->interrupt_flag
& 0x04;
987 th
->status
= THREAD_RUNNABLE
;
988 th
->interrupt_flag
= 0;
990 /* signal handling */
991 if (th
->exec_signal
) {
992 int sig
= th
->exec_signal
;
994 rb_signal_exec(th
, sig
);
997 /* exception from another thread */
998 if (th
->thrown_errinfo
) {
999 VALUE err
= th
->thrown_errinfo
;
1000 th
->thrown_errinfo
= 0;
1001 thread_debug("rb_thread_execute_interrupts: %ld\n", err
);
1003 if (err
== eKillSignal
|| err
== eTerminateSignal
) {
1004 th
->errinfo
= INT2FIX(TAG_FATAL
);
1005 TH_JUMP_TAG(th
, TAG_FATAL
);
1011 th
->status
= status
;
1013 if (finalizer_interrupt
) {
1014 rb_gc_finalize_deferred();
1017 if (timer_interrupt
) {
1018 EXEC_EVENT_HOOK(th
, RUBY_EVENT_SWITCH
, th
->cfp
->self
, 0, 0);
1020 if (th
->slice
> 0) {
1025 rb_thread_schedule();
1026 if (th
->slice
< 0) {
1031 th
->slice
= th
->priority
;
1040 rb_gc_mark_threads(void)
1045 /*****************************************************/
1048 rb_thread_ready(rb_thread_t
*th
)
1050 rb_thread_interrupt(th
);
1054 rb_thread_raise(int argc
, VALUE
*argv
, rb_thread_t
*th
)
1059 if (rb_thread_dead(th
)) {
1063 if (th
->thrown_errinfo
!= 0 || th
->raised_flag
) {
1064 rb_thread_schedule();
1068 exc
= rb_make_exception(argc
, argv
);
1069 th
->thrown_errinfo
= exc
;
1070 rb_thread_ready(th
);
1075 rb_thread_signal_raise(void *thptr
, int sig
)
1078 rb_thread_t
*th
= thptr
;
1080 argv
[0] = rb_eSignal
;
1081 argv
[1] = INT2FIX(sig
);
1082 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1086 rb_thread_signal_exit(void *thptr
)
1089 rb_thread_t
*th
= thptr
;
1091 argv
[0] = rb_eSystemExit
;
1092 argv
[1] = rb_str_new2("exit");
1093 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1097 rb_thread_set_raised(rb_thread_t
*th
)
1099 if (th
->raised_flag
& RAISED_EXCEPTION
) {
1102 th
->raised_flag
|= RAISED_EXCEPTION
;
1107 rb_thread_reset_raised(rb_thread_t
*th
)
1109 if (!(th
->raised_flag
& RAISED_EXCEPTION
)) {
1112 th
->raised_flag
&= ~RAISED_EXCEPTION
;
1117 rb_thread_fd_close(int fd
)
1124 * thr.raise(exception)
1126 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1127 * caller does not have to be <i>thr</i>.
1129 * Thread.abort_on_exception = true
1130 * a = Thread.new { sleep(200) }
1133 * <em>produces:</em>
1135 * prog.rb:3: Gotcha (RuntimeError)
1136 * from prog.rb:2:in `initialize'
1137 * from prog.rb:2:in `new'
1142 thread_raise_m(int argc
, VALUE
*argv
, VALUE self
)
1145 GetThreadPtr(self
, th
);
1146 rb_thread_raise(argc
, argv
, th
);
1153 * thr.exit => thr or nil
1154 * thr.kill => thr or nil
1155 * thr.terminate => thr or nil
1157 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1158 * is already marked to be killed, <code>exit</code> returns the
1159 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1164 rb_thread_kill(VALUE thread
)
1168 GetThreadPtr(thread
, th
);
1170 if (th
!= GET_THREAD() && th
->safe_level
< 4) {
1173 if (th
->status
== THREAD_TO_KILL
|| th
->status
== THREAD_KILLED
) {
1176 if (th
== th
->vm
->main_thread
) {
1177 rb_exit(EXIT_SUCCESS
);
1180 thread_debug("rb_thread_kill: %p (%p)\n", th
, (void *)th
->thread_id
);
1182 rb_thread_interrupt(th
);
1183 th
->thrown_errinfo
= eKillSignal
;
1184 th
->status
= THREAD_TO_KILL
;
1192 * Thread.kill(thread) => thread
1194 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1197 * a = Thread.new { loop { count += 1 } }
1199 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1201 * a.alive? #=> false
1205 rb_thread_s_kill(VALUE obj
, VALUE th
)
1207 return rb_thread_kill(th
);
1213 * Thread.exit => thread
1215 * Terminates the currently running thread and schedules another thread to be
1216 * run. If this thread is already marked to be killed, <code>exit</code>
1217 * returns the <code>Thread</code>. If this is the main thread, or the last
1218 * thread, exit the process.
1222 rb_thread_exit(void)
1224 return rb_thread_kill(GET_THREAD()->self
);
1232 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1233 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1235 * c = Thread.new { Thread.stop; puts "hey!" }
1238 * <em>produces:</em>
1244 rb_thread_wakeup(VALUE thread
)
1247 GetThreadPtr(thread
, th
);
1249 if (th
->status
== THREAD_KILLED
) {
1250 rb_raise(rb_eThreadError
, "killed thread");
1252 rb_thread_ready(th
);
1253 if (th
->status
!= THREAD_TO_KILL
) {
1254 th
->status
= THREAD_RUNNABLE
;
1264 * Wakes up <i>thr</i>, making it eligible for scheduling.
1266 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1272 * <em>produces:</em>
1280 rb_thread_run(VALUE thread
)
1282 rb_thread_wakeup(thread
);
1283 rb_thread_schedule();
1290 * Thread.stop => nil
1292 * Stops execution of the current thread, putting it into a ``sleep'' state,
1293 * and schedules execution of another thread.
1295 * a = Thread.new { print "a"; Thread.stop; print "c" }
1301 * <em>produces:</em>
1307 rb_thread_stop(void)
1309 if (rb_thread_alone()) {
1310 rb_raise(rb_eThreadError
,
1311 "stopping only thread\n\tnote: use sleep to stop forever");
1313 rb_thread_sleep_deadly();
1318 thread_list_i(st_data_t key
, st_data_t val
, void *data
)
1320 VALUE ary
= (VALUE
)data
;
1322 GetThreadPtr((VALUE
)key
, th
);
1324 switch (th
->status
) {
1325 case THREAD_RUNNABLE
:
1326 case THREAD_STOPPED
:
1327 case THREAD_STOPPED_FOREVER
:
1328 case THREAD_TO_KILL
:
1329 rb_ary_push(ary
, th
->self
);
1336 /********************************************************************/
1340 * Thread.list => array
1342 * Returns an array of <code>Thread</code> objects for all threads that are
1343 * either runnable or stopped.
1345 * Thread.new { sleep(200) }
1346 * Thread.new { 1000000.times {|i| i*i } }
1347 * Thread.new { Thread.stop }
1348 * Thread.list.each {|t| p t}
1350 * <em>produces:</em>
1352 * #<Thread:0x401b3e84 sleep>
1353 * #<Thread:0x401b3f38 run>
1354 * #<Thread:0x401b3fb0 sleep>
1355 * #<Thread:0x401bdf4c run>
1359 rb_thread_list(void)
1361 VALUE ary
= rb_ary_new();
1362 st_foreach(GET_THREAD()->vm
->living_threads
, thread_list_i
, ary
);
1367 rb_thread_current(void)
1369 return GET_THREAD()->self
;
1374 * Thread.current => thread
1376 * Returns the currently executing thread.
1378 * Thread.current #=> #<Thread:0x401bdf4c run>
1382 thread_s_current(VALUE klass
)
1384 return rb_thread_current();
1388 rb_thread_main(void)
1390 return GET_THREAD()->vm
->main_thread
->self
;
1394 rb_thread_s_main(VALUE klass
)
1396 return rb_thread_main();
1402 * Thread.abort_on_exception => true or false
1404 * Returns the status of the global ``abort on exception'' condition. The
1405 * default is <code>false</code>. When set to <code>true</code>, or if the
1406 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1407 * command line option <code>-d</code> was specified) all threads will abort
1408 * (the process will <code>exit(0)</code>) if an exception is raised in any
1409 * thread. See also <code>Thread::abort_on_exception=</code>.
1413 rb_thread_s_abort_exc(void)
1415 return GET_THREAD()->vm
->thread_abort_on_exception
? Qtrue
: Qfalse
;
1421 * Thread.abort_on_exception= boolean => true or false
1423 * When set to <code>true</code>, all threads will abort if an exception is
1424 * raised. Returns the new state.
1426 * Thread.abort_on_exception = true
1427 * t1 = Thread.new do
1428 * puts "In new thread"
1429 * raise "Exception from thread"
1432 * puts "not reached"
1434 * <em>produces:</em>
1437 * prog.rb:4: Exception from thread (RuntimeError)
1438 * from prog.rb:2:in `initialize'
1439 * from prog.rb:2:in `new'
1444 rb_thread_s_abort_exc_set(VALUE self
, VALUE val
)
1447 GET_THREAD()->vm
->thread_abort_on_exception
= RTEST(val
);
1454 * thr.abort_on_exception => true or false
1456 * Returns the status of the thread-local ``abort on exception'' condition for
1457 * <i>thr</i>. The default is <code>false</code>. See also
1458 * <code>Thread::abort_on_exception=</code>.
1462 rb_thread_abort_exc(VALUE thread
)
1465 GetThreadPtr(thread
, th
);
1466 return th
->abort_on_exception
? Qtrue
: Qfalse
;
1472 * thr.abort_on_exception= boolean => true or false
1474 * When set to <code>true</code>, causes all threads (including the main
1475 * program) to abort if an exception is raised in <i>thr</i>. The process will
1476 * effectively <code>exit(0)</code>.
1480 rb_thread_abort_exc_set(VALUE thread
, VALUE val
)
1485 GetThreadPtr(thread
, th
);
1486 th
->abort_on_exception
= RTEST(val
);
1493 * thr.group => thgrp or nil
1495 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1496 * the thread is not a member of any group.
1498 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1502 rb_thread_group(VALUE thread
)
1506 GetThreadPtr(thread
, th
);
1507 group
= th
->thgroup
;
1516 thread_status_name(enum rb_thread_status status
)
1519 case THREAD_RUNNABLE
:
1521 case THREAD_STOPPED
:
1522 case THREAD_STOPPED_FOREVER
:
1524 case THREAD_TO_KILL
:
1534 rb_thread_dead(rb_thread_t
*th
)
1536 return th
->status
== THREAD_KILLED
;
1542 * thr.status => string, false or nil
1544 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1545 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1546 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1547 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1548 * terminated with an exception.
1550 * a = Thread.new { raise("die now") }
1551 * b = Thread.new { Thread.stop }
1552 * c = Thread.new { Thread.exit }
1553 * d = Thread.new { sleep }
1554 * d.kill #=> #<Thread:0x401b3678 aborting>
1556 * b.status #=> "sleep"
1557 * c.status #=> false
1558 * d.status #=> "aborting"
1559 * Thread.current.status #=> "run"
1563 rb_thread_status(VALUE thread
)
1566 GetThreadPtr(thread
, th
);
1568 if (rb_thread_dead(th
)) {
1569 if (!NIL_P(th
->errinfo
) && !FIXNUM_P(th
->errinfo
)
1575 return rb_str_new2(thread_status_name(th
->status
));
1581 * thr.alive? => true or false
1583 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1585 * thr = Thread.new { }
1586 * thr.join #=> #<Thread:0x401b3fb0 dead>
1587 * Thread.current.alive? #=> true
1588 * thr.alive? #=> false
1592 rb_thread_alive_p(VALUE thread
)
1595 GetThreadPtr(thread
, th
);
1597 if (rb_thread_dead(th
))
1604 * thr.stop? => true or false
1606 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1608 * a = Thread.new { Thread.stop }
1609 * b = Thread.current
1615 rb_thread_stop_p(VALUE thread
)
1618 GetThreadPtr(thread
, th
);
1620 if (rb_thread_dead(th
))
1622 if (th
->status
== THREAD_STOPPED
|| th
->status
== THREAD_STOPPED_FOREVER
)
1629 * thr.safe_level => integer
1631 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1632 * levels can help when implementing sandboxes which run insecure code.
1634 * thr = Thread.new { $SAFE = 3; sleep }
1635 * Thread.current.safe_level #=> 0
1636 * thr.safe_level #=> 3
1640 rb_thread_safe_level(VALUE thread
)
1643 GetThreadPtr(thread
, th
);
1645 return INT2NUM(th
->safe_level
);
1650 * thr.inspect => string
1652 * Dump the name, id, and status of _thr_ to a string.
1656 rb_thread_inspect(VALUE thread
)
1658 const char *cname
= rb_obj_classname(thread
);
1663 GetThreadPtr(thread
, th
);
1664 status
= thread_status_name(th
->status
);
1665 str
= rb_sprintf("#<%s:%p %s>", cname
, (void *)thread
, status
);
1666 OBJ_INFECT(str
, thread
);
1672 rb_thread_local_aref(VALUE thread
, ID id
)
1677 GetThreadPtr(thread
, th
);
1678 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1679 rb_raise(rb_eSecurityError
, "Insecure: thread locals");
1681 if (!th
->local_storage
) {
1684 if (st_lookup(th
->local_storage
, id
, &val
)) {
1692 * thr[sym] => obj or nil
1694 * Attribute Reference---Returns the value of a thread-local variable, using
1695 * either a symbol or a string name. If the specified variable does not exist,
1696 * returns <code>nil</code>.
1698 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1699 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1700 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1701 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1703 * <em>produces:</em>
1705 * #<Thread:0x401b3b3c sleep>: C
1706 * #<Thread:0x401b3bc8 sleep>: B
1707 * #<Thread:0x401b3c68 sleep>: A
1708 * #<Thread:0x401bdf4c run>:
1712 rb_thread_aref(VALUE thread
, VALUE id
)
1714 return rb_thread_local_aref(thread
, rb_to_id(id
));
1718 rb_thread_local_aset(VALUE thread
, ID id
, VALUE val
)
1721 GetThreadPtr(thread
, th
);
1723 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1724 rb_raise(rb_eSecurityError
, "Insecure: can't modify thread locals");
1726 if (OBJ_FROZEN(thread
)) {
1727 rb_error_frozen("thread locals");
1729 if (!th
->local_storage
) {
1730 th
->local_storage
= st_init_numtable();
1733 st_delete_wrap(th
->local_storage
, id
);
1736 st_insert(th
->local_storage
, id
, val
);
1742 * thr[sym] = obj => obj
1744 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1745 * using either a symbol or a string. See also <code>Thread#[]</code>.
1749 rb_thread_aset(VALUE self
, ID id
, VALUE val
)
1751 return rb_thread_local_aset(self
, rb_to_id(id
), val
);
1756 * thr.key?(sym) => true or false
1758 * Returns <code>true</code> if the given string (or symbol) exists as a
1759 * thread-local variable.
1761 * me = Thread.current
1763 * me.key?(:oliver) #=> true
1764 * me.key?(:stanley) #=> false
1768 rb_thread_key_p(VALUE self
, VALUE key
)
1771 ID id
= rb_to_id(key
);
1773 GetThreadPtr(self
, th
);
1775 if (!th
->local_storage
) {
1778 if (st_lookup(th
->local_storage
, id
, 0)) {
1785 thread_keys_i(ID key
, VALUE value
, VALUE ary
)
1787 rb_ary_push(ary
, ID2SYM(key
));
1792 vm_living_thread_num(rb_vm_t
*vm
)
1794 return vm
->living_threads
->num_entries
;
1801 if (GET_THREAD()->vm
->living_threads
) {
1802 num
= vm_living_thread_num(GET_THREAD()->vm
);
1803 thread_debug("rb_thread_alone: %d\n", num
);
1812 * Returns an an array of the names of the thread-local variables (as Symbols).
1814 * thr = Thread.new do
1815 * Thread.current[:cat] = 'meow'
1816 * Thread.current["dog"] = 'woof'
1818 * thr.join #=> #<Thread:0x401b3f10 dead>
1819 * thr.keys #=> [:dog, :cat]
1823 rb_thread_keys(VALUE self
)
1826 VALUE ary
= rb_ary_new();
1827 GetThreadPtr(self
, th
);
1829 if (th
->local_storage
) {
1830 st_foreach(th
->local_storage
, thread_keys_i
, ary
);
1837 * thr.priority => integer
1839 * Returns the priority of <i>thr</i>. Default is inherited from the
1840 * current thread which creating the new thread, or zero for the
1841 * initial main thread; higher-priority threads will run before
1842 * lower-priority threads.
1844 * Thread.current.priority #=> 0
1848 rb_thread_priority(VALUE thread
)
1851 GetThreadPtr(thread
, th
);
1852 return INT2NUM(th
->priority
);
1858 * thr.priority= integer => thr
1860 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1861 * will run before lower-priority threads.
1863 * count1 = count2 = 0
1865 * loop { count1 += 1 }
1870 * loop { count2 += 1 }
1879 rb_thread_priority_set(VALUE thread
, VALUE prio
)
1883 GetThreadPtr(thread
, th
);
1887 #if USE_NATIVE_THREAD_PRIORITY
1888 th
->priority
= NUM2INT(prio
);
1889 native_thread_apply_priority(th
);
1891 priority
= NUM2INT(prio
);
1892 if (priority
> RUBY_THREAD_PRIORITY_MAX
) {
1893 priority
= RUBY_THREAD_PRIORITY_MAX
;
1895 else if (priority
< RUBY_THREAD_PRIORITY_MIN
) {
1896 priority
= RUBY_THREAD_PRIORITY_MIN
;
1898 th
->priority
= priority
;
1899 th
->slice
= priority
;
1901 return INT2NUM(th
->priority
);
1906 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1908 rb_fd_init(volatile rb_fdset_t
*fds
)
1911 fds
->fdset
= ALLOC(fd_set
);
1912 FD_ZERO(fds
->fdset
);
1916 rb_fd_term(rb_fdset_t
*fds
)
1918 if (fds
->fdset
) xfree(fds
->fdset
);
1924 rb_fd_zero(rb_fdset_t
*fds
)
1927 MEMZERO(fds
->fdset
, fd_mask
, howmany(fds
->maxfd
, NFDBITS
));
1928 FD_ZERO(fds
->fdset
);
1933 rb_fd_resize(int n
, rb_fdset_t
*fds
)
1935 int m
= howmany(n
+ 1, NFDBITS
) * sizeof(fd_mask
);
1936 int o
= howmany(fds
->maxfd
, NFDBITS
) * sizeof(fd_mask
);
1938 if (m
< sizeof(fd_set
)) m
= sizeof(fd_set
);
1939 if (o
< sizeof(fd_set
)) o
= sizeof(fd_set
);
1942 fds
->fdset
= realloc(fds
->fdset
, m
);
1943 memset((char *)fds
->fdset
+ o
, 0, m
- o
);
1945 if (n
>= fds
->maxfd
) fds
->maxfd
= n
+ 1;
1949 rb_fd_set(int n
, rb_fdset_t
*fds
)
1951 rb_fd_resize(n
, fds
);
1952 FD_SET(n
, fds
->fdset
);
1956 rb_fd_clr(int n
, rb_fdset_t
*fds
)
1958 if (n
>= fds
->maxfd
) return;
1959 FD_CLR(n
, fds
->fdset
);
1963 rb_fd_isset(int n
, const rb_fdset_t
*fds
)
1965 if (n
>= fds
->maxfd
) return 0;
1966 return FD_ISSET(n
, fds
->fdset
) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1970 rb_fd_copy(rb_fdset_t
*dst
, const fd_set
*src
, int max
)
1972 int size
= howmany(max
, NFDBITS
) * sizeof(fd_mask
);
1974 if (size
< sizeof(fd_set
)) size
= sizeof(fd_set
);
1976 dst
->fdset
= realloc(dst
->fdset
, size
);
1977 memcpy(dst
->fdset
, src
, size
);
1981 rb_fd_select(int n
, rb_fdset_t
*readfds
, rb_fdset_t
*writefds
, rb_fdset_t
*exceptfds
, struct timeval
*timeout
)
1983 fd_set
*r
= NULL
, *w
= NULL
, *e
= NULL
;
1985 rb_fd_resize(n
- 1, readfds
);
1986 r
= rb_fd_ptr(readfds
);
1989 rb_fd_resize(n
- 1, writefds
);
1990 w
= rb_fd_ptr(writefds
);
1993 rb_fd_resize(n
- 1, exceptfds
);
1994 e
= rb_fd_ptr(exceptfds
);
1996 return select(n
, r
, w
, e
, timeout
);
2004 #define FD_ZERO(f) rb_fd_zero(f)
2005 #define FD_SET(i, f) rb_fd_set(i, f)
2006 #define FD_CLR(i, f) rb_fd_clr(i, f)
2007 #define FD_ISSET(i, f) rb_fd_isset(i, f)
2011 #if defined(__CYGWIN__) || defined(_WIN32)
/*
 * Three-way comparison of two timevals (strcmp-style sign convention):
 * seconds decide first, microseconds break ties.
 */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;

    if (sec_diff != 0)
        return sec_diff;
    return a->tv_usec - b->tv_usec;
}
/*
 * In-place *rest -= *wait with microsecond borrow.  Returns 1 on
 * success; returns 0 (leaving rest partially updated) when rest would
 * underflow, i.e. the remaining time is exhausted.
 */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    /* Borrow whole seconds until the usec subtraction cannot underflow. */
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) {
            return 0;
        }
        rest->tv_sec -= 1;
        rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
2036 do_select(int n
, fd_set
*read
, fd_set
*write
, fd_set
*except
,
2037 struct timeval
*timeout
)
2040 fd_set orig_read
, orig_write
, orig_except
;
2044 struct timeval wait_rest
;
2045 # if defined(__CYGWIN__) || defined(_WIN32)
2046 struct timeval start_time
;
2050 # if defined(__CYGWIN__) || defined(_WIN32)
2051 gettimeofday(&start_time
, NULL
);
2052 limit
= (double)start_time
.tv_sec
+ (double)start_time
.tv_usec
*1e-6;
2054 limit
= timeofday();
2056 limit
+= (double)timeout
->tv_sec
+(double)timeout
->tv_usec
*1e-6;
2057 wait_rest
= *timeout
;
2058 timeout
= &wait_rest
;
2062 if (read
) orig_read
= *read
;
2063 if (write
) orig_write
= *write
;
2064 if (except
) orig_except
= *except
;
2069 #if defined(__CYGWIN__) || defined(_WIN32)
2072 /* polling duration: 100ms */
2073 struct timeval wait_100ms
, *wait
;
2074 wait_100ms
.tv_sec
= 0;
2075 wait_100ms
.tv_usec
= 100 * 1000; /* 100 ms */
2078 wait
= (timeout
== 0 || cmp_tv(&wait_100ms
, timeout
) > 0) ? &wait_100ms
: timeout
;
2081 result
= select(n
, read
, write
, except
, wait
);
2082 if (result
< 0) lerrno
= errno
;
2083 if (result
!= 0) break;
2085 if (read
) *read
= orig_read
;
2086 if (write
) *write
= orig_write
;
2087 if (except
) *except
= orig_except
;
2090 struct timeval elapsed
;
2091 gettimeofday(&elapsed
, NULL
);
2092 subtract_tv(&elapsed
, &start_time
);
2093 if (!subtract_tv(timeout
, &elapsed
)) {
2097 if (cmp_tv(&wait_100ms
, timeout
) < 0) wait
= timeout
;
2099 } while (__th
->interrupt_flag
== 0);
2101 } while (result
== 0 && !finish
);
2105 result
= select(n
, read
, write
, except
, timeout
);
2106 if (result
< 0) lerrno
= errno
;
2107 }, ubf_select
, GET_THREAD());
2118 if (read
) *read
= orig_read
;
2119 if (write
) *write
= orig_write
;
2120 if (except
) *except
= orig_except
;
2123 double d
= limit
- timeofday();
2125 wait_rest
.tv_sec
= (unsigned int)d
;
2126 wait_rest
.tv_usec
= (long)((d
-(double)wait_rest
.tv_sec
)*1e6
);
2127 if (wait_rest
.tv_sec
< 0) wait_rest
.tv_sec
= 0;
2128 if (wait_rest
.tv_usec
< 0) wait_rest
.tv_usec
= 0;
2140 rb_thread_wait_fd_rw(int fd
, int read
)
2143 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd
, read
? "read" : "write");
2146 rb_raise(rb_eIOError
, "closed stream");
2148 while (result
<= 0) {
2154 result
= do_select(fd
+ 1, rb_fd_ptr(&set
), 0, 0, 0);
2157 result
= do_select(fd
+ 1, 0, rb_fd_ptr(&set
), 0, 0);
2167 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd
, read
? "read" : "write");
/* Block the current thread until fd becomes readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
2177 rb_thread_fd_writable(int fd
)
2179 rb_thread_wait_fd_rw(fd
, 0);
2184 rb_thread_select(int max
, fd_set
* read
, fd_set
* write
, fd_set
* except
,
2185 struct timeval
*timeout
)
2187 if (!read
&& !write
&& !except
) {
2189 rb_thread_sleep_forever();
2192 rb_thread_wait_for(*timeout
);
2196 return do_select(max
, read
, write
, except
, timeout
);
2205 #ifdef USE_CONSERVATIVE_STACK_END
2207 rb_gc_set_stack_end(VALUE
**stack_end_p
)
2210 *stack_end_p
= &stack_end
;
2215 rb_gc_save_machine_context(rb_thread_t
*th
)
2217 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
2218 FLUSH_REGISTER_WINDOWS
;
2220 th
->machine_register_stack_end
= rb_ia64_bsp();
2222 setjmp(th
->machine_regs
);
2229 int rb_get_next_signal(rb_vm_t
*vm
);
2232 timer_thread_function(void *arg
)
2234 rb_vm_t
*vm
= arg
; /* TODO: fix me for Multi-VM */
2236 /* for time slice */
2237 RUBY_VM_SET_TIMER_INTERRUPT(vm
->running_thread
);
2240 if (vm
->buffered_signal_size
&& vm
->main_thread
->exec_signal
== 0) {
2241 rb_thread_t
*mth
= vm
->main_thread
;
2242 enum rb_thread_status prev_status
= mth
->status
;
2243 mth
->exec_signal
= rb_get_next_signal(vm
);
2244 thread_debug("main_thread: %s\n", thread_status_name(prev_status
));
2245 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2246 (long)vm
->buffered_signal_size
, vm
->main_thread
->exec_signal
);
2247 if (mth
->status
!= THREAD_KILLED
) mth
->status
= THREAD_RUNNABLE
;
2248 rb_thread_interrupt(mth
);
2249 mth
->status
= prev_status
;
2253 /* prove profiler */
2254 if (vm
->prove_profile
.enable
) {
2255 rb_thread_t
*th
= vm
->running_thread
;
2257 if (vm
->during_gc
) {
2258 /* GC prove profiling */
2265 rb_thread_stop_timer_thread(void)
2267 if (timer_thread_id
) {
2269 native_thread_join(timer_thread_id
);
2270 timer_thread_id
= 0;
2275 rb_thread_reset_timer_thread(void)
2277 timer_thread_id
= 0;
2281 rb_thread_start_timer_thread(void)
2283 rb_thread_create_timer_thread();
2287 clear_coverage_i(st_data_t key
, st_data_t val
, st_data_t dummy
)
2290 VALUE lines
= (VALUE
)val
;
2292 for (i
= 0; i
< RARRAY_LEN(lines
); i
++) {
2293 if (RARRAY_PTR(lines
)[i
] != Qnil
) {
2294 RARRAY_PTR(lines
)[i
] = INT2FIX(0);
2301 clear_coverage(void)
2303 extern VALUE
rb_get_coverages(void);
2304 VALUE coverages
= rb_get_coverages();
2305 if (RTEST(coverages
)) {
2306 st_foreach(RHASH_TBL(coverages
), clear_coverage_i
, 0);
2311 terminate_atfork_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2315 GetThreadPtr(thval
, th
);
2317 if (th
!= current_th
) {
2318 thread_cleanup_func(th
);
2324 rb_thread_atfork(void)
2326 rb_thread_t
*th
= GET_THREAD();
2327 rb_vm_t
*vm
= th
->vm
;
2328 VALUE thval
= th
->self
;
2329 vm
->main_thread
= th
;
2331 st_foreach(vm
->living_threads
, terminate_atfork_i
, (st_data_t
)th
);
2332 st_clear(vm
->living_threads
);
2333 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2336 rb_reset_random_seed();
2340 terminate_atfork_before_exec_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2344 GetThreadPtr(thval
, th
);
2346 if (th
!= current_th
) {
2347 thread_cleanup_func_before_exec(th
);
2353 rb_thread_atfork_before_exec(void)
2355 rb_thread_t
*th
= GET_THREAD();
2356 rb_vm_t
*vm
= th
->vm
;
2357 VALUE thval
= th
->self
;
2358 vm
->main_thread
= th
;
2360 st_foreach(vm
->living_threads
, terminate_atfork_before_exec_i
, (st_data_t
)th
);
2361 st_clear(vm
->living_threads
);
2362 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2373 * Document-class: ThreadGroup
2375 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2376 * threads as a group. A <code>Thread</code> can belong to only one
2377 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2378 * remove it from any previous group.
2380 * Newly created threads belong to the same group as the thread from which they
2384 static VALUE
thgroup_s_alloc(VALUE
);
2386 thgroup_s_alloc(VALUE klass
)
2389 struct thgroup
*data
;
2391 group
= Data_Make_Struct(klass
, struct thgroup
, 0, -1, data
);
2393 data
->group
= group
;
2398 struct thgroup_list_params
{
2404 thgroup_list_i(st_data_t key
, st_data_t val
, st_data_t data
)
2406 VALUE thread
= (VALUE
)key
;
2407 VALUE ary
= ((struct thgroup_list_params
*)data
)->ary
;
2408 VALUE group
= ((struct thgroup_list_params
*)data
)->group
;
2410 GetThreadPtr(thread
, th
);
2412 if (th
->thgroup
== group
) {
2413 rb_ary_push(ary
, thread
);
2420 * thgrp.list => array
2422 * Returns an array of all existing <code>Thread</code> objects that belong to
2425 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2429 thgroup_list(VALUE group
)
2431 VALUE ary
= rb_ary_new();
2432 struct thgroup_list_params param
;
2435 param
.group
= group
;
2436 st_foreach(GET_THREAD()->vm
->living_threads
, thgroup_list_i
, (st_data_t
) & param
);
2443 * thgrp.enclose => thgrp
2445 * Prevents threads from being added to or removed from the receiving
2446 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2447 * <code>ThreadGroup</code>.
2449 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2450 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2451 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2454 * <em>produces:</em>
2456 * ThreadError: can't move from the enclosed thread group
2460 thgroup_enclose(VALUE group
)
2462 struct thgroup
*data
;
2464 Data_Get_Struct(group
, struct thgroup
, data
);
2473 * thgrp.enclosed? => true or false
2475 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2476 * ThreadGroup#enclose.
2480 thgroup_enclosed_p(VALUE group
)
2482 struct thgroup
*data
;
2484 Data_Get_Struct(group
, struct thgroup
, data
);
2493 * thgrp.add(thread) => thgrp
2495 * Adds the given <em>thread</em> to this group, removing it from any other
2496 * group to which it may have previously belonged.
2498 * puts "Initial group is #{ThreadGroup::Default.list}"
2499 * tg = ThreadGroup.new
2500 * t1 = Thread.new { sleep }
2501 * t2 = Thread.new { sleep }
2502 * puts "t1 is #{t1}"
2503 * puts "t2 is #{t2}"
2505 * puts "Initial group now #{ThreadGroup::Default.list}"
2506 * puts "tg group now #{tg.list}"
2508 * <em>produces:</em>
2510 * Initial group is #<Thread:0x401bdf4c>
2511 * t1 is #<Thread:0x401b3c90>
2512 * t2 is #<Thread:0x401b3c18>
2513 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2514 * tg group now #<Thread:0x401b3c90>
2518 thgroup_add(VALUE group
, VALUE thread
)
2521 struct thgroup
*data
;
2524 GetThreadPtr(thread
, th
);
2526 if (OBJ_FROZEN(group
)) {
2527 rb_raise(rb_eThreadError
, "can't move to the frozen thread group");
2529 Data_Get_Struct(group
, struct thgroup
, data
);
2530 if (data
->enclosed
) {
2531 rb_raise(rb_eThreadError
, "can't move to the enclosed thread group");
2538 if (OBJ_FROZEN(th
->thgroup
)) {
2539 rb_raise(rb_eThreadError
, "can't move from the frozen thread group");
2541 Data_Get_Struct(th
->thgroup
, struct thgroup
, data
);
2542 if (data
->enclosed
) {
2543 rb_raise(rb_eThreadError
,
2544 "can't move from the enclosed thread group");
2547 th
->thgroup
= group
;
2553 * Document-class: Mutex
2555 * Mutex implements a simple semaphore that can be used to coordinate access to
2556 * shared data from multiple concurrent threads.
2561 * semaphore = Mutex.new
2564 * semaphore.synchronize {
2565 * # access shared resource
2570 * semaphore.synchronize {
2571 * # access shared resource
2577 #define GetMutexPtr(obj, tobj) \
2578 Data_Get_Struct(obj, mutex_t, tobj)
2580 static const char *mutex_unlock(mutex_t
*mutex
);
2583 mutex_free(void *ptr
)
2586 mutex_t
*mutex
= ptr
;
2588 /* rb_warn("free locked mutex"); */
2589 mutex_unlock(mutex
);
2591 native_mutex_destroy(&mutex
->lock
);
2592 native_cond_destroy(&mutex
->cond
);
2598 mutex_alloc(VALUE klass
)
2603 obj
= Data_Make_Struct(klass
, mutex_t
, NULL
, mutex_free
, mutex
);
2604 native_mutex_initialize(&mutex
->lock
);
2605 native_cond_initialize(&mutex
->cond
);
2611 * Mutex.new => mutex
2613 * Creates a new Mutex
2616 mutex_initialize(VALUE self
)
2624 return mutex_alloc(rb_cMutex
);
2629 * mutex.locked? => true or false
2631 * Returns +true+ if this lock is currently held by some thread.
2634 rb_mutex_locked_p(VALUE self
)
2637 GetMutexPtr(self
, mutex
);
2638 return mutex
->th
? Qtrue
: Qfalse
;
2642 mutex_locked(rb_thread_t
*th
, VALUE self
)
2645 GetMutexPtr(self
, mutex
);
2647 if (th
->keeping_mutexes
) {
2648 mutex
->next_mutex
= th
->keeping_mutexes
;
2650 th
->keeping_mutexes
= mutex
;
2655 * mutex.try_lock => true or false
2657 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2661 rb_mutex_trylock(VALUE self
)
2664 VALUE locked
= Qfalse
;
2665 GetMutexPtr(self
, mutex
);
2667 if (mutex
->th
== GET_THREAD()) {
2668 rb_raise(rb_eThreadError
, "deadlock; recursive locking");
2671 native_mutex_lock(&mutex
->lock
);
2672 if (mutex
->th
== 0) {
2673 mutex
->th
= GET_THREAD();
2676 mutex_locked(GET_THREAD(), self
);
2678 native_mutex_unlock(&mutex
->lock
);
2684 lock_func(rb_thread_t
*th
, mutex_t
*mutex
, int last_thread
)
2686 int interrupted
= 0;
2687 #if 0 /* for debug */
2688 native_thread_yield();
2691 native_mutex_lock(&mutex
->lock
);
2692 th
->transition_for_lock
= 0;
2693 while (mutex
->th
|| (mutex
->th
= th
, 0)) {
2699 mutex
->cond_waiting
++;
2700 native_cond_wait(&mutex
->cond
, &mutex
->lock
);
2701 mutex
->cond_notified
--;
2703 if (RUBY_VM_INTERRUPTED(th
)) {
2708 th
->transition_for_lock
= 1;
2709 native_mutex_unlock(&mutex
->lock
);
2711 if (interrupted
== 2) native_thread_yield();
2712 #if 0 /* for debug */
2713 native_thread_yield();
2720 lock_interrupt(void *ptr
)
2722 mutex_t
*mutex
= (mutex_t
*)ptr
;
2723 native_mutex_lock(&mutex
->lock
);
2724 if (mutex
->cond_waiting
> 0) {
2725 native_cond_broadcast(&mutex
->cond
);
2726 mutex
->cond_notified
= mutex
->cond_waiting
;
2727 mutex
->cond_waiting
= 0;
2729 native_mutex_unlock(&mutex
->lock
);
2734 * mutex.lock => true or false
2736 * Attempts to grab the lock and waits if it isn't available.
2737 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2740 rb_mutex_lock(VALUE self
)
2742 if (rb_mutex_trylock(self
) == Qfalse
) {
2744 rb_thread_t
*th
= GET_THREAD();
2745 GetMutexPtr(self
, mutex
);
2747 while (mutex
->th
!= th
) {
2749 enum rb_thread_status prev_status
= th
->status
;
2750 int last_thread
= 0;
2751 struct rb_unblock_callback oldubf
;
2753 set_unblock_function(th
, lock_interrupt
, mutex
, &oldubf
);
2754 th
->status
= THREAD_STOPPED_FOREVER
;
2756 th
->locking_mutex
= self
;
2757 if (vm_living_thread_num(th
->vm
) == th
->vm
->sleeper
) {
2761 th
->transition_for_lock
= 1;
2762 BLOCKING_REGION_CORE({
2763 interrupted
= lock_func(th
, mutex
, last_thread
);
2765 th
->transition_for_lock
= 0;
2766 remove_signal_thread_list(th
);
2767 reset_unblock_function(th
, &oldubf
);
2769 th
->locking_mutex
= Qfalse
;
2770 if (mutex
->th
&& interrupted
== 2) {
2771 rb_check_deadlock(th
->vm
);
2773 if (th
->status
== THREAD_STOPPED_FOREVER
) {
2774 th
->status
= prev_status
;
2778 if (mutex
->th
== th
) mutex_locked(th
, self
);
2781 RUBY_VM_CHECK_INTS();
2789 mutex_unlock(mutex_t
*mutex
)
2791 const char *err
= NULL
;
2792 rb_thread_t
*th
= GET_THREAD();
2795 native_mutex_lock(&mutex
->lock
);
2797 if (mutex
->th
== 0) {
2798 err
= "Attempt to unlock a mutex which is not locked";
2800 else if (mutex
->th
!= GET_THREAD()) {
2801 err
= "Attempt to unlock a mutex which is locked by another thread";
2805 if (mutex
->cond_waiting
> 0) {
2806 /* waiting thread */
2807 native_cond_signal(&mutex
->cond
);
2808 mutex
->cond_waiting
--;
2809 mutex
->cond_notified
++;
2813 native_mutex_unlock(&mutex
->lock
);
2816 th_mutex
= th
->keeping_mutexes
;
2817 if (th_mutex
== mutex
) {
2818 th
->keeping_mutexes
= mutex
->next_mutex
;
2823 tmp_mutex
= th_mutex
->next_mutex
;
2824 if (tmp_mutex
== mutex
) {
2825 th_mutex
->next_mutex
= tmp_mutex
->next_mutex
;
2828 th_mutex
= tmp_mutex
;
2831 mutex
->next_mutex
= NULL
;
2839 * mutex.unlock => self
2841 * Releases the lock.
2842 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2845 rb_mutex_unlock(VALUE self
)
2849 GetMutexPtr(self
, mutex
);
2851 err
= mutex_unlock(mutex
);
2852 if (err
) rb_raise(rb_eThreadError
, err
);
2858 rb_mutex_unlock_all(mutex_t
*mutexes
)
2865 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2867 mutexes
= mutex
->next_mutex
;
2868 err
= mutex_unlock(mutex
);
2869 if (err
) rb_bug("invalid keeping_mutexes: %s", err
);
2874 rb_mutex_sleep_forever(VALUE time
)
2876 rb_thread_sleep_deadly();
2881 rb_mutex_wait_for(VALUE time
)
2883 const struct timeval
*t
= (struct timeval
*)time
;
2884 rb_thread_wait_for(*t
);
2889 rb_mutex_sleep(VALUE self
, VALUE timeout
)
2894 if (!NIL_P(timeout
)) {
2895 t
= rb_time_interval(timeout
);
2897 rb_mutex_unlock(self
);
2899 if (NIL_P(timeout
)) {
2900 rb_ensure(rb_mutex_sleep_forever
, Qnil
, rb_mutex_lock
, self
);
2903 rb_ensure(rb_mutex_wait_for
, (VALUE
)&t
, rb_mutex_lock
, self
);
2905 end
= time(0) - beg
;
2906 return INT2FIX(end
);
2911 * mutex.sleep(timeout = nil) => number
2913 * Releases the lock and sleeps +timeout+ seconds if it is given and
2914 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2915 * the current thread.
2918 mutex_sleep(int argc
, VALUE
*argv
, VALUE self
)
2922 rb_scan_args(argc
, argv
, "01", &timeout
);
2923 return rb_mutex_sleep(self
, timeout
);
2928 * mutex.synchronize { ... } => result of the block
2930 * Obtains a lock, runs the block, and releases the lock when the block
2931 * completes. See the example under +Mutex+.
2935 rb_thread_synchronize(VALUE mutex
, VALUE (*func
)(VALUE arg
), VALUE arg
)
2937 rb_mutex_lock(mutex
);
2938 return rb_ensure(func
, arg
, rb_mutex_unlock
, mutex
);
2942 * Document-class: Barrier
2944 typedef struct rb_thread_list_struct rb_thread_list_t
;
2946 struct rb_thread_list_struct
{
2948 rb_thread_list_t
*next
;
2952 thlist_mark(void *ptr
)
2954 rb_thread_list_t
*q
= ptr
;
2956 for (; q
; q
= q
->next
) {
2957 rb_gc_mark(q
->th
->self
);
2962 thlist_free(void *ptr
)
2964 rb_thread_list_t
*q
= ptr
, *next
;
2966 for (; q
; q
= next
) {
2973 thlist_signal(rb_thread_list_t
**list
, unsigned int maxth
, rb_thread_t
**woken_thread
)
2976 rb_thread_list_t
*q
;
2978 while ((q
= *list
) != NULL
) {
2979 rb_thread_t
*th
= q
->th
;
2983 if (th
->status
!= THREAD_KILLED
) {
2984 rb_thread_ready(th
);
2985 if (!woken
&& woken_thread
) *woken_thread
= th
;
2986 if (++woken
>= maxth
&& maxth
) break;
2994 rb_thread_list_t
*waiting
, **tail
;
2998 barrier_mark(void *ptr
)
3000 rb_barrier_t
*b
= ptr
;
3002 if (b
->owner
) rb_gc_mark(b
->owner
->self
);
3003 thlist_mark(b
->waiting
);
3007 barrier_free(void *ptr
)
3009 rb_barrier_t
*b
= ptr
;
3012 thlist_free(b
->waiting
);
3018 barrier_alloc(VALUE klass
)
3021 rb_barrier_t
*barrier
;
3023 obj
= Data_Make_Struct(klass
, rb_barrier_t
, barrier_mark
, barrier_free
, barrier
);
3024 barrier
->owner
= GET_THREAD();
3025 barrier
->waiting
= 0;
3026 barrier
->tail
= &barrier
->waiting
;
3031 rb_barrier_new(void)
3033 return barrier_alloc(rb_cBarrier
);
3037 rb_barrier_wait(VALUE self
)
3039 rb_barrier_t
*barrier
;
3040 rb_thread_list_t
*q
;
3042 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
3043 if (!barrier
->owner
|| barrier
->owner
->status
== THREAD_KILLED
) {
3045 if (thlist_signal(&barrier
->waiting
, 1, &barrier
->owner
)) return Qfalse
;
3048 else if (barrier
->owner
== GET_THREAD()) {
3052 *barrier
->tail
= q
= ALLOC(rb_thread_list_t
);
3053 q
->th
= GET_THREAD();
3055 barrier
->tail
= &q
->next
;
3056 rb_thread_sleep_forever();
3057 return barrier
->owner
== GET_THREAD() ? Qtrue
: Qfalse
;
3062 rb_barrier_release(VALUE self
)
3064 rb_barrier_t
*barrier
;
3067 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
3068 if (barrier
->owner
!= GET_THREAD()) {
3069 rb_raise(rb_eThreadError
, "not owned");
3071 n
= thlist_signal(&barrier
->waiting
, 0, &barrier
->owner
);
3072 return n
? UINT2NUM(n
) : Qfalse
;
3075 /* variables for recursive traversals */
3076 static ID recursive_key
;
3079 recursive_check(VALUE hash
, VALUE obj
)
3081 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3085 VALUE list
= rb_hash_aref(hash
, ID2SYM(rb_frame_this_func()));
3087 if (NIL_P(list
) || TYPE(list
) != T_HASH
)
3089 if (NIL_P(rb_hash_lookup(list
, obj
)))
3096 recursive_push(VALUE hash
, VALUE obj
)
3100 sym
= ID2SYM(rb_frame_this_func());
3101 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3102 hash
= rb_hash_new();
3103 rb_thread_local_aset(rb_thread_current(), recursive_key
, hash
);
3107 list
= rb_hash_aref(hash
, sym
);
3109 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3110 list
= rb_hash_new();
3111 rb_hash_aset(hash
, sym
, list
);
3113 rb_hash_aset(list
, obj
, Qtrue
);
3118 recursive_pop(VALUE hash
, VALUE obj
)
3122 sym
= ID2SYM(rb_frame_this_func());
3123 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3126 symname
= rb_inspect(sym
);
3127 thrname
= rb_inspect(rb_thread_current());
3129 rb_raise(rb_eTypeError
, "invalid inspect_tbl hash for %s in %s",
3130 StringValuePtr(symname
), StringValuePtr(thrname
));
3132 list
= rb_hash_aref(hash
, sym
);
3133 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3134 VALUE symname
= rb_inspect(sym
);
3135 VALUE thrname
= rb_inspect(rb_thread_current());
3136 rb_raise(rb_eTypeError
, "invalid inspect_tbl list for %s in %s",
3137 StringValuePtr(symname
), StringValuePtr(thrname
));
3139 rb_hash_delete(list
, obj
);
3143 rb_exec_recursive(VALUE (*func
) (VALUE
, VALUE
, int), VALUE obj
, VALUE arg
)
3145 VALUE hash
= rb_thread_local_aref(rb_thread_current(), recursive_key
);
3146 VALUE objid
= rb_obj_id(obj
);
3148 if (recursive_check(hash
, objid
)) {
3149 return (*func
) (obj
, arg
, Qtrue
);
3152 VALUE result
= Qundef
;
3155 hash
= recursive_push(hash
, objid
);
3157 if ((state
= EXEC_TAG()) == 0) {
3158 result
= (*func
) (obj
, arg
, Qfalse
);
3161 recursive_pop(hash
, objid
);
3170 static rb_event_hook_t
*
3171 alloc_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3173 rb_event_hook_t
*hook
= ALLOC(rb_event_hook_t
);
3175 hook
->flag
= events
;
3181 thread_reset_event_flags(rb_thread_t
*th
)
3183 rb_event_hook_t
*hook
= th
->event_hooks
;
3184 rb_event_flag_t flag
= th
->event_flags
& RUBY_EVENT_VM
;
3193 rb_thread_add_event_hook(rb_thread_t
*th
,
3194 rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3196 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3197 hook
->next
= th
->event_hooks
;
3198 th
->event_hooks
= hook
;
3199 thread_reset_event_flags(th
);
3203 set_threads_event_flags_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3207 GetThreadPtr(thval
, th
);
3210 th
->event_flags
|= RUBY_EVENT_VM
;
3213 th
->event_flags
&= (~RUBY_EVENT_VM
);
3219 set_threads_event_flags(int flag
)
3221 st_foreach(GET_VM()->living_threads
, set_threads_event_flags_i
, (st_data_t
) flag
);
3225 rb_add_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3227 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3228 rb_vm_t
*vm
= GET_VM();
3230 hook
->next
= vm
->event_hooks
;
3231 vm
->event_hooks
= hook
;
3233 set_threads_event_flags(1);
3237 remove_event_hook(rb_event_hook_t
**root
, rb_event_hook_func_t func
)
3239 rb_event_hook_t
*prev
= NULL
, *hook
= *root
, *next
;
3243 if (func
== 0 || hook
->func
== func
) {
3245 prev
->next
= hook
->next
;
3261 rb_thread_remove_event_hook(rb_thread_t
*th
, rb_event_hook_func_t func
)
3263 int ret
= remove_event_hook(&th
->event_hooks
, func
);
3264 thread_reset_event_flags(th
);
3269 rb_remove_event_hook(rb_event_hook_func_t func
)
3271 rb_vm_t
*vm
= GET_VM();
3272 rb_event_hook_t
*hook
= vm
->event_hooks
;
3273 int ret
= remove_event_hook(&vm
->event_hooks
, func
);
3275 if (hook
!= NULL
&& vm
->event_hooks
== NULL
) {
3276 set_threads_event_flags(0);
3283 clear_trace_func_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3286 GetThreadPtr((VALUE
)key
, th
);
3287 rb_thread_remove_event_hook(th
, 0);
3292 rb_clear_trace_func(void)
3294 st_foreach(GET_VM()->living_threads
, clear_trace_func_i
, (st_data_t
) 0);
3295 rb_remove_event_hook(0);
3298 static void call_trace_func(rb_event_flag_t
, VALUE data
, VALUE self
, ID id
, VALUE klass
);
3302 * set_trace_func(proc) => proc
3303 * set_trace_func(nil) => nil
3305 * Establishes _proc_ as the handler for tracing, or disables
3306 * tracing if the parameter is +nil+. _proc_ takes up
3307 * to six parameters: an event name, a filename, a line number, an
3308 * object id, a binding, and the name of a class. _proc_ is
3309 * invoked whenever an event occurs. Events are: <code>c-call</code>
3310 * (call a C-language routine), <code>c-return</code> (return from a
3311 * C-language routine), <code>call</code> (call a Ruby method),
3312 * <code>class</code> (start a class or module definition),
3313 * <code>end</code> (finish a class or module definition),
3314 * <code>line</code> (execute code on a new line), <code>raise</code>
3315 * (raise an exception), and <code>return</code> (return from a Ruby
3316 * method). Tracing is disabled within the context of _proc_.
3325 * set_trace_func proc { |event, file, line, id, binding, classname|
3326 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3331 * line prog.rb:11 false
3332 * c-call prog.rb:11 new Class
3333 * c-call prog.rb:11 initialize Object
3334 * c-return prog.rb:11 initialize Object
3335 * c-return prog.rb:11 new Class
3336 * line prog.rb:12 false
3337 * call prog.rb:2 test Test
3338 * line prog.rb:3 test Test
3339 * line prog.rb:4 test Test
3340 * return prog.rb:4 test Test
3344 set_trace_func(VALUE obj
, VALUE trace
)
3346 rb_remove_event_hook(call_trace_func
);
3352 if (!rb_obj_is_proc(trace
)) {
3353 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3356 rb_add_event_hook(call_trace_func
, RUBY_EVENT_ALL
, trace
);
3361 thread_add_trace_func(rb_thread_t
*th
, VALUE trace
)
3363 if (!rb_obj_is_proc(trace
)) {
3364 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3367 rb_thread_add_event_hook(th
, call_trace_func
, RUBY_EVENT_ALL
, trace
);
3371 thread_add_trace_func_m(VALUE obj
, VALUE trace
)
3374 GetThreadPtr(obj
, th
);
3375 thread_add_trace_func(th
, trace
);
3380 thread_set_trace_func_m(VALUE obj
, VALUE trace
)
3383 GetThreadPtr(obj
, th
);
3384 rb_thread_remove_event_hook(th
, call_trace_func
);
3389 thread_add_trace_func(th
, trace
);
3394 get_event_name(rb_event_flag_t event
)
3397 case RUBY_EVENT_LINE
:
3399 case RUBY_EVENT_CLASS
:
3401 case RUBY_EVENT_END
:
3403 case RUBY_EVENT_CALL
:
3405 case RUBY_EVENT_RETURN
:
3407 case RUBY_EVENT_C_CALL
:
3409 case RUBY_EVENT_C_RETURN
:
3411 case RUBY_EVENT_RAISE
:
3418 VALUE
ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
);
3420 struct call_trace_func_args
{
3421 rb_event_flag_t event
;
3429 call_trace_proc(VALUE args
, int tracing
)
3431 struct call_trace_func_args
*p
= (struct call_trace_func_args
*)args
;
3432 VALUE eventname
= rb_str_new2(get_event_name(p
->event
));
3433 VALUE filename
= rb_str_new2(rb_sourcefile());
3435 int line
= rb_sourceline();
3439 if (p
->event
== RUBY_EVENT_C_CALL
||
3440 p
->event
== RUBY_EVENT_C_RETURN
) {
3445 rb_thread_method_id_and_class(GET_THREAD(), &id
, &klass
);
3447 if (id
== ID_ALLOCATOR
)
3450 if (TYPE(klass
) == T_ICLASS
) {
3451 klass
= RBASIC(klass
)->klass
;
3453 else if (FL_TEST(klass
, FL_SINGLETON
)) {
3454 klass
= rb_iv_get(klass
, "__attached__");
3458 argv
[0] = eventname
;
3460 argv
[2] = INT2FIX(line
);
3461 argv
[3] = id
? ID2SYM(id
) : Qnil
;
3462 argv
[4] = p
->self
? rb_binding_new() : Qnil
;
3463 argv
[5] = klass
? klass
: Qnil
;
3465 return rb_proc_call_with_block(p
->proc
, 6, argv
, Qnil
);
3469 call_trace_func(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3471 struct call_trace_func_args args
;
3478 ruby_suppress_tracing(call_trace_proc
, (VALUE
)&args
, Qfalse
);
3482 ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
)
3484 rb_thread_t
*th
= GET_THREAD();
3485 int state
, raised
, tracing
;
3486 VALUE result
= Qnil
;
3488 if ((tracing
= th
->tracing
) != 0 && !always
) {
3495 raised
= rb_thread_reset_raised(th
);
3498 if ((state
= EXEC_TAG()) == 0) {
3499 result
= (*func
)(arg
, tracing
);
3503 rb_thread_set_raised(th
);
3507 th
->tracing
= tracing
;
3516 * +Thread+ encapsulates the behavior of a thread of
3517 * execution, including the main thread of the Ruby script.
3519 * In the descriptions of the methods in this class, the parameter _sym_
3520 * refers to a symbol, which is either a quoted string or a
3521 * +Symbol+ (such as <code>:name</code>).
3528 #define rb_intern(str) rb_intern_const(str)
3532 rb_define_singleton_method(rb_cThread
, "new", thread_s_new
, -1);
3533 rb_define_singleton_method(rb_cThread
, "start", thread_start
, -2);
3534 rb_define_singleton_method(rb_cThread
, "fork", thread_start
, -2);
3535 rb_define_singleton_method(rb_cThread
, "main", rb_thread_s_main
, 0);
3536 rb_define_singleton_method(rb_cThread
, "current", thread_s_current
, 0);
3537 rb_define_singleton_method(rb_cThread
, "stop", rb_thread_stop
, 0);
3538 rb_define_singleton_method(rb_cThread
, "kill", rb_thread_s_kill
, 1);
3539 rb_define_singleton_method(rb_cThread
, "exit", rb_thread_exit
, 0);
3540 rb_define_singleton_method(rb_cThread
, "pass", thread_s_pass
, 0);
3541 rb_define_singleton_method(rb_cThread
, "list", rb_thread_list
, 0);
3542 rb_define_singleton_method(rb_cThread
, "abort_on_exception", rb_thread_s_abort_exc
, 0);
3543 rb_define_singleton_method(rb_cThread
, "abort_on_exception=", rb_thread_s_abort_exc_set
, 1);
3544 #if THREAD_DEBUG < 0
3545 rb_define_singleton_method(rb_cThread
, "DEBUG", rb_thread_s_debug
, 0);
3546 rb_define_singleton_method(rb_cThread
, "DEBUG=", rb_thread_s_debug_set
, 1);
3549 rb_define_method(rb_cThread
, "initialize", thread_initialize
, -2);
3550 rb_define_method(rb_cThread
, "raise", thread_raise_m
, -1);
3551 rb_define_method(rb_cThread
, "join", thread_join_m
, -1);
3552 rb_define_method(rb_cThread
, "value", thread_value
, 0);
3553 rb_define_method(rb_cThread
, "kill", rb_thread_kill
, 0);
3554 rb_define_method(rb_cThread
, "terminate", rb_thread_kill
, 0);
3555 rb_define_method(rb_cThread
, "exit", rb_thread_kill
, 0);
3556 rb_define_method(rb_cThread
, "run", rb_thread_run
, 0);
3557 rb_define_method(rb_cThread
, "wakeup", rb_thread_wakeup
, 0);
3558 rb_define_method(rb_cThread
, "[]", rb_thread_aref
, 1);
3559 rb_define_method(rb_cThread
, "[]=", rb_thread_aset
, 2);
3560 rb_define_method(rb_cThread
, "key?", rb_thread_key_p
, 1);
3561 rb_define_method(rb_cThread
, "keys", rb_thread_keys
, 0);
3562 rb_define_method(rb_cThread
, "priority", rb_thread_priority
, 0);
3563 rb_define_method(rb_cThread
, "priority=", rb_thread_priority_set
, 1);
3564 rb_define_method(rb_cThread
, "status", rb_thread_status
, 0);
3565 rb_define_method(rb_cThread
, "alive?", rb_thread_alive_p
, 0);
3566 rb_define_method(rb_cThread
, "stop?", rb_thread_stop_p
, 0);
3567 rb_define_method(rb_cThread
, "abort_on_exception", rb_thread_abort_exc
, 0);
3568 rb_define_method(rb_cThread
, "abort_on_exception=", rb_thread_abort_exc_set
, 1);
3569 rb_define_method(rb_cThread
, "safe_level", rb_thread_safe_level
, 0);
3570 rb_define_method(rb_cThread
, "group", rb_thread_group
, 0);
3572 rb_define_method(rb_cThread
, "inspect", rb_thread_inspect
, 0);
3574 cThGroup
= rb_define_class("ThreadGroup", rb_cObject
);
3575 rb_define_alloc_func(cThGroup
, thgroup_s_alloc
);
3576 rb_define_method(cThGroup
, "list", thgroup_list
, 0);
3577 rb_define_method(cThGroup
, "enclose", thgroup_enclose
, 0);
3578 rb_define_method(cThGroup
, "enclosed?", thgroup_enclosed_p
, 0);
3579 rb_define_method(cThGroup
, "add", thgroup_add
, 1);
3582 rb_thread_t
*th
= GET_THREAD();
3583 th
->thgroup
= th
->vm
->thgroup_default
= rb_obj_alloc(cThGroup
);
3584 rb_define_const(cThGroup
, "Default", th
->thgroup
);
3587 rb_cMutex
= rb_define_class("Mutex", rb_cObject
);
3588 rb_define_alloc_func(rb_cMutex
, mutex_alloc
);
3589 rb_define_method(rb_cMutex
, "initialize", mutex_initialize
, 0);
3590 rb_define_method(rb_cMutex
, "locked?", rb_mutex_locked_p
, 0);
3591 rb_define_method(rb_cMutex
, "try_lock", rb_mutex_trylock
, 0);
3592 rb_define_method(rb_cMutex
, "lock", rb_mutex_lock
, 0);
3593 rb_define_method(rb_cMutex
, "unlock", rb_mutex_unlock
, 0);
3594 rb_define_method(rb_cMutex
, "sleep", mutex_sleep
, -1);
3596 recursive_key
= rb_intern("__recursive_key__");
3597 rb_eThreadError
= rb_define_class("ThreadError", rb_eStandardError
);
3600 rb_define_global_function("set_trace_func", set_trace_func
, 1);
3601 rb_define_method(rb_cThread
, "set_trace_func", thread_set_trace_func_m
, 1);
3602 rb_define_method(rb_cThread
, "add_trace_func", thread_add_trace_func_m
, 1);
3604 /* init thread core */
3605 Init_native_thread();
3607 /* main thread setting */
3609 /* acquire global interpreter lock */
3610 rb_thread_lock_t
*lp
= &GET_THREAD()->vm
->global_vm_lock
;
3611 native_mutex_initialize(lp
);
3612 native_mutex_lock(lp
);
3613 native_mutex_initialize(&GET_THREAD()->interrupt_lock
);
3617 rb_thread_create_timer_thread();
3619 (void)native_mutex_trylock
;
3620 (void)ruby_thread_set_native
;
3624 ruby_native_thread_p(void)
3626 rb_thread_t
*th
= ruby_thread_from_native();
3628 return th
? Qtrue
: Qfalse
;
3632 check_deadlock_i(st_data_t key
, st_data_t val
, int *found
)
3636 GetThreadPtr(thval
, th
);
3638 if (th
->status
!= THREAD_STOPPED_FOREVER
|| RUBY_VM_INTERRUPTED(th
) || th
->transition_for_lock
) {
3641 else if (th
->locking_mutex
) {
3643 GetMutexPtr(th
->locking_mutex
, mutex
);
3645 native_mutex_lock(&mutex
->lock
);
3646 if (mutex
->th
== th
|| (!mutex
->th
&& mutex
->cond_notified
)) {
3649 native_mutex_unlock(&mutex
->lock
);
3652 return (*found
) ? ST_STOP
: ST_CONTINUE
;
#if 0 /* for debug */
/*
 * Debug-only st_foreach iterator: dumps each living thread's status,
 * interrupt flag and lock-transition flag, plus the owner/notified state of
 * any mutex it is blocked on.  Compiled out by default.
 */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
        mutex_t *mutex;
        GetMutexPtr(th->locking_mutex, mutex);

        native_mutex_lock(&mutex->lock);
        printf(" %p %d\n", mutex->th, mutex->cond_notified);
        native_mutex_unlock(&mutex->lock);
    }
    else {
        /* NOTE(review): the no-mutex newline branch and return were lost in
         * extraction; reconstructed so every entry ends its output line. */
        puts("");
    }

    return ST_CONTINUE;
}
#endif
3679 rb_check_deadlock(rb_vm_t
*vm
)
3683 if (vm_living_thread_num(vm
) > vm
->sleeper
) return;
3684 if (vm_living_thread_num(vm
) < vm
->sleeper
) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3686 st_foreach(vm
->living_threads
, check_deadlock_i
, (st_data_t
)&found
);
3690 argv
[0] = rb_eFatal
;
3691 argv
[1] = rb_str_new2("deadlock detected");
3692 #if 0 /* for debug */
3693 printf("%d %d %p %p\n", vm
->living_threads
->num_entries
, vm
->sleeper
, GET_THREAD(), vm
->main_thread
);
3694 st_foreach(vm
->living_threads
, debug_i
, (st_data_t
)0);
3696 rb_thread_raise(2, argv
, vm
->main_thread
);
3701 update_coverage(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3703 VALUE coverage
= GET_THREAD()->cfp
->iseq
->coverage
;
3704 if (coverage
&& RBASIC(coverage
)->klass
== 0) {
3705 long line
= rb_sourceline() - 1;
3707 if (RARRAY_PTR(coverage
)[line
] == Qnil
) {
3710 count
= FIX2LONG(RARRAY_PTR(coverage
)[line
]) + 1;
3711 if (POSFIXABLE(count
)) {
3712 RARRAY_PTR(coverage
)[line
] = LONG2FIX(count
);
3718 rb_get_coverages(void)
3720 return GET_VM()->coverages
;
3724 rb_set_coverages(VALUE coverages
)
3726 GET_VM()->coverages
= coverages
;
3727 rb_add_event_hook(update_coverage
, RUBY_EVENT_COVERAGE
, Qnil
);
3731 rb_reset_coverages(void)
3733 GET_VM()->coverages
= Qfalse
;
3734 rb_remove_event_hook(update_coverage
);