1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
    Only the thread that holds the mutex (GVL: Global VM Lock) can run.
    On thread scheduling, the running thread releases the GVL. If a
    running thread attempts a blocking operation, it must release the
    GVL so that another thread can continue. After the blocking
    operation completes, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).
    Each VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.
36 ------------------------------------------------------------------------
    All threads run concurrently or in parallel, so exclusive access
    control is needed for shared objects. For example, to access a
    String or Array object, a fine-grained lock must be taken on every
    access.
47 #include "eval_intern.h"
52 #define THREAD_DEBUG 0
58 static void sleep_timeval(rb_thread_t
*th
, struct timeval time
);
59 static void sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
);
60 static void sleep_forever(rb_thread_t
*th
, int nodeadlock
);
61 static double timeofday(void);
62 struct timeval
rb_time_interval(VALUE
);
63 static int rb_thread_dead(rb_thread_t
*th
);
65 static void rb_mutex_unlock_all(VALUE
);
66 static void rb_check_deadlock(rb_vm_t
*vm
);
68 void rb_signal_exec(rb_thread_t
*th
, int sig
);
69 void rb_disable_interrupt(void);
71 static const VALUE eKillSignal
= INT2FIX(0);
72 static const VALUE eTerminateSignal
= INT2FIX(1);
73 static volatile int system_working
= 1;
76 st_delete_wrap(st_table
*table
, st_data_t key
)
78 st_delete(table
, &key
, 0);
81 /********************************************************************************/
83 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
85 static void set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
86 struct rb_unblock_callback
*old
);
87 static void reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
);
89 #define GVL_UNLOCK_BEGIN() do { \
90 rb_thread_t *_th_stored = GET_THREAD(); \
91 rb_gc_save_machine_context(_th_stored); \
92 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
94 #define GVL_UNLOCK_END() \
95 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
96 rb_thread_set_current(_th_stored); \
99 #define BLOCKING_REGION_CORE(exec) do { \
100 GVL_UNLOCK_BEGIN(); {\
106 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
107 rb_thread_t *__th = GET_THREAD(); \
108 enum rb_thread_status __prev_status = __th->status; \
109 struct rb_unblock_callback __oldubf; \
110 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
111 __th->status = THREAD_STOPPED; \
112 thread_debug("enter blocking region (%p)\n", __th); \
113 BLOCKING_REGION_CORE(exec); \
114 thread_debug("leave blocking region (%p)\n", __th); \
115 remove_signal_thread_list(__th); \
116 reset_unblock_function(__th, &__oldubf); \
117 if (__th->status == THREAD_STOPPED) { \
118 __th->status = __prev_status; \
120 RUBY_VM_CHECK_INTS(); \
124 #ifdef HAVE_VA_ARGS_MACRO
125 void rb_thread_debug(const char *file
, int line
, const char *fmt
, ...);
126 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
127 #define POSITION_FORMAT "%s:%d:"
128 #define POSITION_ARGS ,file, line
130 void rb_thread_debug(const char *fmt
, ...);
131 #define thread_debug rb_thread_debug
132 #define POSITION_FORMAT
133 #define POSITION_ARGS
136 # if THREAD_DEBUG < 0
137 static int rb_thread_debug_enabled
;
140 rb_thread_s_debug(void)
142 return INT2NUM(rb_thread_debug_enabled
);
146 rb_thread_s_debug_set(VALUE self
, VALUE val
)
148 rb_thread_debug_enabled
= RTEST(val
);
152 # define rb_thread_debug_enabled THREAD_DEBUG
155 #define thread_debug if(0)printf
159 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
161 NOINLINE(static int thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
,
162 VALUE
*register_stack_start
));
163 static void timer_thread_function(void *);
166 #include "thread_win32.c"
168 #define DEBUG_OUT() \
169 WaitForSingleObject(&debug_mutex, INFINITE); \
170 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
172 ReleaseMutex(&debug_mutex);
174 #elif defined(HAVE_PTHREAD_H)
175 #include "thread_pthread.c"
177 #define DEBUG_OUT() \
178 pthread_mutex_lock(&debug_mutex); \
179 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
181 pthread_mutex_unlock(&debug_mutex);
184 #error "unsupported thread type"
188 static int debug_mutex_initialized
= 1;
189 static rb_thread_lock_t debug_mutex
;
193 #ifdef HAVE_VA_ARGS_MACRO
194 const char *file
, int line
,
196 const char *fmt
, ...)
201 if (!rb_thread_debug_enabled
) return;
203 if (debug_mutex_initialized
== 1) {
204 debug_mutex_initialized
= 0;
205 native_mutex_initialize(&debug_mutex
);
209 vsnprintf(buf
, BUFSIZ
, fmt
, args
);
218 set_unblock_function(rb_thread_t
*th
, rb_unblock_function_t
*func
, void *arg
,
219 struct rb_unblock_callback
*old
)
222 RUBY_VM_CHECK_INTS(); /* check signal or so */
223 native_mutex_lock(&th
->interrupt_lock
);
224 if (th
->interrupt_flag
) {
225 native_mutex_unlock(&th
->interrupt_lock
);
229 if (old
) *old
= th
->unblock
;
230 th
->unblock
.func
= func
;
231 th
->unblock
.arg
= arg
;
233 native_mutex_unlock(&th
->interrupt_lock
);
237 reset_unblock_function(rb_thread_t
*th
, const struct rb_unblock_callback
*old
)
239 native_mutex_lock(&th
->interrupt_lock
);
241 native_mutex_unlock(&th
->interrupt_lock
);
245 rb_thread_interrupt(rb_thread_t
*th
)
247 native_mutex_lock(&th
->interrupt_lock
);
248 RUBY_VM_SET_INTERRUPT(th
);
249 if (th
->unblock
.func
) {
250 (th
->unblock
.func
)(th
->unblock
.arg
);
255 native_mutex_unlock(&th
->interrupt_lock
);
260 terminate_i(st_data_t key
, st_data_t val
, rb_thread_t
*main_thread
)
264 GetThreadPtr(thval
, th
);
266 if (th
!= main_thread
) {
267 thread_debug("terminate_i: %p\n", th
);
268 rb_thread_interrupt(th
);
269 th
->thrown_errinfo
= eTerminateSignal
;
270 th
->status
= THREAD_TO_KILL
;
273 thread_debug("terminate_i: main thread (%p)\n", th
);
279 rb_thread_terminate_all(void)
281 rb_thread_t
*th
= GET_THREAD(); /* main thread */
282 rb_vm_t
*vm
= th
->vm
;
283 if (vm
->main_thread
!= th
) {
284 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm
->main_thread
, th
);
287 /* unlock all locking mutexes */
288 if (th
->keeping_mutexes
) {
289 rb_mutex_unlock_all(th
->keeping_mutexes
);
292 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th
);
293 st_foreach(vm
->living_threads
, terminate_i
, (st_data_t
)th
);
295 while (!rb_thread_alone()) {
297 if (EXEC_TAG() == 0) {
298 rb_thread_schedule();
301 /* ignore exception */
309 thread_cleanup_func_before_exec(void *th_ptr
)
311 rb_thread_t
*th
= th_ptr
;
312 th
->status
= THREAD_KILLED
;
313 th
->machine_stack_start
= th
->machine_stack_end
= 0;
315 th
->machine_register_stack_start
= th
->machine_register_stack_end
= 0;
320 thread_cleanup_func(void *th_ptr
)
322 rb_thread_t
*th
= th_ptr
;
323 thread_cleanup_func_before_exec(th_ptr
);
324 native_thread_destroy(th
);
327 extern void ruby_error_print(void);
328 static VALUE
rb_thread_raise(int, VALUE
*, rb_thread_t
*);
329 void rb_thread_recycle_stack_release(VALUE
*);
332 ruby_thread_init_stack(rb_thread_t
*th
)
334 native_thread_init_stack(th
);
338 thread_start_func_2(rb_thread_t
*th
, VALUE
*stack_start
, VALUE
*register_stack_start
)
341 VALUE args
= th
->first_args
;
343 rb_thread_t
*join_th
;
344 rb_thread_t
*main_th
;
345 VALUE errinfo
= Qnil
;
347 th
->machine_stack_start
= stack_start
;
349 th
->machine_register_stack_start
= register_stack_start
;
351 thread_debug("thread start: %p\n", th
);
353 native_mutex_lock(&th
->vm
->global_vm_lock
);
355 thread_debug("thread start (get lock): %p\n", th
);
356 rb_thread_set_current(th
);
359 if ((state
= EXEC_TAG()) == 0) {
360 SAVE_ROOT_JMPBUF(th
, {
361 if (th
->first_proc
) {
362 GetProcPtr(th
->first_proc
, proc
);
364 th
->local_lfp
= proc
->block
.lfp
;
365 th
->local_svar
= Qnil
;
366 th
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
,
367 RARRAY_LEN(args
), RARRAY_PTR(args
), 0);
370 th
->value
= (*th
->first_func
)((void *)th
->first_args
);
375 if (th
->safe_level
< 4 &&
376 (th
->vm
->thread_abort_on_exception
||
377 th
->abort_on_exception
|| RTEST(ruby_debug
))) {
378 errinfo
= th
->errinfo
;
379 if (NIL_P(errinfo
)) errinfo
= rb_errinfo();
384 th
->status
= THREAD_KILLED
;
385 thread_debug("thread end: %p\n", th
);
387 main_th
= th
->vm
->main_thread
;
389 if (TYPE(errinfo
) == T_OBJECT
) {
390 /* treat with normal error object */
391 rb_thread_raise(1, &errinfo
, main_th
);
396 /* locking_mutex must be Qfalse */
397 if (th
->locking_mutex
!= Qfalse
) {
398 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE
")",
399 th
, th
->locking_mutex
);
402 /* unlock all locking mutexes */
403 if (th
->keeping_mutexes
) {
404 rb_mutex_unlock_all(th
->keeping_mutexes
);
405 th
->keeping_mutexes
= Qfalse
;
408 /* delete self from living_threads */
409 st_delete_wrap(th
->vm
->living_threads
, th
->self
);
411 /* wake up joinning threads */
412 join_th
= th
->join_list_head
;
414 if (join_th
== main_th
) errinfo
= Qnil
;
415 rb_thread_interrupt(join_th
);
416 switch (join_th
->status
) {
417 case THREAD_STOPPED
: case THREAD_STOPPED_FOREVER
:
418 join_th
->status
= THREAD_RUNNABLE
;
421 join_th
= join_th
->join_list_next
;
423 if (th
!= main_th
) rb_check_deadlock(th
->vm
);
425 if (!th
->root_fiber
) {
426 rb_thread_recycle_stack_release(th
->stack
);
430 thread_cleanup_func(th
);
431 native_mutex_unlock(&th
->vm
->global_vm_lock
);
437 thread_create_core(VALUE thval
, VALUE args
, VALUE (*fn
)(ANYARGS
))
441 if (OBJ_FROZEN(GET_THREAD()->thgroup
)) {
442 rb_raise(rb_eThreadError
,
443 "can't start a new thread (frozen ThreadGroup)");
445 GetThreadPtr(thval
, th
);
447 /* setup thread environment */
449 th
->first_proc
= fn
? Qfalse
: rb_block_proc();
450 th
->first_args
= args
; /* GC: shouldn't put before above line */
452 th
->priority
= GET_THREAD()->priority
;
453 th
->thgroup
= GET_THREAD()->thgroup
;
455 native_mutex_initialize(&th
->interrupt_lock
);
457 st_insert(th
->vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
458 native_thread_create(th
);
463 thread_s_new(int argc
, VALUE
*argv
, VALUE klass
)
466 VALUE thread
= rb_thread_alloc(klass
);
467 rb_obj_call_init(thread
, argc
, argv
);
468 GetThreadPtr(thread
, th
);
469 if (!th
->first_args
) {
470 rb_raise(rb_eThreadError
, "uninitialized thread - check `%s#initialize'",
471 rb_class2name(klass
));
478 * Thread.start([args]*) {|args| block } => thread
479 * Thread.fork([args]*) {|args| block } => thread
481 * Basically the same as <code>Thread::new</code>. However, if class
482 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
483 * subclass will not invoke the subclass's <code>initialize</code> method.
487 thread_start(VALUE klass
, VALUE args
)
489 return thread_create_core(rb_thread_alloc(klass
), args
, 0);
493 thread_initialize(VALUE thread
, VALUE args
)
496 if (!rb_block_given_p()) {
497 rb_raise(rb_eThreadError
, "must be called with a block");
499 GetThreadPtr(thread
, th
);
500 if (th
->first_args
) {
501 VALUE
rb_proc_location(VALUE self
);
502 VALUE proc
= th
->first_proc
, line
, loc
;
504 if (!proc
|| !RTEST(loc
= rb_proc_location(proc
))) {
505 rb_raise(rb_eThreadError
, "already initialized thread");
507 file
= RSTRING_PTR(RARRAY_PTR(loc
)[0]);
508 if (NIL_P(line
= RARRAY_PTR(loc
)[1])) {
509 rb_raise(rb_eThreadError
, "already initialized thread - %s",
512 rb_raise(rb_eThreadError
, "already initialized thread - %s:%d",
513 file
, NUM2INT(line
));
515 return thread_create_core(thread
, args
, 0);
519 rb_thread_create(VALUE (*fn
)(ANYARGS
), void *arg
)
521 return thread_create_core(rb_thread_alloc(rb_cThread
), (VALUE
)arg
, fn
);
525 /* +infty, for this purpose */
526 #define DELAY_INFTY 1E30
529 rb_thread_t
*target
, *waiting
;
535 remove_from_join_list(VALUE arg
)
537 struct join_arg
*p
= (struct join_arg
*)arg
;
538 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
540 if (target_th
->status
!= THREAD_KILLED
) {
541 rb_thread_t
**pth
= &target_th
->join_list_head
;
545 *pth
= th
->join_list_next
;
548 pth
= &(*pth
)->join_list_next
;
556 thread_join_sleep(VALUE arg
)
558 struct join_arg
*p
= (struct join_arg
*)arg
;
559 rb_thread_t
*target_th
= p
->target
, *th
= p
->waiting
;
560 double now
, limit
= p
->limit
;
562 while (target_th
->status
!= THREAD_KILLED
) {
564 sleep_forever(th
, 1);
569 thread_debug("thread_join: timeout (thid: %p)\n",
570 (void *)target_th
->thread_id
);
573 sleep_wait_for_interrupt(th
, limit
- now
);
575 thread_debug("thread_join: interrupted (thid: %p)\n",
576 (void *)target_th
->thread_id
);
582 thread_join(rb_thread_t
*target_th
, double delay
)
584 rb_thread_t
*th
= GET_THREAD();
587 arg
.target
= target_th
;
589 arg
.limit
= timeofday() + delay
;
590 arg
.forever
= delay
== DELAY_INFTY
;
592 thread_debug("thread_join (thid: %p)\n", (void *)target_th
->thread_id
);
594 if (target_th
->status
!= THREAD_KILLED
) {
595 th
->join_list_next
= target_th
->join_list_head
;
596 target_th
->join_list_head
= th
;
597 if (!rb_ensure(thread_join_sleep
, (VALUE
)&arg
,
598 remove_from_join_list
, (VALUE
)&arg
)) {
603 thread_debug("thread_join: success (thid: %p)\n",
604 (void *)target_th
->thread_id
);
606 if (target_th
->errinfo
!= Qnil
) {
607 VALUE err
= target_th
->errinfo
;
612 else if (TYPE(target_th
->errinfo
) == T_NODE
) {
613 rb_exc_raise(vm_make_jump_tag_but_local_jump(
614 GET_THROWOBJ_STATE(err
), GET_THROWOBJ_VAL(err
)));
617 /* normal exception */
621 return target_th
->self
;
627 * thr.join(limit) => thr
629 * The calling thread will suspend execution and run <i>thr</i>. Does not
630 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
631 * the time limit expires, <code>nil</code> will be returned, otherwise
632 * <i>thr</i> is returned.
634 * Any threads not joined will be killed when the main program exits. If
635 * <i>thr</i> had previously raised an exception and the
636 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
637 * (so the exception has not yet been processed) it will be processed at this
640 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
641 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
642 * x.join # Let x thread finish, a will be killed on exit.
648 * The following example illustrates the <i>limit</i> parameter.
650 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
651 * puts "Waiting" until y.join(0.15)
665 thread_join_m(int argc
, VALUE
*argv
, VALUE self
)
667 rb_thread_t
*target_th
;
668 double delay
= DELAY_INFTY
;
671 GetThreadPtr(self
, target_th
);
673 rb_scan_args(argc
, argv
, "01", &limit
);
675 delay
= rb_num2dbl(limit
);
678 return thread_join(target_th
, delay
);
685 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
688 * a = Thread.new { 2 + 2 }
693 thread_value(VALUE self
)
696 GetThreadPtr(self
, th
);
697 thread_join(th
, DELAY_INFTY
);
/*
 * Convert a duration in seconds (double) to a struct timeval.
 *
 * The integral part is truncated through `long` rather than `int`:
 * casting a double larger than INT_MAX to int is undefined behavior
 * and would silently mangle very long sleep intervals. A negative
 * fractional part is normalized so that 0 <= tv_usec < 1000000
 * (e.g. -0.25s becomes {tv_sec = -1, tv_usec = 750000}).
 */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (long)d;
    time.tv_usec = (long)((d - (long)d) * 1e6);

    if (time.tv_usec < 0) {
	time.tv_usec += (long)1e6;
	time.tv_sec -= 1;
    }
    return time;
}
720 sleep_forever(rb_thread_t
*th
, int deadlockable
)
722 enum rb_thread_status prev_status
= th
->status
;
724 th
->status
= deadlockable
? THREAD_STOPPED_FOREVER
: THREAD_STOPPED
;
728 rb_check_deadlock(th
->vm
);
734 RUBY_VM_CHECK_INTS();
735 } while (th
->status
== THREAD_STOPPED_FOREVER
);
736 th
->status
= prev_status
;
740 getclockofday(struct timeval
*tp
)
742 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
745 if (clock_gettime(CLOCK_MONOTONIC
, &ts
) == 0) {
746 tp
->tv_sec
= ts
.tv_sec
;
747 tp
->tv_usec
= ts
.tv_nsec
/ 1000;
751 gettimeofday(tp
, NULL
);
756 sleep_timeval(rb_thread_t
*th
, struct timeval tv
)
758 struct timeval to
, tvn
;
759 enum rb_thread_status prev_status
= th
->status
;
762 to
.tv_sec
+= tv
.tv_sec
;
763 if ((to
.tv_usec
+= tv
.tv_usec
) >= 1000000) {
765 to
.tv_usec
-= 1000000;
768 th
->status
= THREAD_STOPPED
;
770 native_sleep(th
, &tv
);
771 RUBY_VM_CHECK_INTS();
773 if (to
.tv_sec
< tvn
.tv_sec
) break;
774 if (to
.tv_sec
== tvn
.tv_sec
&& to
.tv_usec
<= tvn
.tv_usec
) break;
775 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
776 (long)to
.tv_sec
, to
.tv_usec
,
777 (long)tvn
.tv_sec
, tvn
.tv_usec
);
778 tv
.tv_sec
= to
.tv_sec
- tvn
.tv_sec
;
779 if ((tv
.tv_usec
= to
.tv_usec
- tvn
.tv_usec
) < 0) {
781 tv
.tv_usec
+= 1000000;
783 } while (th
->status
== THREAD_STOPPED
);
784 th
->status
= prev_status
;
/*
 * Put the current thread to sleep until it is interrupted.
 * The sleep is NOT dead-lockable (deadlock detection is skipped).
 *
 * Fix: the definition used an empty parameter list `()`, which in C
 * declares an unprototyped function; `(void)` gives a real prototype
 * matching how the function is called.
 */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/*
 * Put the current thread to sleep until interrupted, marking the
 * sleep as dead-lockable so rb_check_deadlock() can detect a fully
 * deadlocked VM.
 *
 * Fix: empty parameter list `()` replaced with `(void)` so the
 * definition carries a proper prototype.
 */
void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
804 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
807 if (clock_gettime(CLOCK_MONOTONIC
, &tp
) == 0) {
808 return (double)tp
.tv_sec
+ (double)tp
.tv_nsec
* 1e-9;
813 gettimeofday(&tv
, NULL
);
814 return (double)tv
.tv_sec
+ (double)tv
.tv_usec
* 1e-6;
819 sleep_wait_for_interrupt(rb_thread_t
*th
, double sleepsec
)
821 sleep_timeval(th
, double2timeval(sleepsec
));
825 sleep_for_polling(rb_thread_t
*th
)
829 time
.tv_usec
= 100 * 1000; /* 0.1 sec */
830 sleep_timeval(th
, time
);
834 rb_thread_wait_for(struct timeval time
)
836 rb_thread_t
*th
= GET_THREAD();
837 sleep_timeval(th
, time
);
841 rb_thread_polling(void)
843 RUBY_VM_CHECK_INTS();
844 if (!rb_thread_alone()) {
845 rb_thread_t
*th
= GET_THREAD();
846 sleep_for_polling(th
);
850 struct timeval
rb_time_timeval();
/*
 * Public API: sleep the current thread for `sec` whole seconds.
 * Converts through rb_time_timeval() and delegates to
 * rb_thread_wait_for().
 */
void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
859 rb_thread_schedule(void)
861 thread_debug("rb_thread_schedule\n");
862 if (!rb_thread_alone()) {
863 rb_thread_t
*th
= GET_THREAD();
865 thread_debug("rb_thread_schedule/switch start\n");
867 rb_gc_save_machine_context(th
);
868 native_mutex_unlock(&th
->vm
->global_vm_lock
);
870 native_thread_yield();
872 native_mutex_lock(&th
->vm
->global_vm_lock
);
874 rb_thread_set_current(th
);
875 thread_debug("rb_thread_schedule/switch done\n");
877 RUBY_VM_CHECK_INTS();
881 int rb_thread_critical
; /* TODO: dummy variable */
884 rb_thread_blocking_region(
885 rb_blocking_function_t
*func
, void *data1
,
886 rb_unblock_function_t
*ubf
, void *data2
)
889 rb_thread_t
*th
= GET_THREAD();
891 if (ubf
== RB_UBF_DFL
) {
907 * Invokes the thread scheduler to pass execution to another thread.
909 * a = Thread.new { print "a"; Thread.pass;
910 * print "b"; Thread.pass;
912 * b = Thread.new { print "x"; Thread.pass;
913 * print "y"; Thread.pass;
924 thread_s_pass(VALUE klass
)
926 rb_thread_schedule();
935 rb_thread_execute_interrupts(rb_thread_t
*th
)
937 if (th
->raised_flag
) return;
938 while (th
->interrupt_flag
) {
939 enum rb_thread_status status
= th
->status
;
940 th
->status
= THREAD_RUNNABLE
;
941 th
->interrupt_flag
= 0;
943 /* signal handling */
944 if (th
->exec_signal
) {
945 int sig
= th
->exec_signal
;
947 rb_signal_exec(th
, sig
);
950 /* exception from another thread */
951 if (th
->thrown_errinfo
) {
952 VALUE err
= th
->thrown_errinfo
;
953 th
->thrown_errinfo
= 0;
954 thread_debug("rb_thread_execute_interrupts: %ld\n", err
);
956 if (err
== eKillSignal
|| err
== eTerminateSignal
) {
957 th
->errinfo
= INT2FIX(TAG_FATAL
);
958 TH_JUMP_TAG(th
, TAG_FATAL
);
967 rb_thread_schedule();
969 EXEC_EVENT_HOOK(th
, RUBY_EVENT_SWITCH
, th
->cfp
->self
, 0, 0);
974 rb_gc_mark_threads(void)
979 /*****************************************************/
982 rb_thread_ready(rb_thread_t
*th
)
984 rb_thread_interrupt(th
);
988 rb_thread_raise(int argc
, VALUE
*argv
, rb_thread_t
*th
)
993 if (rb_thread_dead(th
)) {
997 if (th
->thrown_errinfo
!= 0 || th
->raised_flag
) {
998 rb_thread_schedule();
1002 exc
= rb_make_exception(argc
, argv
);
1003 th
->thrown_errinfo
= exc
;
1004 rb_thread_ready(th
);
1009 rb_thread_signal_raise(void *thptr
, int sig
)
1012 rb_thread_t
*th
= thptr
;
1014 argv
[0] = rb_eSignal
;
1015 argv
[1] = INT2FIX(sig
);
1016 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1020 rb_thread_signal_exit(void *thptr
)
1023 rb_thread_t
*th
= thptr
;
1025 argv
[0] = rb_eSystemExit
;
1026 argv
[1] = rb_str_new2("exit");
1027 rb_thread_raise(2, argv
, th
->vm
->main_thread
);
1031 rb_thread_set_raised(rb_thread_t
*th
)
1033 if (th
->raised_flag
& RAISED_EXCEPTION
) {
1036 th
->raised_flag
|= RAISED_EXCEPTION
;
1041 rb_thread_reset_raised(rb_thread_t
*th
)
1043 if (!(th
->raised_flag
& RAISED_EXCEPTION
)) {
1046 th
->raised_flag
&= ~RAISED_EXCEPTION
;
1051 rb_thread_fd_close(int fd
)
1058 * thr.raise(exception)
1060 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1061 * caller does not have to be <i>thr</i>.
1063 * Thread.abort_on_exception = true
1064 * a = Thread.new { sleep(200) }
1067 * <em>produces:</em>
1069 * prog.rb:3: Gotcha (RuntimeError)
1070 * from prog.rb:2:in `initialize'
1071 * from prog.rb:2:in `new'
1076 thread_raise_m(int argc
, VALUE
*argv
, VALUE self
)
1079 GetThreadPtr(self
, th
);
1080 rb_thread_raise(argc
, argv
, th
);
1087 * thr.exit => thr or nil
1088 * thr.kill => thr or nil
1089 * thr.terminate => thr or nil
1091 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1092 * is already marked to be killed, <code>exit</code> returns the
1093 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1098 rb_thread_kill(VALUE thread
)
1102 GetThreadPtr(thread
, th
);
1104 if (th
!= GET_THREAD() && th
->safe_level
< 4) {
1107 if (th
->status
== THREAD_TO_KILL
|| th
->status
== THREAD_KILLED
) {
1110 if (th
== th
->vm
->main_thread
) {
1111 rb_exit(EXIT_SUCCESS
);
1114 thread_debug("rb_thread_kill: %p (%p)\n", th
, (void *)th
->thread_id
);
1116 rb_thread_interrupt(th
);
1117 th
->thrown_errinfo
= eKillSignal
;
1118 th
->status
= THREAD_TO_KILL
;
1126 * Thread.kill(thread) => thread
1128 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1131 * a = Thread.new { loop { count += 1 } }
1133 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1135 * a.alive? #=> false
1139 rb_thread_s_kill(VALUE obj
, VALUE th
)
1141 return rb_thread_kill(th
);
1147 * Thread.exit => thread
1149 * Terminates the currently running thread and schedules another thread to be
1150 * run. If this thread is already marked to be killed, <code>exit</code>
1151 * returns the <code>Thread</code>. If this is the main thread, or the last
1152 * thread, exit the process.
1156 rb_thread_exit(void)
1158 return rb_thread_kill(GET_THREAD()->self
);
1166 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1167 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1169 * c = Thread.new { Thread.stop; puts "hey!" }
1172 * <em>produces:</em>
1178 rb_thread_wakeup(VALUE thread
)
1181 GetThreadPtr(thread
, th
);
1183 if (th
->status
== THREAD_KILLED
) {
1184 rb_raise(rb_eThreadError
, "killed thread");
1186 rb_thread_ready(th
);
1187 if (th
->status
!= THREAD_TO_KILL
) {
1188 th
->status
= THREAD_RUNNABLE
;
1198 * Wakes up <i>thr</i>, making it eligible for scheduling.
1200 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1206 * <em>produces:</em>
1214 rb_thread_run(VALUE thread
)
1216 rb_thread_wakeup(thread
);
1217 rb_thread_schedule();
1224 * Thread.stop => nil
1226 * Stops execution of the current thread, putting it into a ``sleep'' state,
1227 * and schedules execution of another thread.
1229 * a = Thread.new { print "a"; Thread.stop; print "c" }
1235 * <em>produces:</em>
1241 rb_thread_stop(void)
1243 if (rb_thread_alone()) {
1244 rb_raise(rb_eThreadError
,
1245 "stopping only thread\n\tnote: use sleep to stop forever");
1247 rb_thread_sleep_deadly();
1252 thread_list_i(st_data_t key
, st_data_t val
, void *data
)
1254 VALUE ary
= (VALUE
)data
;
1256 GetThreadPtr((VALUE
)key
, th
);
1258 switch (th
->status
) {
1259 case THREAD_RUNNABLE
:
1260 case THREAD_STOPPED
:
1261 case THREAD_STOPPED_FOREVER
:
1262 case THREAD_TO_KILL
:
1263 rb_ary_push(ary
, th
->self
);
1270 /********************************************************************/
1274 * Thread.list => array
1276 * Returns an array of <code>Thread</code> objects for all threads that are
1277 * either runnable or stopped.
1279 * Thread.new { sleep(200) }
1280 * Thread.new { 1000000.times {|i| i*i } }
1281 * Thread.new { Thread.stop }
1282 * Thread.list.each {|t| p t}
1284 * <em>produces:</em>
1286 * #<Thread:0x401b3e84 sleep>
1287 * #<Thread:0x401b3f38 run>
1288 * #<Thread:0x401b3fb0 sleep>
1289 * #<Thread:0x401bdf4c run>
1293 rb_thread_list(void)
1295 VALUE ary
= rb_ary_new();
1296 st_foreach(GET_THREAD()->vm
->living_threads
, thread_list_i
, ary
);
1301 rb_thread_current(void)
1303 return GET_THREAD()->self
;
1308 * Thread.current => thread
1310 * Returns the currently executing thread.
1312 * Thread.current #=> #<Thread:0x401bdf4c run>
1316 thread_s_current(VALUE klass
)
1318 return rb_thread_current();
1322 rb_thread_main(void)
1324 return GET_THREAD()->vm
->main_thread
->self
;
1328 rb_thread_s_main(VALUE klass
)
1330 return rb_thread_main();
1336 * Thread.abort_on_exception => true or false
1338 * Returns the status of the global ``abort on exception'' condition. The
1339 * default is <code>false</code>. When set to <code>true</code>, or if the
1340 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1341 * command line option <code>-d</code> was specified) all threads will abort
1342 * (the process will <code>exit(0)</code>) if an exception is raised in any
1343 * thread. See also <code>Thread::abort_on_exception=</code>.
1347 rb_thread_s_abort_exc(void)
1349 return GET_THREAD()->vm
->thread_abort_on_exception
? Qtrue
: Qfalse
;
1355 * Thread.abort_on_exception= boolean => true or false
1357 * When set to <code>true</code>, all threads will abort if an exception is
1358 * raised. Returns the new state.
1360 * Thread.abort_on_exception = true
1361 * t1 = Thread.new do
1362 * puts "In new thread"
1363 * raise "Exception from thread"
1366 * puts "not reached"
1368 * <em>produces:</em>
1371 * prog.rb:4: Exception from thread (RuntimeError)
1372 * from prog.rb:2:in `initialize'
1373 * from prog.rb:2:in `new'
1378 rb_thread_s_abort_exc_set(VALUE self
, VALUE val
)
1381 GET_THREAD()->vm
->thread_abort_on_exception
= RTEST(val
);
1388 * thr.abort_on_exception => true or false
1390 * Returns the status of the thread-local ``abort on exception'' condition for
1391 * <i>thr</i>. The default is <code>false</code>. See also
1392 * <code>Thread::abort_on_exception=</code>.
1396 rb_thread_abort_exc(VALUE thread
)
1399 GetThreadPtr(thread
, th
);
1400 return th
->abort_on_exception
? Qtrue
: Qfalse
;
1406 * thr.abort_on_exception= boolean => true or false
1408 * When set to <code>true</code>, causes all threads (including the main
1409 * program) to abort if an exception is raised in <i>thr</i>. The process will
1410 * effectively <code>exit(0)</code>.
1414 rb_thread_abort_exc_set(VALUE thread
, VALUE val
)
1419 GetThreadPtr(thread
, th
);
1420 th
->abort_on_exception
= RTEST(val
);
1427 * thr.group => thgrp or nil
1429 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1430 * the thread is not a member of any group.
1432 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1436 rb_thread_group(VALUE thread
)
1440 GetThreadPtr(thread
, th
);
1441 group
= th
->thgroup
;
1450 thread_status_name(enum rb_thread_status status
)
1453 case THREAD_RUNNABLE
:
1455 case THREAD_STOPPED
:
1456 case THREAD_STOPPED_FOREVER
:
1458 case THREAD_TO_KILL
:
1468 rb_thread_dead(rb_thread_t
*th
)
1470 return th
->status
== THREAD_KILLED
;
1476 * thr.status => string, false or nil
1478 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1479 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1480 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1481 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1482 * terminated with an exception.
1484 * a = Thread.new { raise("die now") }
1485 * b = Thread.new { Thread.stop }
1486 * c = Thread.new { Thread.exit }
1487 * d = Thread.new { sleep }
1488 * d.kill #=> #<Thread:0x401b3678 aborting>
1490 * b.status #=> "sleep"
1491 * c.status #=> false
1492 * d.status #=> "aborting"
1493 * Thread.current.status #=> "run"
1497 rb_thread_status(VALUE thread
)
1500 GetThreadPtr(thread
, th
);
1502 if (rb_thread_dead(th
)) {
1503 if (!NIL_P(th
->errinfo
) && !FIXNUM_P(th
->errinfo
)
1509 return rb_str_new2(thread_status_name(th
->status
));
1515 * thr.alive? => true or false
1517 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1519 * thr = Thread.new { }
1520 * thr.join #=> #<Thread:0x401b3fb0 dead>
1521 * Thread.current.alive? #=> true
1522 * thr.alive? #=> false
1526 rb_thread_alive_p(VALUE thread
)
1529 GetThreadPtr(thread
, th
);
1531 if (rb_thread_dead(th
))
1538 * thr.stop? => true or false
1540 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1542 * a = Thread.new { Thread.stop }
1543 * b = Thread.current
1549 rb_thread_stop_p(VALUE thread
)
1552 GetThreadPtr(thread
, th
);
1554 if (rb_thread_dead(th
))
1556 if (th
->status
== THREAD_STOPPED
|| th
->status
== THREAD_STOPPED_FOREVER
)
1563 * thr.safe_level => integer
1565 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1566 * levels can help when implementing sandboxes which run insecure code.
1568 * thr = Thread.new { $SAFE = 3; sleep }
1569 * Thread.current.safe_level #=> 0
1570 * thr.safe_level #=> 3
1574 rb_thread_safe_level(VALUE thread
)
1577 GetThreadPtr(thread
, th
);
1579 return INT2NUM(th
->safe_level
);
1584 * thr.inspect => string
1586 * Dump the name, id, and status of _thr_ to a string.
1590 rb_thread_inspect(VALUE thread
)
1592 const char *cname
= rb_obj_classname(thread
);
1597 GetThreadPtr(thread
, th
);
1598 status
= thread_status_name(th
->status
);
1599 str
= rb_sprintf("#<%s:%p %s>", cname
, (void *)thread
, status
);
1600 OBJ_INFECT(str
, thread
);
1606 rb_thread_local_aref(VALUE thread
, ID id
)
1611 GetThreadPtr(thread
, th
);
1612 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1613 rb_raise(rb_eSecurityError
, "Insecure: thread locals");
1615 if (!th
->local_storage
) {
1618 if (st_lookup(th
->local_storage
, id
, &val
)) {
1626 * thr[sym] => obj or nil
1628 * Attribute Reference---Returns the value of a thread-local variable, using
1629 * either a symbol or a string name. If the specified variable does not exist,
1630 * returns <code>nil</code>.
1632 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1633 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1634 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1635 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1637 * <em>produces:</em>
1639 * #<Thread:0x401b3b3c sleep>: C
1640 * #<Thread:0x401b3bc8 sleep>: B
1641 * #<Thread:0x401b3c68 sleep>: A
1642 * #<Thread:0x401bdf4c run>:
1646 rb_thread_aref(VALUE thread
, VALUE id
)
1648 return rb_thread_local_aref(thread
, rb_to_id(id
));
1652 rb_thread_local_aset(VALUE thread
, ID id
, VALUE val
)
1655 GetThreadPtr(thread
, th
);
1657 if (rb_safe_level() >= 4 && th
!= GET_THREAD()) {
1658 rb_raise(rb_eSecurityError
, "Insecure: can't modify thread locals");
1660 if (OBJ_FROZEN(thread
)) {
1661 rb_error_frozen("thread locals");
1663 if (!th
->local_storage
) {
1664 th
->local_storage
= st_init_numtable();
1667 st_delete_wrap(th
->local_storage
, id
);
1670 st_insert(th
->local_storage
, id
, val
);
1676 * thr[sym] = obj => obj
1678 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1679 * using either a symbol or a string. See also <code>Thread#[]</code>.
1683 rb_thread_aset(VALUE self
, ID id
, VALUE val
)
1685 return rb_thread_local_aset(self
, rb_to_id(id
), val
);
1690 * thr.key?(sym) => true or false
1692 * Returns <code>true</code> if the given string (or symbol) exists as a
1693 * thread-local variable.
1695 * me = Thread.current
1697 * me.key?(:oliver) #=> true
1698 * me.key?(:stanley) #=> false
1702 rb_thread_key_p(VALUE self
, VALUE key
)
1705 ID id
= rb_to_id(key
);
1707 GetThreadPtr(self
, th
);
1709 if (!th
->local_storage
) {
1712 if (st_lookup(th
->local_storage
, id
, 0)) {
1719 thread_keys_i(ID key
, VALUE value
, VALUE ary
)
1721 rb_ary_push(ary
, ID2SYM(key
));
1726 vm_living_thread_num(rb_vm_t
*vm
)
1728 return vm
->living_threads
->num_entries
;
1735 if (GET_THREAD()->vm
->living_threads
) {
1736 num
= vm_living_thread_num(GET_THREAD()->vm
);
1737 thread_debug("rb_thread_alone: %d\n", num
);
1746 * Returns an an array of the names of the thread-local variables (as Symbols).
1748 * thr = Thread.new do
1749 * Thread.current[:cat] = 'meow'
1750 * Thread.current["dog"] = 'woof'
1752 * thr.join #=> #<Thread:0x401b3f10 dead>
1753 * thr.keys #=> [:dog, :cat]
1757 rb_thread_keys(VALUE self
)
1760 VALUE ary
= rb_ary_new();
1761 GetThreadPtr(self
, th
);
1763 if (th
->local_storage
) {
1764 st_foreach(th
->local_storage
, thread_keys_i
, ary
);
1771 * thr.priority => integer
1773 * Returns the priority of <i>thr</i>. Default is inherited from the
1774 * current thread which creating the new thread, or zero for the
1775 * initial main thread; higher-priority threads will run before
1776 * lower-priority threads.
1778 * Thread.current.priority #=> 0
1782 rb_thread_priority(VALUE thread
)
1785 GetThreadPtr(thread
, th
);
1786 return INT2NUM(th
->priority
);
1792 * thr.priority= integer => thr
1794 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1795 * will run before lower-priority threads.
1797 * count1 = count2 = 0
1799 * loop { count1 += 1 }
1804 * loop { count2 += 1 }
1813 rb_thread_priority_set(VALUE thread
, VALUE prio
)
1816 GetThreadPtr(thread
, th
);
1820 th
->priority
= NUM2INT(prio
);
1821 native_thread_apply_priority(th
);
1827 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1829 rb_fd_init(volatile rb_fdset_t
*fds
)
1832 fds
->fdset
= ALLOC(fd_set
);
1833 FD_ZERO(fds
->fdset
);
1837 rb_fd_term(rb_fdset_t
*fds
)
1839 if (fds
->fdset
) xfree(fds
->fdset
);
1845 rb_fd_zero(rb_fdset_t
*fds
)
1848 MEMZERO(fds
->fdset
, fd_mask
, howmany(fds
->maxfd
, NFDBITS
));
1849 FD_ZERO(fds
->fdset
);
1854 rb_fd_resize(int n
, rb_fdset_t
*fds
)
1856 int m
= howmany(n
+ 1, NFDBITS
) * sizeof(fd_mask
);
1857 int o
= howmany(fds
->maxfd
, NFDBITS
) * sizeof(fd_mask
);
1859 if (m
< sizeof(fd_set
)) m
= sizeof(fd_set
);
1860 if (o
< sizeof(fd_set
)) o
= sizeof(fd_set
);
1863 fds
->fdset
= realloc(fds
->fdset
, m
);
1864 memset((char *)fds
->fdset
+ o
, 0, m
- o
);
1866 if (n
>= fds
->maxfd
) fds
->maxfd
= n
+ 1;
1870 rb_fd_set(int n
, rb_fdset_t
*fds
)
1872 rb_fd_resize(n
, fds
);
1873 FD_SET(n
, fds
->fdset
);
1877 rb_fd_clr(int n
, rb_fdset_t
*fds
)
1879 if (n
>= fds
->maxfd
) return;
1880 FD_CLR(n
, fds
->fdset
);
1884 rb_fd_isset(int n
, const rb_fdset_t
*fds
)
1886 if (n
>= fds
->maxfd
) return 0;
1887 return FD_ISSET(n
, fds
->fdset
) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1891 rb_fd_copy(rb_fdset_t
*dst
, const fd_set
*src
, int max
)
1893 int size
= howmany(max
, NFDBITS
) * sizeof(fd_mask
);
1895 if (size
< sizeof(fd_set
)) size
= sizeof(fd_set
);
1897 dst
->fdset
= realloc(dst
->fdset
, size
);
1898 memcpy(dst
->fdset
, src
, size
);
1902 rb_fd_select(int n
, rb_fdset_t
*readfds
, rb_fdset_t
*writefds
, rb_fdset_t
*exceptfds
, struct timeval
*timeout
)
1904 fd_set
*r
= NULL
, *w
= NULL
, *e
= NULL
;
1906 rb_fd_resize(n
- 1, readfds
);
1907 r
= rb_fd_ptr(readfds
);
1910 rb_fd_resize(n
- 1, writefds
);
1911 w
= rb_fd_ptr(writefds
);
1914 rb_fd_resize(n
- 1, exceptfds
);
1915 e
= rb_fd_ptr(exceptfds
);
1917 return select(n
, r
, w
, e
, timeout
);
1925 #define FD_ZERO(f) rb_fd_zero(f)
1926 #define FD_SET(i, f) rb_fd_set(i, f)
1927 #define FD_CLR(i, f) rb_fd_clr(i, f)
1928 #define FD_ISSET(i, f) rb_fd_isset(i, f)
1932 #if defined(__CYGWIN__) || defined(_WIN32)
/* Compare two timevals: negative if a < b, 0 if equal, positive if a > b.
 * Seconds decide; microseconds break the tie. */
static int
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;
    if (sec_diff != 0) return sec_diff;
    return a->tv_usec - b->tv_usec;
}
/* rest -= wait, with manual borrow from seconds into microseconds.
 * Returns 0 (and leaves rest partially updated) when rest would not stay
 * positive — i.e. the deadline has passed; returns 1 otherwise. */
static int
subst(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
	if (rest->tv_sec <= wait->tv_sec) {
	    return 0;
	}
	/* borrow one second into the microsecond field */
	rest->tv_sec -= 1;
	rest->tv_usec += 1000 * 1000;
    }

    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
1957 do_select(int n
, fd_set
*read
, fd_set
*write
, fd_set
*except
,
1958 struct timeval
*timeout
)
1961 fd_set orig_read
, orig_write
, orig_except
;
1965 struct timeval wait_rest
;
1968 limit
= timeofday() +
1969 (double)timeout
->tv_sec
+(double)timeout
->tv_usec
*1e-6;
1970 wait_rest
= *timeout
;
1971 timeout
= &wait_rest
;
1975 if (read
) orig_read
= *read
;
1976 if (write
) orig_write
= *write
;
1977 if (except
) orig_except
= *except
;
1982 #if defined(__CYGWIN__) || defined(_WIN32)
1984 /* polling duration: 100ms */
1985 struct timeval wait_100ms
, *wait
;
1986 wait_100ms
.tv_sec
= 0;
1987 wait_100ms
.tv_usec
= 100 * 1000; /* 100 ms */
1990 wait
= (timeout
== 0 || cmp_tv(&wait_100ms
, timeout
) > 0) ? &wait_100ms
: timeout
;
1993 result
= select(n
, read
, write
, except
, wait
);
1994 if (result
< 0) lerrno
= errno
;
1995 if (result
!= 0) break;
1997 if (read
) *read
= orig_read
;
1998 if (write
) *write
= orig_write
;
1999 if (except
) *except
= orig_except
;
2001 } while (__th
->interrupt_flag
== 0 && (timeout
== 0 || subst(timeout
, &wait_100ms
)));
2003 } while (result
== 0 && (timeout
== 0 || subst(timeout
, &wait_100ms
)));
2007 result
= select(n
, read
, write
, except
, timeout
);
2008 if (result
< 0) lerrno
= errno
;
2009 }, ubf_select
, GET_THREAD());
2020 if (read
) *read
= orig_read
;
2021 if (write
) *write
= orig_write
;
2022 if (except
) *except
= orig_except
;
2025 double d
= limit
- timeofday();
2027 wait_rest
.tv_sec
= (unsigned int)d
;
2028 wait_rest
.tv_usec
= (long)((d
-(double)wait_rest
.tv_sec
)*1e6
);
2029 if (wait_rest
.tv_sec
< 0) wait_rest
.tv_sec
= 0;
2030 if (wait_rest
.tv_usec
< 0) wait_rest
.tv_usec
= 0;
2042 rb_thread_wait_fd_rw(int fd
, int read
)
2045 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd
, read
? "read" : "write");
2048 rb_raise(rb_eIOError
, "closed stream");
2050 while (result
<= 0) {
2056 result
= do_select(fd
+ 1, rb_fd_ptr(&set
), 0, 0, 0);
2059 result
= do_select(fd
+ 1, 0, rb_fd_ptr(&set
), 0, 0);
2069 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd
, read
? "read" : "write");
/* Block the current thread until fd is readable. */
void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}
2079 rb_thread_fd_writable(int fd
)
2081 rb_thread_wait_fd_rw(fd
, 0);
/* Thread-aware select(): with no sets, this is just a sleep (forever when
 * timeout is NULL); otherwise delegate to the retrying do_select(). */
int
rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
		 struct timeval *timeout)
{
    if (!read && !write && !except) {
	if (!timeout) {
	    rb_thread_sleep_forever();
	    return 0;
	}
	rb_thread_wait_for(*timeout);
	return 0;
    }
    else {
	return do_select(max, read, write, except, timeout);
    }
}
2107 #ifdef USE_CONSERVATIVE_STACK_END
2109 rb_gc_set_stack_end(VALUE
**stack_end_p
)
2112 *stack_end_p
= &stack_end
;
2117 rb_gc_save_machine_context(rb_thread_t
*th
)
2119 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
2120 FLUSH_REGISTER_WINDOWS
;
2122 th
->machine_register_stack_end
= rb_ia64_bsp();
2124 setjmp(th
->machine_regs
);
2131 int rb_get_next_signal(rb_vm_t
*vm
);
2134 timer_thread_function(void *arg
)
2136 rb_vm_t
*vm
= arg
; /* TODO: fix me for Multi-VM */
2138 /* for time slice */
2139 RUBY_VM_SET_TIMER_INTERRUPT(vm
->running_thread
);
2142 if (vm
->buffered_signal_size
&& vm
->main_thread
->exec_signal
== 0) {
2143 rb_thread_t
*mth
= vm
->main_thread
;
2144 enum rb_thread_status prev_status
= mth
->status
;
2145 mth
->exec_signal
= rb_get_next_signal(vm
);
2146 thread_debug("main_thread: %s\n", thread_status_name(prev_status
));
2147 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2148 (long)vm
->buffered_signal_size
, vm
->main_thread
->exec_signal
);
2149 if (mth
->status
!= THREAD_KILLED
) mth
->status
= THREAD_RUNNABLE
;
2150 rb_thread_interrupt(mth
);
2151 mth
->status
= prev_status
;
2155 /* prove profiler */
2156 if (vm
->prove_profile
.enable
) {
2157 rb_thread_t
*th
= vm
->running_thread
;
2159 if (vm
->during_gc
) {
2160 /* GC prove profiling */
2167 rb_thread_stop_timer_thread(void)
2169 if (timer_thread_id
) {
2171 native_thread_join(timer_thread_id
);
2172 timer_thread_id
= 0;
2177 rb_thread_reset_timer_thread(void)
2179 timer_thread_id
= 0;
2183 rb_thread_start_timer_thread(void)
2185 rb_thread_create_timer_thread();
2189 clear_coverage_i(st_data_t key
, st_data_t val
, st_data_t dummy
)
2192 VALUE lines
= (VALUE
)val
;
2194 for (i
= 0; i
< RARRAY_LEN(lines
); i
++) {
2195 if (RARRAY_PTR(lines
)[i
] != Qnil
) {
2196 RARRAY_PTR(lines
)[i
] = INT2FIX(0);
2203 clear_coverage(void)
2205 extern VALUE
rb_get_coverages(void);
2206 VALUE coverages
= rb_get_coverages();
2207 if (RTEST(coverages
)) {
2208 st_foreach(RHASH_TBL(coverages
), clear_coverage_i
, 0);
2213 terminate_atfork_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2217 GetThreadPtr(thval
, th
);
2219 if (th
!= current_th
) {
2220 thread_cleanup_func(th
);
2226 rb_thread_atfork(void)
2228 rb_thread_t
*th
= GET_THREAD();
2229 rb_vm_t
*vm
= th
->vm
;
2230 VALUE thval
= th
->self
;
2231 vm
->main_thread
= th
;
2233 st_foreach(vm
->living_threads
, terminate_atfork_i
, (st_data_t
)th
);
2234 st_clear(vm
->living_threads
);
2235 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2238 rb_reset_random_seed();
2242 terminate_atfork_before_exec_i(st_data_t key
, st_data_t val
, rb_thread_t
*current_th
)
2246 GetThreadPtr(thval
, th
);
2248 if (th
!= current_th
) {
2249 thread_cleanup_func_before_exec(th
);
2255 rb_thread_atfork_before_exec(void)
2257 rb_thread_t
*th
= GET_THREAD();
2258 rb_vm_t
*vm
= th
->vm
;
2259 VALUE thval
= th
->self
;
2260 vm
->main_thread
= th
;
2262 st_foreach(vm
->living_threads
, terminate_atfork_before_exec_i
, (st_data_t
)th
);
2263 st_clear(vm
->living_threads
);
2264 st_insert(vm
->living_threads
, thval
, (st_data_t
) th
->thread_id
);
2275 * Document-class: ThreadGroup
2277 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2278 * threads as a group. A <code>Thread</code> can belong to only one
2279 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2280 * remove it from any previous group.
2282 * Newly created threads belong to the same group as the thread from which they
2286 static VALUE
thgroup_s_alloc(VALUE
);
2288 thgroup_s_alloc(VALUE klass
)
2291 struct thgroup
*data
;
2293 group
= Data_Make_Struct(klass
, struct thgroup
, 0, -1, data
);
2295 data
->group
= group
;
2300 struct thgroup_list_params
{
2306 thgroup_list_i(st_data_t key
, st_data_t val
, st_data_t data
)
2308 VALUE thread
= (VALUE
)key
;
2309 VALUE ary
= ((struct thgroup_list_params
*)data
)->ary
;
2310 VALUE group
= ((struct thgroup_list_params
*)data
)->group
;
2312 GetThreadPtr(thread
, th
);
2314 if (th
->thgroup
== group
) {
2315 rb_ary_push(ary
, thread
);
2322 * thgrp.list => array
2324 * Returns an array of all existing <code>Thread</code> objects that belong to
2327 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2331 thgroup_list(VALUE group
)
2333 VALUE ary
= rb_ary_new();
2334 struct thgroup_list_params param
;
2337 param
.group
= group
;
2338 st_foreach(GET_THREAD()->vm
->living_threads
, thgroup_list_i
, (st_data_t
) & param
);
2345 * thgrp.enclose => thgrp
2347 * Prevents threads from being added to or removed from the receiving
2348 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2349 * <code>ThreadGroup</code>.
2351 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2352 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2353 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2356 * <em>produces:</em>
2358 * ThreadError: can't move from the enclosed thread group
2362 thgroup_enclose(VALUE group
)
2364 struct thgroup
*data
;
2366 Data_Get_Struct(group
, struct thgroup
, data
);
2375 * thgrp.enclosed? => true or false
2377 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2378 * ThreadGroup#enclose.
2382 thgroup_enclosed_p(VALUE group
)
2384 struct thgroup
*data
;
2386 Data_Get_Struct(group
, struct thgroup
, data
);
2395 * thgrp.add(thread) => thgrp
2397 * Adds the given <em>thread</em> to this group, removing it from any other
2398 * group to which it may have previously belonged.
2400 * puts "Initial group is #{ThreadGroup::Default.list}"
2401 * tg = ThreadGroup.new
2402 * t1 = Thread.new { sleep }
2403 * t2 = Thread.new { sleep }
2404 * puts "t1 is #{t1}"
2405 * puts "t2 is #{t2}"
2407 * puts "Initial group now #{ThreadGroup::Default.list}"
2408 * puts "tg group now #{tg.list}"
2410 * <em>produces:</em>
2412 * Initial group is #<Thread:0x401bdf4c>
2413 * t1 is #<Thread:0x401b3c90>
2414 * t2 is #<Thread:0x401b3c18>
2415 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2416 * tg group now #<Thread:0x401b3c90>
2420 thgroup_add(VALUE group
, VALUE thread
)
2423 struct thgroup
*data
;
2426 GetThreadPtr(thread
, th
);
2428 if (OBJ_FROZEN(group
)) {
2429 rb_raise(rb_eThreadError
, "can't move to the frozen thread group");
2431 Data_Get_Struct(group
, struct thgroup
, data
);
2432 if (data
->enclosed
) {
2433 rb_raise(rb_eThreadError
, "can't move to the enclosed thread group");
2440 if (OBJ_FROZEN(th
->thgroup
)) {
2441 rb_raise(rb_eThreadError
, "can't move from the frozen thread group");
2443 Data_Get_Struct(th
->thgroup
, struct thgroup
, data
);
2444 if (data
->enclosed
) {
2445 rb_raise(rb_eThreadError
,
2446 "can't move from the enclosed thread group");
2449 th
->thgroup
= group
;
2455 * Document-class: Mutex
2457 * Mutex implements a simple semaphore that can be used to coordinate access to
2458 * shared data from multiple concurrent threads.
2463 * semaphore = Mutex.new
2466 * semaphore.synchronize {
2467 * # access shared resource
2472 * semaphore.synchronize {
2473 * # access shared resource
2479 typedef struct mutex_struct
{
2480 rb_thread_lock_t lock
;
2481 rb_thread_cond_t cond
;
2482 rb_thread_t
volatile *th
;
2483 volatile int cond_waiting
, cond_notified
;
2487 #define GetMutexPtr(obj, tobj) \
2488 Data_Get_Struct(obj, mutex_t, tobj)
2490 static const char *mutex_unlock(mutex_t
*mutex
);
2493 mutex_mark(void *ptr
)
2496 mutex_t
*mutex
= ptr
;
2498 rb_gc_mark(mutex
->th
->self
);
2504 mutex_free(void *ptr
)
2507 mutex_t
*mutex
= ptr
;
2509 /* rb_warn("free locked mutex"); */
2510 mutex_unlock(mutex
);
2512 native_mutex_destroy(&mutex
->lock
);
2513 native_cond_destroy(&mutex
->cond
);
2519 mutex_alloc(VALUE klass
)
2524 obj
= Data_Make_Struct(klass
, mutex_t
, mutex_mark
, mutex_free
, mutex
);
2525 native_mutex_initialize(&mutex
->lock
);
2526 native_cond_initialize(&mutex
->cond
);
2532 * Mutex.new => mutex
2534 * Creates a new Mutex
2537 mutex_initialize(VALUE self
)
2545 return mutex_alloc(rb_cMutex
);
2550 * mutex.locked? => true or false
2552 * Returns +true+ if this lock is currently held by some thread.
2555 rb_mutex_locked_p(VALUE self
)
2558 GetMutexPtr(self
, mutex
);
2559 return mutex
->th
? Qtrue
: Qfalse
;
2563 mutex_locked(rb_thread_t
*th
, VALUE self
)
2565 if (th
->keeping_mutexes
) {
2567 GetMutexPtr(self
, mutex
);
2568 mutex
->next_mutex
= th
->keeping_mutexes
;
2570 th
->keeping_mutexes
= self
;
2575 * mutex.try_lock => true or false
2577 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2581 rb_mutex_trylock(VALUE self
)
2584 VALUE locked
= Qfalse
;
2585 GetMutexPtr(self
, mutex
);
2587 if (mutex
->th
== GET_THREAD()) {
2588 rb_raise(rb_eThreadError
, "deadlock; recursive locking");
2591 native_mutex_lock(&mutex
->lock
);
2592 if (mutex
->th
== 0) {
2593 mutex
->th
= GET_THREAD();
2596 mutex_locked(GET_THREAD(), self
);
2598 native_mutex_unlock(&mutex
->lock
);
2604 lock_func(rb_thread_t
*th
, mutex_t
*mutex
, int last_thread
)
2606 int interrupted
= 0;
2607 #if 0 /* for debug */
2608 native_thread_yield();
2611 native_mutex_lock(&mutex
->lock
);
2612 th
->transition_for_lock
= 0;
2613 while (mutex
->th
|| (mutex
->th
= th
, 0)) {
2619 mutex
->cond_waiting
++;
2620 native_cond_wait(&mutex
->cond
, &mutex
->lock
);
2621 mutex
->cond_notified
--;
2623 if (RUBY_VM_INTERRUPTED(th
)) {
2628 th
->transition_for_lock
= 1;
2629 native_mutex_unlock(&mutex
->lock
);
2631 if (interrupted
== 2) native_thread_yield();
2632 #if 0 /* for debug */
2633 native_thread_yield();
2640 lock_interrupt(void *ptr
)
2642 mutex_t
*mutex
= (mutex_t
*)ptr
;
2643 native_mutex_lock(&mutex
->lock
);
2644 if (mutex
->cond_waiting
> 0) {
2645 native_cond_broadcast(&mutex
->cond
);
2646 mutex
->cond_notified
= mutex
->cond_waiting
;
2647 mutex
->cond_waiting
= 0;
2649 native_mutex_unlock(&mutex
->lock
);
2654 * mutex.lock => true or false
2656 * Attempts to grab the lock and waits if it isn't available.
2657 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2660 rb_mutex_lock(VALUE self
)
2662 if (rb_mutex_trylock(self
) == Qfalse
) {
2664 rb_thread_t
*th
= GET_THREAD();
2665 GetMutexPtr(self
, mutex
);
2667 while (mutex
->th
!= th
) {
2669 enum rb_thread_status prev_status
= th
->status
;
2670 int last_thread
= 0;
2671 struct rb_unblock_callback oldubf
;
2673 set_unblock_function(th
, lock_interrupt
, mutex
, &oldubf
);
2674 th
->status
= THREAD_STOPPED_FOREVER
;
2676 th
->locking_mutex
= self
;
2677 if (vm_living_thread_num(th
->vm
) == th
->vm
->sleeper
) {
2681 th
->transition_for_lock
= 1;
2682 BLOCKING_REGION_CORE({
2683 interrupted
= lock_func(th
, mutex
, last_thread
);
2685 th
->transition_for_lock
= 0;
2686 remove_signal_thread_list(th
);
2687 reset_unblock_function(th
, &oldubf
);
2689 th
->locking_mutex
= Qfalse
;
2690 if (mutex
->th
&& interrupted
== 2) {
2691 rb_check_deadlock(th
->vm
);
2693 if (th
->status
== THREAD_STOPPED_FOREVER
) {
2694 th
->status
= prev_status
;
2698 if (mutex
->th
== th
) mutex_locked(th
, self
);
2701 RUBY_VM_CHECK_INTS();
2709 mutex_unlock(mutex_t
*mutex
)
2711 const char *err
= NULL
;
2712 rb_thread_t
*th
= GET_THREAD();
2715 native_mutex_lock(&mutex
->lock
);
2717 if (mutex
->th
== 0) {
2718 err
= "Attempt to unlock a mutex which is not locked";
2720 else if (mutex
->th
!= GET_THREAD()) {
2721 err
= "Attempt to unlock a mutex which is locked by another thread";
2725 if (mutex
->cond_waiting
> 0) {
2726 /* waiting thread */
2727 native_cond_signal(&mutex
->cond
);
2728 mutex
->cond_waiting
--;
2729 mutex
->cond_notified
++;
2733 native_mutex_unlock(&mutex
->lock
);
2736 GetMutexPtr(th
->keeping_mutexes
, th_mutex
);
2737 if (th_mutex
== mutex
) {
2738 th
->keeping_mutexes
= mutex
->next_mutex
;
2743 GetMutexPtr(th_mutex
->next_mutex
, tmp_mutex
);
2744 if (tmp_mutex
== mutex
) {
2745 th_mutex
->next_mutex
= tmp_mutex
->next_mutex
;
2748 th_mutex
= tmp_mutex
;
2751 mutex
->next_mutex
= Qfalse
;
2759 * mutex.unlock => self
2761 * Releases the lock.
2762 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2765 rb_mutex_unlock(VALUE self
)
2769 GetMutexPtr(self
, mutex
);
2771 err
= mutex_unlock(mutex
);
2772 if (err
) rb_raise(rb_eThreadError
, err
);
2778 rb_mutex_unlock_all(VALUE mutexes
)
2784 GetMutexPtr(mutexes
, mutex
);
2785 /* rb_warn("mutex #<%s:%p> remains to be locked by terminated thread",
2786 rb_obj_classname(mutexes), (void*)mutexes); */
2787 mutexes
= mutex
->next_mutex
;
2788 err
= mutex_unlock(mutex
);
2789 if (err
) rb_bug("invalid keeping_mutexes: %s", err
);
2794 rb_mutex_sleep_forever(VALUE time
)
2796 rb_thread_sleep_deadly();
2801 rb_mutex_wait_for(VALUE time
)
2803 const struct timeval
*t
= (struct timeval
*)time
;
2804 rb_thread_wait_for(*t
);
2809 rb_mutex_sleep(VALUE self
, VALUE timeout
)
2814 if (!NIL_P(timeout
)) {
2815 t
= rb_time_interval(timeout
);
2817 rb_mutex_unlock(self
);
2819 if (NIL_P(timeout
)) {
2820 rb_ensure(rb_mutex_sleep_forever
, Qnil
, rb_mutex_lock
, self
);
2823 rb_ensure(rb_mutex_wait_for
, (VALUE
)&t
, rb_mutex_lock
, self
);
2825 end
= time(0) - beg
;
2826 return INT2FIX(end
);
2831 * mutex.sleep(timeout = nil) => number
2833 * Releases the lock and sleeps +timeout+ seconds if it is given and
2834 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2835 * the current thread.
2838 mutex_sleep(int argc
, VALUE
*argv
, VALUE self
)
2842 rb_scan_args(argc
, argv
, "01", &timeout
);
2843 return rb_mutex_sleep(self
, timeout
);
2848 * mutex.synchronize { ... } => result of the block
2850 * Obtains a lock, runs the block, and releases the lock when the block
2851 * completes. See the example under +Mutex+.
2855 rb_thread_synchronize(VALUE mutex
, VALUE (*func
)(VALUE arg
), VALUE arg
)
2857 rb_mutex_lock(mutex
);
2858 return rb_ensure(func
, arg
, rb_mutex_unlock
, mutex
);
2862 * Document-class: Barrier
2864 typedef struct rb_thread_list_struct rb_thread_list_t
;
2866 struct rb_thread_list_struct
{
2868 rb_thread_list_t
*next
;
2872 thlist_mark(void *ptr
)
2874 rb_thread_list_t
*q
= ptr
;
2876 for (; q
; q
= q
->next
) {
2877 rb_gc_mark(q
->th
->self
);
2882 thlist_free(void *ptr
)
2884 rb_thread_list_t
*q
= ptr
, *next
;
2886 for (; q
; q
= next
) {
2893 thlist_signal(rb_thread_list_t
**list
, unsigned int maxth
, rb_thread_t
**woken_thread
)
2896 rb_thread_list_t
*q
;
2898 while ((q
= *list
) != NULL
) {
2899 rb_thread_t
*th
= q
->th
;
2903 if (th
->status
!= THREAD_KILLED
) {
2904 rb_thread_ready(th
);
2905 if (!woken
&& woken_thread
) *woken_thread
= th
;
2906 if (++woken
>= maxth
&& maxth
) break;
2914 rb_thread_list_t
*waiting
, **tail
;
2918 barrier_mark(void *ptr
)
2920 rb_barrier_t
*b
= ptr
;
2922 if (b
->owner
) rb_gc_mark(b
->owner
->self
);
2923 thlist_mark(b
->waiting
);
2927 barrier_free(void *ptr
)
2929 rb_barrier_t
*b
= ptr
;
2932 thlist_free(b
->waiting
);
2938 barrier_alloc(VALUE klass
)
2941 rb_barrier_t
*barrier
;
2943 obj
= Data_Make_Struct(klass
, rb_barrier_t
, barrier_mark
, barrier_free
, barrier
);
2944 barrier
->owner
= GET_THREAD();
2945 barrier
->waiting
= 0;
2946 barrier
->tail
= &barrier
->waiting
;
2951 rb_barrier_new(void)
2953 return barrier_alloc(rb_cBarrier
);
2957 rb_barrier_wait(VALUE self
)
2959 rb_barrier_t
*barrier
;
2960 rb_thread_list_t
*q
;
2962 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
2963 if (!barrier
->owner
|| barrier
->owner
->status
== THREAD_KILLED
) {
2965 if (thlist_signal(&barrier
->waiting
, 1, &barrier
->owner
)) return Qfalse
;
2968 else if (barrier
->owner
== GET_THREAD()) {
2972 *barrier
->tail
= q
= ALLOC(rb_thread_list_t
);
2973 q
->th
= GET_THREAD();
2975 barrier
->tail
= &q
->next
;
2976 rb_thread_sleep_forever();
2977 return barrier
->owner
== GET_THREAD() ? Qtrue
: Qfalse
;
2982 rb_barrier_release(VALUE self
)
2984 rb_barrier_t
*barrier
;
2987 Data_Get_Struct(self
, rb_barrier_t
, barrier
);
2988 if (barrier
->owner
!= GET_THREAD()) {
2989 rb_raise(rb_eThreadError
, "not owned");
2991 n
= thlist_signal(&barrier
->waiting
, 0, &barrier
->owner
);
2992 return n
? UINT2NUM(n
) : Qfalse
;
2995 /* variables for recursive traversals */
2996 static ID recursive_key
;
2999 recursive_check(VALUE hash
, VALUE obj
)
3001 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3005 VALUE list
= rb_hash_aref(hash
, ID2SYM(rb_frame_this_func()));
3007 if (NIL_P(list
) || TYPE(list
) != T_HASH
)
3009 if (NIL_P(rb_hash_lookup(list
, obj
)))
3016 recursive_push(VALUE hash
, VALUE obj
)
3020 sym
= ID2SYM(rb_frame_this_func());
3021 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3022 hash
= rb_hash_new();
3023 rb_thread_local_aset(rb_thread_current(), recursive_key
, hash
);
3027 list
= rb_hash_aref(hash
, sym
);
3029 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3030 list
= rb_hash_new();
3031 rb_hash_aset(hash
, sym
, list
);
3033 rb_hash_aset(list
, obj
, Qtrue
);
3038 recursive_pop(VALUE hash
, VALUE obj
)
3042 sym
= ID2SYM(rb_frame_this_func());
3043 if (NIL_P(hash
) || TYPE(hash
) != T_HASH
) {
3046 symname
= rb_inspect(sym
);
3047 thrname
= rb_inspect(rb_thread_current());
3049 rb_raise(rb_eTypeError
, "invalid inspect_tbl hash for %s in %s",
3050 StringValuePtr(symname
), StringValuePtr(thrname
));
3052 list
= rb_hash_aref(hash
, sym
);
3053 if (NIL_P(list
) || TYPE(list
) != T_HASH
) {
3054 VALUE symname
= rb_inspect(sym
);
3055 VALUE thrname
= rb_inspect(rb_thread_current());
3056 rb_raise(rb_eTypeError
, "invalid inspect_tbl list for %s in %s",
3057 StringValuePtr(symname
), StringValuePtr(thrname
));
3059 rb_hash_delete(list
, obj
);
3063 rb_exec_recursive(VALUE (*func
) (VALUE
, VALUE
, int), VALUE obj
, VALUE arg
)
3065 VALUE hash
= rb_thread_local_aref(rb_thread_current(), recursive_key
);
3066 VALUE objid
= rb_obj_id(obj
);
3068 if (recursive_check(hash
, objid
)) {
3069 return (*func
) (obj
, arg
, Qtrue
);
3072 VALUE result
= Qundef
;
3075 hash
= recursive_push(hash
, objid
);
3077 if ((state
= EXEC_TAG()) == 0) {
3078 result
= (*func
) (obj
, arg
, Qfalse
);
3081 recursive_pop(hash
, objid
);
3090 static rb_event_hook_t
*
3091 alloc_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3093 rb_event_hook_t
*hook
= ALLOC(rb_event_hook_t
);
3095 hook
->flag
= events
;
3101 thread_reset_event_flags(rb_thread_t
*th
)
3103 rb_event_hook_t
*hook
= th
->event_hooks
;
3104 rb_event_flag_t flag
= th
->event_flags
& RUBY_EVENT_VM
;
3113 rb_thread_add_event_hook(rb_thread_t
*th
,
3114 rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3116 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3117 hook
->next
= th
->event_hooks
;
3118 th
->event_hooks
= hook
;
3119 thread_reset_event_flags(th
);
3123 set_threads_event_flags_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3127 GetThreadPtr(thval
, th
);
3130 th
->event_flags
|= RUBY_EVENT_VM
;
3133 th
->event_flags
&= (~RUBY_EVENT_VM
);
3139 set_threads_event_flags(int flag
)
3141 st_foreach(GET_VM()->living_threads
, set_threads_event_flags_i
, (st_data_t
) flag
);
3145 rb_add_event_hook(rb_event_hook_func_t func
, rb_event_flag_t events
, VALUE data
)
3147 rb_event_hook_t
*hook
= alloc_event_hook(func
, events
, data
);
3148 rb_vm_t
*vm
= GET_VM();
3150 hook
->next
= vm
->event_hooks
;
3151 vm
->event_hooks
= hook
;
3153 set_threads_event_flags(1);
3157 remove_event_hook(rb_event_hook_t
**root
, rb_event_hook_func_t func
)
3159 rb_event_hook_t
*prev
= NULL
, *hook
= *root
, *next
;
3163 if (func
== 0 || hook
->func
== func
) {
3165 prev
->next
= hook
->next
;
3181 rb_thread_remove_event_hook(rb_thread_t
*th
, rb_event_hook_func_t func
)
3183 int ret
= remove_event_hook(&th
->event_hooks
, func
);
3184 thread_reset_event_flags(th
);
3189 rb_remove_event_hook(rb_event_hook_func_t func
)
3191 rb_vm_t
*vm
= GET_VM();
3192 rb_event_hook_t
*hook
= vm
->event_hooks
;
3193 int ret
= remove_event_hook(&vm
->event_hooks
, func
);
3195 if (hook
!= NULL
&& vm
->event_hooks
== NULL
) {
3196 set_threads_event_flags(0);
3203 clear_trace_func_i(st_data_t key
, st_data_t val
, st_data_t flag
)
3206 GetThreadPtr((VALUE
)key
, th
);
3207 rb_thread_remove_event_hook(th
, 0);
3212 rb_clear_trace_func(void)
3214 st_foreach(GET_VM()->living_threads
, clear_trace_func_i
, (st_data_t
) 0);
3215 rb_remove_event_hook(0);
3218 static void call_trace_func(rb_event_flag_t
, VALUE data
, VALUE self
, ID id
, VALUE klass
);
3222 * set_trace_func(proc) => proc
3223 * set_trace_func(nil) => nil
3225 * Establishes _proc_ as the handler for tracing, or disables
3226 * tracing if the parameter is +nil+. _proc_ takes up
3227 * to six parameters: an event name, a filename, a line number, an
3228 * object id, a binding, and the name of a class. _proc_ is
3229 * invoked whenever an event occurs. Events are: <code>c-call</code>
3230 * (call a C-language routine), <code>c-return</code> (return from a
3231 * C-language routine), <code>call</code> (call a Ruby method),
3232 * <code>class</code> (start a class or module definition),
3233 * <code>end</code> (finish a class or module definition),
3234 * <code>line</code> (execute code on a new line), <code>raise</code>
3235 * (raise an exception), and <code>return</code> (return from a Ruby
3236 * method). Tracing is disabled within the context of _proc_.
3245 * set_trace_func proc { |event, file, line, id, binding, classname|
3246 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3251 * line prog.rb:11 false
3252 * c-call prog.rb:11 new Class
3253 * c-call prog.rb:11 initialize Object
3254 * c-return prog.rb:11 initialize Object
3255 * c-return prog.rb:11 new Class
3256 * line prog.rb:12 false
3257 * call prog.rb:2 test Test
3258 * line prog.rb:3 test Test
3259 * line prog.rb:4 test Test
3260 * return prog.rb:4 test Test
3264 set_trace_func(VALUE obj
, VALUE trace
)
3266 rb_remove_event_hook(call_trace_func
);
3272 if (!rb_obj_is_proc(trace
)) {
3273 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3276 rb_add_event_hook(call_trace_func
, RUBY_EVENT_ALL
, trace
);
3281 thread_add_trace_func(rb_thread_t
*th
, VALUE trace
)
3283 if (!rb_obj_is_proc(trace
)) {
3284 rb_raise(rb_eTypeError
, "trace_func needs to be Proc");
3287 rb_thread_add_event_hook(th
, call_trace_func
, RUBY_EVENT_ALL
, trace
);
3291 thread_add_trace_func_m(VALUE obj
, VALUE trace
)
3294 GetThreadPtr(obj
, th
);
3295 thread_add_trace_func(th
, trace
);
3300 thread_set_trace_func_m(VALUE obj
, VALUE trace
)
3303 GetThreadPtr(obj
, th
);
3304 rb_thread_remove_event_hook(th
, call_trace_func
);
3309 thread_add_trace_func(th
, trace
);
3314 get_event_name(rb_event_flag_t event
)
3317 case RUBY_EVENT_LINE
:
3319 case RUBY_EVENT_CLASS
:
3321 case RUBY_EVENT_END
:
3323 case RUBY_EVENT_CALL
:
3325 case RUBY_EVENT_RETURN
:
3327 case RUBY_EVENT_C_CALL
:
3329 case RUBY_EVENT_C_RETURN
:
3331 case RUBY_EVENT_RAISE
:
3338 VALUE
ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
);
3340 struct call_trace_func_args
{
3341 rb_event_flag_t event
;
3349 call_trace_proc(VALUE args
, int tracing
)
3351 struct call_trace_func_args
*p
= (struct call_trace_func_args
*)args
;
3352 VALUE eventname
= rb_str_new2(get_event_name(p
->event
));
3353 VALUE filename
= rb_str_new2(rb_sourcefile());
3355 int line
= rb_sourceline();
3359 if (p
->event
== RUBY_EVENT_C_CALL
||
3360 p
->event
== RUBY_EVENT_C_RETURN
) {
3365 rb_thread_method_id_and_class(GET_THREAD(), &id
, &klass
);
3367 if (id
== ID_ALLOCATOR
)
3370 if (TYPE(klass
) == T_ICLASS
) {
3371 klass
= RBASIC(klass
)->klass
;
3373 else if (FL_TEST(klass
, FL_SINGLETON
)) {
3374 klass
= rb_iv_get(klass
, "__attached__");
3378 argv
[0] = eventname
;
3380 argv
[2] = INT2FIX(line
);
3381 argv
[3] = id
? ID2SYM(id
) : Qnil
;
3382 argv
[4] = p
->self
? rb_binding_new() : Qnil
;
3383 argv
[5] = klass
? klass
: Qnil
;
3385 return rb_proc_call_with_block(p
->proc
, 6, argv
, Qnil
);
3389 call_trace_func(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3391 struct call_trace_func_args args
;
3398 ruby_suppress_tracing(call_trace_proc
, (VALUE
)&args
, Qfalse
);
3402 ruby_suppress_tracing(VALUE (*func
)(VALUE
, int), VALUE arg
, int always
)
3404 rb_thread_t
*th
= GET_THREAD();
3405 int state
, raised
, tracing
;
3406 VALUE result
= Qnil
;
3408 if ((tracing
= th
->tracing
) != 0 && !always
) {
3415 raised
= rb_thread_reset_raised(th
);
3418 if ((state
= EXEC_TAG()) == 0) {
3419 result
= (*func
)(arg
, tracing
);
3423 rb_thread_set_raised(th
);
3427 th
->tracing
= tracing
;
3436 * +Thread+ encapsulates the behavior of a thread of
3437 * execution, including the main thread of the Ruby script.
3439 * In the descriptions of the methods in this class, the parameter _sym_
3440 * refers to a symbol, which is either a quoted string or a
3441 * +Symbol+ (such as <code>:name</code>).
3451 rb_define_singleton_method(rb_cThread
, "new", thread_s_new
, -1);
3452 rb_define_singleton_method(rb_cThread
, "start", thread_start
, -2);
3453 rb_define_singleton_method(rb_cThread
, "fork", thread_start
, -2);
3454 rb_define_singleton_method(rb_cThread
, "main", rb_thread_s_main
, 0);
3455 rb_define_singleton_method(rb_cThread
, "current", thread_s_current
, 0);
3456 rb_define_singleton_method(rb_cThread
, "stop", rb_thread_stop
, 0);
3457 rb_define_singleton_method(rb_cThread
, "kill", rb_thread_s_kill
, 1);
3458 rb_define_singleton_method(rb_cThread
, "exit", rb_thread_exit
, 0);
3459 rb_define_singleton_method(rb_cThread
, "pass", thread_s_pass
, 0);
3460 rb_define_singleton_method(rb_cThread
, "list", rb_thread_list
, 0);
3461 rb_define_singleton_method(rb_cThread
, "abort_on_exception", rb_thread_s_abort_exc
, 0);
3462 rb_define_singleton_method(rb_cThread
, "abort_on_exception=", rb_thread_s_abort_exc_set
, 1);
3463 #if THREAD_DEBUG < 0
3464 rb_define_singleton_method(rb_cThread
, "DEBUG", rb_thread_s_debug
, 0);
3465 rb_define_singleton_method(rb_cThread
, "DEBUG=", rb_thread_s_debug_set
, 1);
3468 rb_define_method(rb_cThread
, "initialize", thread_initialize
, -2);
3469 rb_define_method(rb_cThread
, "raise", thread_raise_m
, -1);
3470 rb_define_method(rb_cThread
, "join", thread_join_m
, -1);
3471 rb_define_method(rb_cThread
, "value", thread_value
, 0);
3472 rb_define_method(rb_cThread
, "kill", rb_thread_kill
, 0);
3473 rb_define_method(rb_cThread
, "terminate", rb_thread_kill
, 0);
3474 rb_define_method(rb_cThread
, "exit", rb_thread_kill
, 0);
3475 rb_define_method(rb_cThread
, "run", rb_thread_run
, 0);
3476 rb_define_method(rb_cThread
, "wakeup", rb_thread_wakeup
, 0);
3477 rb_define_method(rb_cThread
, "[]", rb_thread_aref
, 1);
3478 rb_define_method(rb_cThread
, "[]=", rb_thread_aset
, 2);
3479 rb_define_method(rb_cThread
, "key?", rb_thread_key_p
, 1);
3480 rb_define_method(rb_cThread
, "keys", rb_thread_keys
, 0);
3481 rb_define_method(rb_cThread
, "priority", rb_thread_priority
, 0);
3482 rb_define_method(rb_cThread
, "priority=", rb_thread_priority_set
, 1);
3483 rb_define_method(rb_cThread
, "status", rb_thread_status
, 0);
3484 rb_define_method(rb_cThread
, "alive?", rb_thread_alive_p
, 0);
3485 rb_define_method(rb_cThread
, "stop?", rb_thread_stop_p
, 0);
3486 rb_define_method(rb_cThread
, "abort_on_exception", rb_thread_abort_exc
, 0);
3487 rb_define_method(rb_cThread
, "abort_on_exception=", rb_thread_abort_exc_set
, 1);
3488 rb_define_method(rb_cThread
, "safe_level", rb_thread_safe_level
, 0);
3489 rb_define_method(rb_cThread
, "group", rb_thread_group
, 0);
3491 rb_define_method(rb_cThread
, "inspect", rb_thread_inspect
, 0);
3493 cThGroup
= rb_define_class("ThreadGroup", rb_cObject
);
3494 rb_define_alloc_func(cThGroup
, thgroup_s_alloc
);
3495 rb_define_method(cThGroup
, "list", thgroup_list
, 0);
3496 rb_define_method(cThGroup
, "enclose", thgroup_enclose
, 0);
3497 rb_define_method(cThGroup
, "enclosed?", thgroup_enclosed_p
, 0);
3498 rb_define_method(cThGroup
, "add", thgroup_add
, 1);
3501 rb_thread_t
*th
= GET_THREAD();
3502 th
->thgroup
= th
->vm
->thgroup_default
= rb_obj_alloc(cThGroup
);
3503 rb_define_const(cThGroup
, "Default", th
->thgroup
);
3506 rb_cMutex
= rb_define_class("Mutex", rb_cObject
);
3507 rb_define_alloc_func(rb_cMutex
, mutex_alloc
);
3508 rb_define_method(rb_cMutex
, "initialize", mutex_initialize
, 0);
3509 rb_define_method(rb_cMutex
, "locked?", rb_mutex_locked_p
, 0);
3510 rb_define_method(rb_cMutex
, "try_lock", rb_mutex_trylock
, 0);
3511 rb_define_method(rb_cMutex
, "lock", rb_mutex_lock
, 0);
3512 rb_define_method(rb_cMutex
, "unlock", rb_mutex_unlock
, 0);
3513 rb_define_method(rb_cMutex
, "sleep", mutex_sleep
, -1);
3515 recursive_key
= rb_intern("__recursive_key__");
3516 rb_eThreadError
= rb_define_class("ThreadError", rb_eStandardError
);
3519 rb_define_global_function("set_trace_func", set_trace_func
, 1);
3520 rb_define_method(rb_cThread
, "set_trace_func", thread_set_trace_func_m
, 1);
3521 rb_define_method(rb_cThread
, "add_trace_func", thread_add_trace_func_m
, 1);
3523 /* init thread core */
3524 Init_native_thread();
3526 /* main thread setting */
3528 /* acquire global interpreter lock */
3529 rb_thread_lock_t
*lp
= &GET_THREAD()->vm
->global_vm_lock
;
3530 native_mutex_initialize(lp
);
3531 native_mutex_lock(lp
);
3532 native_mutex_initialize(&GET_THREAD()->interrupt_lock
);
3536 rb_thread_create_timer_thread();
3538 (void)native_mutex_trylock
;
3539 (void)ruby_thread_set_native
;
3543 ruby_native_thread_p(void)
3545 rb_thread_t
*th
= ruby_thread_from_native();
3547 return th
? Qtrue
: Qfalse
;
3551 check_deadlock_i(st_data_t key
, st_data_t val
, int *found
)
3555 GetThreadPtr(thval
, th
);
3557 if (th
->status
!= THREAD_STOPPED_FOREVER
|| RUBY_VM_INTERRUPTED(th
) || th
->transition_for_lock
) {
3560 else if (th
->locking_mutex
) {
3562 GetMutexPtr(th
->locking_mutex
, mutex
);
3564 native_mutex_lock(&mutex
->lock
);
3565 if (mutex
->th
== th
|| (!mutex
->th
&& mutex
->cond_notified
)) {
3568 native_mutex_unlock(&mutex
->lock
);
3571 return (*found
) ? ST_STOP
: ST_CONTINUE
;
#if 0 /* for debug */
/* Dump each living thread's scheduling state (and the state of the
 * mutex it waits on, if any) to stdout.  Compiled out by default. */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
	mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", mutex->th, mutex->cond_notified);
	native_mutex_unlock(&mutex->lock);
    }
    else {
	puts("");
    }

    return ST_CONTINUE;
}
#endif
3598 rb_check_deadlock(rb_vm_t
*vm
)
3602 if (vm_living_thread_num(vm
) > vm
->sleeper
) return;
3603 if (vm_living_thread_num(vm
) < vm
->sleeper
) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3605 st_foreach(vm
->living_threads
, check_deadlock_i
, (st_data_t
)&found
);
3609 argv
[0] = rb_eFatal
;
3610 argv
[1] = rb_str_new2("deadlock detected");
3611 #if 0 /* for debug */
3612 printf("%d %d %p %p\n", vm
->living_threads
->num_entries
, vm
->sleeper
, GET_THREAD(), vm
->main_thread
);
3613 st_foreach(vm
->living_threads
, debug_i
, (st_data_t
)0);
3615 rb_thread_raise(2, argv
, vm
->main_thread
);
3620 update_coverage(rb_event_flag_t event
, VALUE proc
, VALUE self
, ID id
, VALUE klass
)
3622 VALUE coverage
= GET_THREAD()->cfp
->iseq
->coverage
;
3623 if (coverage
&& RBASIC(coverage
)->klass
== 0) {
3624 long line
= rb_sourceline() - 1;
3626 if (RARRAY_PTR(coverage
)[line
] == Qnil
) {
3629 count
= FIX2LONG(RARRAY_PTR(coverage
)[line
]) + 1;
3630 if (POSFIXABLE(count
)) {
3631 RARRAY_PTR(coverage
)[line
] = LONG2FIX(count
);
3637 rb_get_coverages(void)
3639 return GET_VM()->coverages
;
3643 rb_set_coverages(VALUE coverages
)
3645 GET_VM()->coverages
= coverages
;
3646 rb_add_event_hook(update_coverage
, RUBY_EVENT_COVERAGE
, Qnil
);
3650 rb_reset_coverages(void)
3652 GET_VM()->coverages
= Qfalse
;
3653 rb_remove_event_hook(update_coverage
);