/**********************************************************************
  Copyright (C) 2004-2007 Koichi Sasada
**********************************************************************/
  model 1: Userlevel Thread
    Same as the traditional Ruby thread implementation.

  model 2: Native Thread with Giant VM lock
    Uses pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Uses pthreads; Ruby threads run concurrently or in parallel.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock) can run.
    At a scheduling point the running thread releases the GVL.  If the
    running thread is about to perform a blocking operation, it must
    release the GVL so that another thread can continue.  After the
    blocking operation, the thread must check for interrupts
    (RUBY_VM_CHECK_INTS).

    Separate VMs can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    All threads run concurrently or in parallel, so exclusive access
    control is needed for shared objects.  For example, to access a
    String or an Array, a fine-grained lock must be taken every time.
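/*
 * Illustrative sketch (not part of the original source): the model 2 rule
 * above is the pattern used throughout this file -- release the GVL around a
 * blocking call, reacquire it, then check interrupts.  blocking_call() is a
 * hypothetical stand-in; GVL_UNLOCK_BEGIN/END, BLOCKING_REGION and
 * RUBY_VM_CHECK_INTS are the real macros defined below.
 *
 *     GVL_UNLOCK_BEGIN();
 *     blocking_call();            // e.g. read(2), select(2), ...
 *     GVL_UNLOCK_END();
 *     RUBY_VM_CHECK_INTS();       // handle signals / kill requests afterwards
 */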
#include "eval_intern.h"

#define THREAD_DEBUG 0

static void sleep_timeval(rb_thread_t *th, struct timeval time);
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
static void sleep_forever(rb_thread_t *th);
static double timeofday(void);
struct timeval rb_time_interval(VALUE);
static int rb_thread_dead(rb_thread_t *th);

void rb_signal_exec(rb_thread_t *th, int sig);
void rb_disable_interrupt(void);

static VALUE eKillSignal = INT2FIX(0);
static VALUE eTerminateSignal = INT2FIX(1);
static volatile int system_working = 1;

st_delete_wrap(st_table *table, VALUE key)
    st_delete(table, (st_data_t *)&key, 0);

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *ptr,
                                 rb_unblock_function_t **oldfunc, void **oldptr);

#define GVL_UNLOCK_BEGIN() do { \
    rb_thread_t *_th_stored = GET_THREAD(); \
    rb_gc_save_machine_context(_th_stored); \
    native_mutex_unlock(&_th_stored->vm->global_interpreter_lock)

#define GVL_UNLOCK_END() \
    native_mutex_lock(&_th_stored->vm->global_interpreter_lock); \
    rb_thread_set_current(_th_stored); \
} while(0)

#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *__th = GET_THREAD(); \
    int __prev_status = __th->status; \
    rb_unblock_function_t *__oldubf; \
    void *__oldubfarg; \
    set_unblock_function(__th, ubf, ubfarg, &__oldubf, &__oldubfarg); \
    __th->status = THREAD_STOPPED; \
    thread_debug("enter blocking region (%p)\n", __th); \
    GVL_UNLOCK_BEGIN(); {\
	exec; \
    } \
    GVL_UNLOCK_END(); \
    thread_debug("leave blocking region (%p)\n", __th); \
    remove_signal_thread_list(__th); \
    set_unblock_function(__th, __oldubf, __oldubfarg, 0, 0); \
    if (__th->status == THREAD_STOPPED) { \
	__th->status = __prev_status; \
    } \
    RUBY_VM_CHECK_INTS(); \
} while(0)
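/*
 * Illustrative sketch (not part of the original source): BLOCKING_REGION is
 * used elsewhere in this file, e.g. around select(2) in do_select(), like
 * this.  The variables fd, buf and result are hypothetical.
 *
 *     ssize_t result;
 *     BLOCKING_REGION({
 *         result = read(fd, buf, sizeof(buf));
 *     }, ubf_select, GET_THREAD());
 */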
void rb_thread_debug(const char *fmt, ...);

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

rb_thread_s_debug(void)
    return INT2NUM(rb_thread_debug_enabled);

rb_thread_s_debug_set(VALUE self, VALUE val)
    rb_thread_debug_enabled = RTEST(val);

# define rb_thread_debug_enabled THREAD_DEBUG

#define thread_debug rb_thread_debug

#define thread_debug if(0)printf

#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)

NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));

#include "thread_win32.c"

#define DEBUG_OUT() \
    WaitForSingleObject(&debug_mutex, INFINITE); \
    printf("%p - %s", GetCurrentThreadId(), buf); \
    ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
    pthread_mutex_lock(&debug_mutex); \
    printf("%p - %s", pthread_self(), buf); \
    pthread_mutex_unlock(&debug_mutex);

#error "unsupported thread type"

static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

rb_thread_debug(const char *fmt, ...)
    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);

    vsnprintf(buf, BUFSIZ, fmt, args);
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     rb_unblock_function_t **oldfunc, void **oldarg)
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        native_mutex_unlock(&th->interrupt_lock);

    if (oldfunc) *oldfunc = th->unblock_function;
    if (oldarg) *oldarg = th->unblock_function_arg;
    th->unblock_function = func;
    th->unblock_function_arg = arg;

    native_mutex_unlock(&th->interrupt_lock);

rb_thread_interrupt(rb_thread_t *th)
    native_mutex_lock(&th->interrupt_lock);
    RUBY_VM_SET_INTERRUPT(th);
    if (th->unblock_function) {
        (th->unblock_function)(th->unblock_function_arg);
    native_mutex_unlock(&th->interrupt_lock);

terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
    GetThreadPtr(thval, th);

    if (th != main_thread) {
        thread_debug("terminate_i: %p\n", th);
        rb_thread_interrupt(th);
        th->thrown_errinfo = eTerminateSignal;
        th->status = THREAD_TO_KILL;
        thread_debug("terminate_i: main thread (%p)\n", th);

rb_thread_terminate_all(void)
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;
    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               vm->main_thread, th);

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    while (!rb_thread_alone()) {
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        /* ignore exception */

thread_cleanup_func(void *th_ptr)
    rb_thread_t *th = th_ptr;

    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
    native_thread_destroy(th);

extern void ruby_error_print(void);
static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
void rb_thread_recycle_stack_release(VALUE *);
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
    VALUE args = th->first_args;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;

    th->machine_stack_start = stack_start;
    th->machine_register_stack_start = register_stack_start;
    thread_debug("thread start: %p\n", th);

    native_mutex_lock(&th->vm->global_interpreter_lock);
    thread_debug("thread start (get lock): %p\n", th);
    rb_thread_set_current(th);

    if ((state = EXEC_TAG()) == 0) {
        SAVE_ROOT_JMPBUF(th, {
            if (th->first_proc) {
                GetProcPtr(th->first_proc, proc);
                th->local_lfp = proc->block.lfp;
                th->local_svar = Qnil;
                th->value = vm_invoke_proc(th, proc, proc->block.self,
                                           RARRAY_LEN(args), RARRAY_PTR(args), 0);
                th->value = (*th->first_func)((void *)th->first_args);

    if (th->safe_level < 4 &&
        (th->vm->thread_abort_on_exception ||
         th->abort_on_exception || RTEST(ruby_debug))) {
        errinfo = th->errinfo;
        if (NIL_P(errinfo)) errinfo = rb_errinfo();

    th->status = THREAD_KILLED;
    thread_debug("thread end: %p\n", th);

    main_th = th->vm->main_thread;
    if (TYPE(errinfo) == T_OBJECT) {
        /* treat with normal error object */
        rb_thread_raise(1, &errinfo, main_th);
    st_delete_wrap(th->vm->living_threads, th->self);

    /* wake up joining threads */
    join_th = th->join_list_head;
    if (join_th == main_th) errinfo = Qnil;
    rb_thread_interrupt(join_th);
    join_th = join_th->join_list_next;
    st_delete_wrap(th->vm->living_threads, th->self);

    if (!th->root_fiber) {
        rb_thread_recycle_stack_release(th->stack);

    thread_cleanup_func(th);
    native_mutex_unlock(&th->vm->global_interpreter_lock);
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_args = args;
    th->first_proc = fn ? Qfalse : rb_block_proc();

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    st_insert(th->vm->living_threads, thval, (st_data_t)th->thread_id);
    native_thread_create(th);

thread_s_new(int argc, VALUE *argv, VALUE klass)
    VALUE thread = rb_thread_alloc(klass);
    rb_obj_call_init(thread, argc, argv);
    GetThreadPtr(thread, th);
    if (!th->first_args) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
                 rb_class2name(klass));

 *     Thread.start([args]*) {|args| block }   => thread
 *     Thread.fork([args]*) {|args| block }    => thread
 *
 *  Basically the same as <code>Thread::new</code>. However, if class
 *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
 *  subclass will not invoke the subclass's <code>initialize</code> method.

thread_start(VALUE klass, VALUE args)
    return thread_create_core(rb_thread_alloc(klass), args, 0);

thread_initialize(VALUE thread, VALUE args)
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;

        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    return thread_create_core(thread, args, 0);

rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
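/*
 * Illustrative sketch (not part of the original source): spawning a Ruby
 * thread from C with rb_thread_create().  worker_body() and its argument are
 * hypothetical; the VALUE it returns becomes the thread's value via the
 * first_func path in thread_start_func_2() above.
 *
 *     static VALUE
 *     worker_body(void *arg)
 *     {
 *         int n = (int)(VALUE)arg;
 *         return INT2FIX(n * 2);
 *     }
 *
 *     VALUE thread = rb_thread_create(worker_body, (void *)(VALUE)21);
 */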
/* +infty, for this purpose */
#define DELAY_INFTY 1E30

    rb_thread_t *target, *waiting;

remove_from_join_list(VALUE arg)
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        rb_thread_t **pth = &target_th->join_list_head;
            *pth = th->join_list_next;
            pth = &(*pth)->join_list_next;

thread_join_sleep(VALUE arg)
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
            sleep_wait_for_interrupt(th, limit - now);
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);

thread_join(rb_thread_t *target_th, double delay)
    rb_thread_t *th = GET_THREAD();

    arg.target = target_th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;
        else if (TYPE(target_th->errinfo) == T_NODE) {
            rb_exc_raise(vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
            /* normal exception */
    return target_th->self;
 *     thr.join(limit)   => thr
 *
 *  The calling thread will suspend execution and run <i>thr</i>. Does not
 *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
 *  the time limit expires, <code>nil</code> will be returned, otherwise
 *  <i>thr</i> is returned.
 *
 *  Any threads not joined will be killed when the main program exits. If
 *  <i>thr</i> had previously raised an exception and the
 *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
 *  (so the exception has not yet been processed) it will be processed at this
 *  time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let x thread finish, a will be killed on exit.
 *
 *  The following example illustrates the <i>limit</i> parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
thread_join_m(int argc, VALUE *argv, VALUE self)
    rb_thread_t *target_th;
    double delay = DELAY_INFTY;

    GetThreadPtr(self, target_th);
    rb_scan_args(argc, argv, "01", &limit);
    delay = rb_num2dbl(limit);
    return thread_join(target_th, delay);

 *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
 *
 *     a = Thread.new { 2 + 2 }

thread_value(VALUE self)
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);

static struct timeval
double2timeval(double d)
    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
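/*
 * Worked example (not in the original source): double2timeval(1.25) yields
 * tv_sec = 1, tv_usec = 250000.  For a negative input such as -0.5 the
 * computed tv_usec is negative, so the branch above adds 1e6 microseconds
 * (and the line omitted here presumably decrements tv_sec to compensate).
 */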
sleep_forever(rb_thread_t *th)

sleep_timeval(rb_thread_t *th, struct timeval tv)
    native_sleep(th, &tv);

rb_thread_sleep_forever()
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD());

    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;

sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
    sleep_timeval(th, double2timeval(sleepsec));

sleep_for_polling(rb_thread_t *th)
    time.tv_usec = 100 * 1000;	/* 0.1 sec */
    sleep_timeval(th, time);

rb_thread_wait_for(struct timeval time)
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time);

rb_thread_polling(void)
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        sleep_for_polling(th);

struct timeval rb_time_timeval();

rb_thread_sleep(int sec)
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));

rb_thread_schedule(void)
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        rb_gc_save_machine_context(th);
        native_mutex_unlock(&th->vm->global_interpreter_lock);
        native_thread_yield();
        native_mutex_lock(&th->vm->global_interpreter_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

    RUBY_VM_CHECK_INTS();

int rb_thread_critical; /* TODO: dummy variable */

rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
    rb_thread_t *th = GET_THREAD();

    if (ubf == RB_UBF_DFL) {
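/*
 * Illustrative sketch (not part of the original source): a C extension would
 * call rb_thread_blocking_region() to run a blocking function with the GVL
 * released.  blocking_read() and struct my_io are hypothetical; RB_UBF_DFL is
 * the real default unblock function checked above.
 *
 *     struct my_io { int fd; char *buf; size_t len; ssize_t result; };
 *
 *     static VALUE
 *     blocking_read(void *data)
 *     {
 *         struct my_io *io = data;
 *         io->result = read(io->fd, io->buf, io->len);
 *         return Qnil;
 *     }
 *
 *     rb_thread_blocking_region(blocking_read, &io, RB_UBF_DFL, 0);
 */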
 *  Invokes the thread scheduler to pass execution to another thread.
 *
 *     a = Thread.new { print "a"; Thread.pass;
 *                      print "b"; Thread.pass;
 *     b = Thread.new { print "x"; Thread.pass;
 *                      print "y"; Thread.pass;

thread_s_pass(VALUE klass)
    rb_thread_schedule();

rb_thread_execute_interrupts(rb_thread_t *th)
    while (th->interrupt_flag) {
        int status = th->status;
        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            rb_signal_exec(th, sig);

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            rb_thread_schedule();
    EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);

rb_gc_mark_threads(void)

/*****************************************************/

rb_thread_ready(rb_thread_t *th)
    rb_thread_interrupt(th);

rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
    if (rb_thread_dead(th)) {
    if (th->thrown_errinfo != 0 || th->raised_flag) {
        rb_thread_schedule();
    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;

rb_thread_signal_raise(void *thptr, int sig)
    rb_thread_t *th = thptr;

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_thread_raise(2, argv, th->vm->main_thread);

rb_thread_signal_exit(void *thptr)
    rb_thread_t *th = thptr;

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_thread_raise(2, argv, th->vm->main_thread);

rb_thread_set_raised(rb_thread_t *th)
    if (th->raised_flag & RAISED_EXCEPTION) {
    th->raised_flag |= RAISED_EXCEPTION;

rb_thread_reset_raised(rb_thread_t *th)
    if (!(th->raised_flag & RAISED_EXCEPTION)) {
    th->raised_flag &= ~RAISED_EXCEPTION;

rb_thread_fd_close(int fd)

 *     thr.raise(exception)
 *
 *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
 *  caller does not have to be <i>thr</i>.
 *
 *     Thread.abort_on_exception = true
 *     a = Thread.new { sleep(200) }
 *
 *     prog.rb:3: Gotcha (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'

thread_raise_m(int argc, VALUE *argv, VALUE self)
    GetThreadPtr(self, th);
    rb_thread_raise(argc, argv, th);

 *     thr.exit        => thr or nil
 *     thr.kill        => thr or nil
 *     thr.terminate   => thr or nil
 *
 *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
 *  is already marked to be killed, <code>exit</code> returns the
 *  <code>Thread</code>. If this is the main thread, or the last thread, exits

rb_thread_kill(VALUE thread)
    GetThreadPtr(thread, th);

    if (th != GET_THREAD() && th->safe_level < 4) {
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);

    thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);

    rb_thread_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;
 *     Thread.kill(thread)   => thread
 *
 *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
 *
 *     a = Thread.new { loop { count += 1 } }
 *     Thread.kill(a)        #=> #<Thread:0x401b3d30 dead>

rb_thread_s_kill(VALUE obj, VALUE th)
    return rb_thread_kill(th);

 *     Thread.exit   => thread
 *
 *  Terminates the currently running thread and schedules another thread to be
 *  run. If this thread is already marked to be killed, <code>exit</code>
 *  returns the <code>Thread</code>. If this is the main thread, or the last
 *  thread, exits the process.

rb_thread_exit(void)
    return rb_thread_kill(GET_THREAD()->self);
 *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
 *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
 *
 *     c = Thread.new { Thread.stop; puts "hey!" }
 *
 *  <em>produces:</em>

rb_thread_wakeup(VALUE thread)
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    rb_thread_ready(th);

 *  Wakes up <i>thr</i>, making it eligible for scheduling.
 *
 *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
 *
 *  <em>produces:</em>

rb_thread_run(VALUE thread)
    rb_thread_wakeup(thread);
    rb_thread_schedule();

 *     Thread.stop   => nil
 *
 *  Stops execution of the current thread, putting it into a ``sleep'' state,
 *  and schedules execution of another thread.
 *
 *     a = Thread.new { print "a"; Thread.stop; print "c" }
 *
 *  <em>produces:</em>

rb_thread_stop(void)
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    rb_thread_sleep_forever();

thread_list_i(st_data_t key, st_data_t val, void *data)
    VALUE ary = (VALUE)data;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_TO_KILL:
        rb_ary_push(ary, th->self);

/********************************************************************/

 *     Thread.list   => array
 *
 *  Returns an array of <code>Thread</code> objects for all threads that are
 *  either runnable or stopped.
 *
 *     Thread.new { sleep(200) }
 *     Thread.new { 1000000.times {|i| i*i } }
 *     Thread.new { Thread.stop }
 *     Thread.list.each {|t| p t}
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3e84 sleep>
 *     #<Thread:0x401b3f38 run>
 *     #<Thread:0x401b3fb0 sleep>
 *     #<Thread:0x401bdf4c run>

rb_thread_list(void)
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);

rb_thread_current(void)
    return GET_THREAD()->self;

 *     Thread.current   => thread
 *
 *  Returns the currently executing thread.
 *
 *     Thread.current   #=> #<Thread:0x401bdf4c run>

thread_s_current(VALUE klass)
    return rb_thread_current();

rb_thread_main(void)
    return GET_THREAD()->vm->main_thread->self;

rb_thread_s_main(VALUE klass)
    return rb_thread_main();

 *     Thread.abort_on_exception   => true or false
 *
 *  Returns the status of the global ``abort on exception'' condition. The
 *  default is <code>false</code>. When set to <code>true</code>, or if the
 *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
 *  command line option <code>-d</code> was specified) all threads will abort
 *  (the process will <code>exit(0)</code>) if an exception is raised in any
 *  thread. See also <code>Thread::abort_on_exception=</code>.

rb_thread_s_abort_exc(void)
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;

 *     Thread.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, all threads will abort if an exception is
 *  raised. Returns the new state.
 *
 *     Thread.abort_on_exception = true
 *     t1 = Thread.new do
 *       puts  "In new thread"
 *       raise "Exception from thread"
 *     puts "not reached"
 *
 *  <em>produces:</em>
 *
 *     prog.rb:4: Exception from thread (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'

rb_thread_s_abort_exc_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);

 *     thr.abort_on_exception   => true or false
 *
 *  Returns the status of the thread-local ``abort on exception'' condition for
 *  <i>thr</i>. The default is <code>false</code>. See also
 *  <code>Thread::abort_on_exception=</code>.

rb_thread_abort_exc(VALUE thread)
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;

 *     thr.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, causes all threads (including the main
 *  program) to abort if an exception is raised in <i>thr</i>. The process will
 *  effectively <code>exit(0)</code>.

rb_thread_abort_exc_set(VALUE thread, VALUE val)
    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);

 *     thr.group   => thgrp or nil
 *
 *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
 *  the thread is not a member of any group.
 *
 *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>

rb_thread_group(VALUE thread)
    GetThreadPtr(thread, th);
    group = th->thgroup;

thread_status_name(enum rb_thread_status status)
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_TO_KILL:

rb_thread_dead(rb_thread_t *th)
    return th->status == THREAD_KILLED;

 *     thr.status   => string, false or nil
 *
 *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
 *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
 *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
 *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
 *  terminated with an exception.
 *
 *     a = Thread.new { raise("die now") }
 *     b = Thread.new { Thread.stop }
 *     c = Thread.new { Thread.exit }
 *     d = Thread.new { sleep }
 *     d.kill                  #=> #<Thread:0x401b3678 aborting>
 *     b.status                #=> "sleep"
 *     c.status                #=> false
 *     d.status                #=> "aborting"
 *     Thread.current.status   #=> "run"

rb_thread_status(VALUE thread)
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th)) {
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
    return rb_str_new2(thread_status_name(th->status));

 *     thr.alive?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
 *
 *     thr = Thread.new { }
 *     thr.join                #=> #<Thread:0x401b3fb0 dead>
 *     Thread.current.alive?   #=> true
 *     thr.alive?              #=> false

rb_thread_alive_p(VALUE thread)
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))

 *     thr.stop?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
 *
 *     a = Thread.new { Thread.stop }
 *     b = Thread.current

rb_thread_stop_p(VALUE thread)
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))
    if (th->status == THREAD_STOPPED)

 *     thr.safe_level   => integer
 *
 *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
 *  levels can help when implementing sandboxes which run insecure code.
 *
 *     thr = Thread.new { $SAFE = 3; sleep }
 *     Thread.current.safe_level   #=> 0
 *     thr.safe_level              #=> 3

rb_thread_safe_level(VALUE thread)
    GetThreadPtr(thread, th);
    return INT2NUM(th->safe_level);

 *     thr.inspect   => string
 *
 *  Dump the name, id, and status of _thr_ to a string.

rb_thread_inspect(VALUE thread)
    char *cname = rb_obj_classname(thread);

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

rb_thread_local_aref(VALUE thread, ID id)
    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    if (!th->local_storage) {
    if (st_lookup(th->local_storage, id, &val)) {

 *      thr[sym]   => obj or nil
 *
 *  Attribute Reference---Returns the value of a thread-local variable, using
 *  either a symbol or a string name. If the specified variable does not exist,
 *  returns <code>nil</code>.
 *
 *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
 *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
 *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
 *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3b3c sleep>: C
 *     #<Thread:0x401b3bc8 sleep>: B
 *     #<Thread:0x401b3c68 sleep>: A
 *     #<Thread:0x401bdf4c run>:

rb_thread_aref(VALUE thread, VALUE id)
    return rb_thread_local_aref(thread, rb_to_id(id));

rb_thread_local_aset(VALUE thread, ID id, VALUE val)
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
        st_delete(th->local_storage, (st_data_t *)&id, 0);
    st_insert(th->local_storage, id, val);

 *      thr[sym] = obj   => obj
 *
 *  Attribute Assignment---Sets or creates the value of a thread-local variable,
 *  using either a symbol or a string. See also <code>Thread#[]</code>.

rb_thread_aset(VALUE self, ID id, VALUE val)
    return rb_thread_local_aset(self, rb_to_id(id), val);

 *     thr.key?(sym)   => true or false
 *
 *  Returns <code>true</code> if the given string (or symbol) exists as a
 *  thread-local variable.
 *
 *     me = Thread.current
 *     me.key?(:oliver)    #=> true
 *     me.key?(:stanley)   #=> false

rb_thread_key_p(VALUE self, ID id)
    GetThreadPtr(self, th);

    if (!th->local_storage) {
    if (st_lookup(th->local_storage, rb_to_id(id), 0)) {

thread_keys_i(ID key, VALUE value, VALUE ary)
    rb_ary_push(ary, ID2SYM(key));

    if (GET_THREAD()->vm->living_threads) {
        num = GET_THREAD()->vm->living_threads->num_entries;
    thread_debug("rb_thread_alone: %d\n", num);
 *  Returns an array of the names of the thread-local variables (as Symbols).
 *
 *     thr = Thread.new do
 *       Thread.current[:cat] = 'meow'
 *       Thread.current["dog"] = 'woof'
 *     thr.join   #=> #<Thread:0x401b3f10 dead>
 *     thr.keys   #=> [:dog, :cat]

rb_thread_keys(VALUE self)
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);

 *     thr.priority   => integer
 *
 *  Returns the priority of <i>thr</i>. The default is inherited from the
 *  thread that created <i>thr</i>, or zero for the initial main thread;
 *  higher-priority threads will run before lower-priority threads.
 *
 *     Thread.current.priority   #=> 0

rb_thread_priority(VALUE thread)
    GetThreadPtr(thread, th);
    return INT2NUM(th->priority);

 *     thr.priority= integer   => thr
 *
 *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
 *  will run before lower-priority threads.
 *
 *     count1 = count2 = 0
 *       loop { count1 += 1 }
 *       loop { count2 += 1 }

rb_thread_priority_set(VALUE thread, VALUE prio)
    GetThreadPtr(thread, th);

    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

rb_fd_init(volatile rb_fdset_t *fds)
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);

rb_fd_term(rb_fdset_t *fds)
    if (fds->fdset) free(fds->fdset);

rb_fd_zero(rb_fdset_t *fds)
    MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
    FD_ZERO(fds->fdset);

rb_fd_resize(int n, rb_fdset_t *fds)
    int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    fds->fdset = realloc(fds->fdset, m);
    memset((char *)fds->fdset + o, 0, m - o);
    if (n >= fds->maxfd) fds->maxfd = n + 1;

rb_fd_set(int n, rb_fdset_t *fds)
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);

rb_fd_clr(int n, rb_fdset_t *fds)
    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);

rb_fd_isset(int n, const rb_fdset_t *fds)
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */

rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
    int size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->fdset = realloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);

rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
    fd_set *r = NULL, *w = NULL, *e = NULL;
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    return select(n, r, w, e, timeout);
#define FD_ZERO(f)	rb_fd_zero(f)
#define FD_SET(i, f)	rb_fd_set(i, f)
#define FD_CLR(i, f)	rb_fd_clr(i, f)
#define FD_ISSET(i, f)	rb_fd_isset(i, f)
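/*
 * Illustrative sketch (not part of the original source): rb_fdset_t grows on
 * demand, so unlike a plain fd_set it can track descriptors at or beyond
 * FD_SETSIZE.  The descriptor number is hypothetical.
 *
 *     rb_fdset_t fds;
 *     rb_fd_init(&fds);
 *     rb_fd_set(4096, &fds);               // rb_fd_resize() grows the set
 *     if (rb_fd_isset(4096, &fds)) { ... }
 *     rb_fd_term(&fds);
 */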
#if defined(__CYGWIN__) || defined(_WIN32)

cmp_tv(const struct timeval *a, const struct timeval *b)
    long d = (a->tv_sec - b->tv_sec);
    return (d != 0) ? d : (a->tv_usec - b->tv_usec);

subst(struct timeval *rest, const struct timeval *wait)
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) {
        rest->tv_usec += 1000 * 1000;
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;

do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
    fd_set orig_read, orig_write, orig_except;
    struct timeval wait_rest;

    limit = timeofday() +
            (double)timeout->tv_sec + (double)timeout->tv_usec * 1e-6;
    wait_rest = *timeout;
    timeout = &wait_rest;

    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

#if defined(__CYGWIN__) || defined(_WIN32)
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
        result = select(n, read, write, except, wait);
        if (result < 0) lerrno = errno;
        if (result != 0) break;

        if (read) *read = orig_read;
        if (write) *write = orig_write;
        if (except) *except = orig_except;
    } while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
    } while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));

        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());

        if (read) *read = orig_read;
        if (write) *write = orig_write;
        if (except) *except = orig_except;

        double d = limit - timeofday();

        wait_rest.tv_sec = (unsigned int)d;
        wait_rest.tv_usec = (long)((d - (double)wait_rest.tv_sec) * 1e6);
        if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
        if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;

rb_thread_wait_fd_rw(int fd, int read)
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");

    while (result <= 0) {
            result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
            result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);

    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");

rb_thread_wait_fd(int fd)
    rb_thread_wait_fd_rw(fd, 1);

rb_thread_fd_writable(int fd)
    rb_thread_wait_fd_rw(fd, 0);

rb_thread_select(int max, fd_set *read, fd_set *write, fd_set *except,
                 struct timeval *timeout)
    if (!read && !write && !except) {
            rb_thread_sleep_forever();
        rb_thread_wait_for(*timeout);
    return do_select(max, read, write, except, timeout);
#ifdef USE_CONSERVATIVE_STACK_END

rb_gc_set_stack_end(VALUE **stack_end_p)
    *stack_end_p = &stack_end;

rb_gc_save_machine_context(rb_thread_t *th)
    SET_MACHINE_STACK_END(&th->machine_stack_end);
    th->machine_register_stack_end = rb_ia64_bsp();
    setjmp(th->machine_regs);

int rb_get_next_signal(rb_vm_t *vm);

timer_thread_function(void)
    rb_vm_t *vm = GET_VM();  /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
        vm->main_thread->exec_signal = rb_get_next_signal(vm);
        thread_debug("buffered_signal_size: %ld, sig: %d\n",
                     (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
        rb_thread_interrupt(vm->main_thread);

    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;
        if (vm->during_gc) {
            /* GC prove profiling */

rb_thread_stop_timer_thread(void)
    if (timer_thread_id) {
        native_thread_join(timer_thread_id);
        timer_thread_id = 0;

rb_thread_reset_timer_thread(void)
    timer_thread_id = 0;

rb_thread_start_timer_thread(void)
    rb_thread_create_timer_thread();

terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
    GetThreadPtr(thval, th);

    if (th != current_th) {
        thread_cleanup_func(th);

rb_thread_atfork(void)
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
 *  Document-class: ThreadGroup
 *
 *  <code>ThreadGroup</code> provides a means of keeping track of a number of
 *  threads as a group. A <code>Thread</code> can belong to only one
 *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
 *  remove it from any previous group.
 *
 *  Newly created threads belong to the same group as the thread from which they

static VALUE thgroup_s_alloc(VALUE);

thgroup_s_alloc(VALUE klass)
    struct thgroup *data;

    group = Data_Make_Struct(klass, struct thgroup, 0, free, data);
    data->group = group;

struct thgroup_list_params {

thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
    VALUE thread = (VALUE)key;
    VALUE ary = ((struct thgroup_list_params *)data)->ary;
    VALUE group = ((struct thgroup_list_params *)data)->group;
    GetThreadPtr(thread, th);

    if (th->thgroup == group) {
        rb_ary_push(ary, thread);

 *     thgrp.list   => array
 *
 *  Returns an array of all existing <code>Thread</code> objects that belong to
 *
 *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]

thgroup_list(VALUE group)
    VALUE ary = rb_ary_new();
    struct thgroup_list_params param;

    param.group = group;
    st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t)&param);

 *     thgrp.enclose   => thgrp
 *
 *  Prevents threads from being added to or removed from the receiving
 *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
 *  <code>ThreadGroup</code>.
 *
 *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
 *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
 *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
 *
 *  <em>produces:</em>
 *
 *     ThreadError: can't move from the enclosed thread group

thgroup_enclose(VALUE group)
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);

 *     thgrp.enclosed?   => true or false
 *
 *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
 *  ThreadGroup#enclose.

thgroup_enclosed_p(VALUE group)
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);

 *     thgrp.add(thread)   => thgrp
 *
 *  Adds the given <em>thread</em> to this group, removing it from any other
 *  group to which it may have previously belonged.
 *
 *     puts "Initial group is #{ThreadGroup::Default.list}"
 *     tg = ThreadGroup.new
 *     t1 = Thread.new { sleep }
 *     t2 = Thread.new { sleep }
 *     puts "t1 is #{t1}"
 *     puts "t2 is #{t2}"
 *     puts "Initial group now #{ThreadGroup::Default.list}"
 *     puts "tg group now #{tg.list}"
 *
 *  <em>produces:</em>
 *
 *     Initial group is #<Thread:0x401bdf4c>
 *     t1 is #<Thread:0x401b3c90>
 *     t2 is #<Thread:0x401b3c18>
 *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
 *     tg group now #<Thread:0x401b3c90>

thgroup_add(VALUE group, VALUE thread)
    struct thgroup *data;

    GetThreadPtr(thread, th);

    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    Data_Get_Struct(group, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");

    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    Data_Get_Struct(th->thgroup, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");

    th->thgroup = group;

 *  Document-class: Mutex
 *
 *  Mutex implements a simple semaphore that can be used to coordinate access to
 *  shared data from multiple concurrent threads.
 *
 *    semaphore = Mutex.new
 *
 *      semaphore.synchronize {
 *        # access shared resource
 *      semaphore.synchronize {
 *        # access shared resource

typedef struct mutex_struct {
    rb_thread_lock_t lock;
    rb_thread_cond_t cond;
    rb_thread_t volatile *th;
    volatile int cond_waiting;

#define GetMutexPtr(obj, tobj) \
    Data_Get_Struct(obj, mutex_t, tobj)
mutex_mark(void *ptr)
    mutex_t *mutex = ptr;
        rb_gc_mark(mutex->th->self);

mutex_free(void *ptr)
    mutex_t *mutex = ptr;
    native_mutex_destroy(&mutex->lock);
    native_cond_destroy(&mutex->cond);

mutex_alloc(VALUE klass)
    obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
    native_mutex_initialize(&mutex->lock);
    native_cond_initialize(&mutex->cond);

 *     Mutex.new   => mutex
 *
 *  Creates a new Mutex

mutex_initialize(VALUE self)
    return mutex_alloc(rb_cMutex);

 *     mutex.locked?  => true or false
 *
 *  Returns +true+ if this lock is currently held by some thread.

rb_mutex_locked_p(VALUE self)
    GetMutexPtr(self, mutex);
    return mutex->th ? Qtrue : Qfalse;

 *     mutex.try_lock  => true or false
 *
 *  Attempts to obtain the lock and returns immediately. Returns +true+ if the

rb_mutex_trylock(VALUE self)
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    if (mutex->th == GET_THREAD()) {
        rb_raise(rb_eThreadError, "deadlock; recursive locking");

    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
        mutex->th = GET_THREAD();
    native_mutex_unlock(&mutex->lock);

lock_func(rb_thread_t *th, mutex_t *mutex)
    int interrupted = Qfalse;

    native_mutex_lock(&mutex->lock);
    while (mutex->th || (mutex->th = th, 0)) {
        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);

        if (th->interrupt_flag) {
            interrupted = Qtrue;
    native_mutex_unlock(&mutex->lock);

lock_interrupt(void *ptr)
    mutex_t *mutex = (mutex_t *)ptr;
    native_mutex_lock(&mutex->lock);
    if (mutex->cond_waiting > 0) {
        native_cond_broadcast(&mutex->cond);
        mutex->cond_waiting = 0;
    native_mutex_unlock(&mutex->lock);

 *     mutex.lock  => true or false
 *
 *  Attempts to grab the lock and waits if it isn't available.
 *  Raises +ThreadError+ if +mutex+ was locked by the current thread.

rb_mutex_lock(VALUE self)
    if (rb_mutex_trylock(self) == Qfalse) {
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        while (mutex->th != th) {
                interrupted = lock_func(th, mutex);
            }, lock_interrupt, mutex);

            RUBY_VM_CHECK_INTS();
 *     mutex.unlock    => self
 *
 *  Releases the lock.
 *  Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.

rb_mutex_unlock(VALUE self)
    GetMutexPtr(self, mutex);

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    else if (mutex->th != GET_THREAD()) {
        err = "Attempt to unlock a mutex which is locked by another thread";
        if (mutex->cond_waiting > 0) {
            /* waiting thread */
            native_cond_signal(&mutex->cond);
            mutex->cond_waiting--;

    native_mutex_unlock(&mutex->lock);

    if (err) rb_raise(rb_eThreadError, err);

rb_mutex_sleep_forever(VALUE time)
    rb_thread_sleep_forever();

rb_mutex_wait_for(VALUE time)
    const struct timeval *t = (struct timeval *)time;
    rb_thread_wait_for(*t);

rb_mutex_sleep(VALUE self, VALUE timeout)
    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    rb_mutex_unlock(self);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
        rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
    end = time(0) - beg;
    return INT2FIX(end);

 *     mutex.sleep(timeout = nil)    => number
 *
 *  Releases the lock and sleeps +timeout+ seconds if it is given and
 *  non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
 *  the current thread.

mutex_sleep(int argc, VALUE *argv, VALUE self)
    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);

 *     mutex.synchronize { ... }    => result of the block
 *
 *  Obtains a lock, runs the block, and releases the lock when the block
 *  completes.  See the example under +Mutex+.

rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
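/*
 * Illustrative sketch (not part of the original source): calling
 * rb_thread_synchronize() from C to run a callback while a Ruby Mutex is
 * held.  update_shared() and the mutex VALUE are hypothetical; rb_ensure()
 * guarantees the unlock even if the callback raises.
 *
 *     static VALUE
 *     update_shared(VALUE arg)
 *     {
 *         return Qnil;    // runs with the mutex locked
 *     }
 *
 *     rb_thread_synchronize(mutex, update_shared, Qnil);
 */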
 *  Document-class: Barrier

typedef struct rb_thread_list_struct rb_thread_list_t;

struct rb_thread_list_struct {
    rb_thread_list_t *next;

thlist_mark(void *ptr)
    rb_thread_list_t *q = ptr;

    for (; q; q = q->next) {
        rb_gc_mark(q->th->self);

thlist_free(void *ptr)
    rb_thread_list_t *q = ptr, *next;

    for (; q; q = next) {

thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
    rb_thread_list_t *q;

    while ((q = *list) != NULL) {
        rb_thread_t *th = q->th;

        if (th->status != THREAD_KILLED) {
            rb_thread_ready(th);
            if (!woken && woken_thread) *woken_thread = th;
            if (++woken >= maxth && maxth) break;

    rb_thread_list_t *waiting, **tail;

barrier_mark(void *ptr)
    rb_barrier_t *b = ptr;

    if (b->owner) rb_gc_mark(b->owner->self);
    thlist_mark(b->waiting);

barrier_free(void *ptr)
    rb_barrier_t *b = ptr;

    thlist_free(b->waiting);

barrier_alloc(VALUE klass)
    rb_barrier_t *barrier;

    obj = Data_Make_Struct(klass, rb_barrier_t,
                           barrier_mark, barrier_free, barrier);
    barrier->owner = GET_THREAD();
    barrier->waiting = 0;
    barrier->tail = &barrier->waiting;

rb_barrier_new(void)
    return barrier_alloc(rb_cBarrier);

rb_barrier_wait(VALUE self)
    rb_barrier_t *barrier;
    rb_thread_list_t *q;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
        if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
    else if (barrier->owner == GET_THREAD()) {

    *barrier->tail = q = ALLOC(rb_thread_list_t);
    q->th = GET_THREAD();
    barrier->tail = &q->next;
    rb_thread_sleep_forever();
    return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;

rb_barrier_release(VALUE self)
    rb_barrier_t *barrier;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (barrier->owner != GET_THREAD()) {
        rb_raise(rb_eThreadError, "not owned");
    n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
    return n ? UINT2NUM(n) : Qfalse;
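/*
 * Illustrative sketch (not part of the original source): Barrier is an
 * internal primitive.  The creating thread owns it; other threads that call
 * rb_barrier_wait() sleep until the owner calls rb_barrier_release().
 *
 *     VALUE barrier = rb_barrier_new();   // current thread becomes owner
 *     // ... other threads: rb_barrier_wait(barrier); ...
 *     rb_barrier_release(barrier);        // wakes the waiting threads
 */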
/* variables for recursive traversals */
static ID recursive_key;

recursive_check(VALUE hash, VALUE obj)
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));

        if (NIL_P(list) || TYPE(list) != T_HASH)
        if (NIL_P(rb_hash_lookup(list, obj)))

recursive_push(VALUE hash, VALUE obj)
    sym = ID2SYM(rb_frame_this_func());
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        hash = rb_hash_new();
        rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
    list = rb_hash_aref(hash, sym);
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        list = rb_hash_new();
        rb_hash_aset(hash, sym, list);
    rb_hash_aset(list, obj, Qtrue);

recursive_pop(VALUE hash, VALUE obj)
    sym = ID2SYM(rb_frame_this_func());
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        symname = rb_inspect(sym);
        thrname = rb_inspect(rb_thread_current());
        rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
                 StringValuePtr(symname), StringValuePtr(thrname));
    list = rb_hash_aref(hash, sym);
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        VALUE symname = rb_inspect(sym);
        VALUE thrname = rb_inspect(rb_thread_current());
        rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
                 StringValuePtr(symname), StringValuePtr(thrname));
    rb_hash_delete(list, obj);

rb_exec_recursive(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
    VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE objid = rb_obj_id(obj);

    if (recursive_check(hash, objid)) {
        return (*func)(obj, arg, Qtrue);
        VALUE result = Qundef;

        hash = recursive_push(hash, objid);
        if ((state = EXEC_TAG()) == 0) {
            result = (*func)(obj, arg, Qfalse);
        recursive_pop(hash, objid);
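/*
 * Illustrative sketch (not part of the original source): rb_exec_recursive()
 * is how methods such as inspect guard against self-referential structures.
 * inspect_elem() is hypothetical.
 *
 *     static VALUE
 *     inspect_elem(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new2("[...]");   // cycle detected
 *         return rb_inspect(obj);
 *     }
 *
 *     VALUE str = rb_exec_recursive(inspect_elem, obj, 0);
 */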
static rb_event_hook_t *
alloc_event_fook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
    rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
    hook->flag = events;

thread_reset_event_flags(rb_thread_t *th)
    rb_event_hook_t *hook = th->event_hooks;
    rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;

rb_thread_add_event_hook(rb_thread_t *th,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
    rb_event_hook_t *hook = alloc_event_fook(func, events, data);
    hook->next = th->event_hooks;
    th->event_hooks = hook;
    thread_reset_event_flags(th);

set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
    GetThreadPtr(thval, th);
        th->event_flags |= RUBY_EVENT_VM;
        th->event_flags &= (~RUBY_EVENT_VM);

set_threads_event_flags(int flag)
    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t)flag);

rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
    rb_event_hook_t *hook = alloc_event_fook(func, events, data);
    rb_vm_t *vm = GET_VM();

    hook->next = vm->event_hooks;
    vm->event_hooks = hook;
    set_threads_event_flags(1);

remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
    rb_event_hook_t *prev = NULL, *hook = *root, *next;
        if (func == 0 || hook->func == func) {
                prev->next = hook->next;

rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
    int ret = remove_event_hook(&th->event_hooks, func);
    thread_reset_event_flags(th);

rb_remove_event_hook(rb_event_hook_func_t func)
    rb_vm_t *vm = GET_VM();
    rb_event_hook_t *hook = vm->event_hooks;
    int ret = remove_event_hook(&vm->event_hooks, func);

    if (hook != NULL && vm->event_hooks == NULL) {
        set_threads_event_flags(0);

clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
    GetThreadPtr((VALUE)key, th);
    rb_thread_remove_event_hook(th, 0);

rb_clear_trace_func(void)
    st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t)0);
    rb_remove_event_hook(0);
static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);

 *     set_trace_func(proc)    => proc
 *     set_trace_func(nil)     => nil
 *
 *  Establishes _proc_ as the handler for tracing, or disables
 *  tracing if the parameter is +nil+. _proc_ takes up
 *  to six parameters: an event name, a filename, a line number, an
 *  object id, a binding, and the name of a class. _proc_ is
 *  invoked whenever an event occurs. Events are: <code>c-call</code>
 *  (call a C-language routine), <code>c-return</code> (return from a
 *  C-language routine), <code>call</code> (call a Ruby method),
 *  <code>class</code> (start a class or module definition),
 *  <code>end</code> (finish a class or module definition),
 *  <code>line</code> (execute code on a new line), <code>raise</code>
 *  (raise an exception), and <code>return</code> (return from a Ruby
 *  method). Tracing is disabled within the context of _proc_.
 *
 *      set_trace_func proc { |event, file, line, id, binding, classname|
 *         printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
 *
 *          line prog.rb:11               false
 *        c-call prog.rb:11        new    Class
 *        c-call prog.rb:11 initialize   Object
 *      c-return prog.rb:11 initialize   Object
 *      c-return prog.rb:11        new    Class
 *          line prog.rb:12               false
 *          call prog.rb:2        test     Test
 *          line prog.rb:3        test     Test
 *          line prog.rb:4        test     Test
 *        return prog.rb:4        test     Test

set_trace_func(VALUE obj, VALUE trace)
    rb_remove_event_hook(call_trace_func);

    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");

    rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);

thread_add_trace_func(rb_thread_t *th, VALUE trace)
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");

    rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);

thread_add_trace_func_m(VALUE obj, VALUE trace)
    GetThreadPtr(obj, th);
    thread_add_trace_func(th, trace);

thread_set_trace_func_m(VALUE obj, VALUE trace)
    GetThreadPtr(obj, th);
    rb_thread_remove_event_hook(th, call_trace_func);

    if (!NIL_P(trace)) {
        thread_add_trace_func(th, trace);
get_event_name(rb_event_flag_t event)
      case RUBY_EVENT_LINE:
      case RUBY_EVENT_CLASS:
      case RUBY_EVENT_END:
      case RUBY_EVENT_CALL:
      case RUBY_EVENT_RETURN:
      case RUBY_EVENT_C_CALL:
      case RUBY_EVENT_C_RETURN:
      case RUBY_EVENT_RAISE:

VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);

struct call_trace_func_args {
    rb_event_flag_t event;

call_trace_proc(VALUE args, int tracing)
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = rb_str_new2(rb_sourcefile());
    int line = rb_sourceline();

    if (p->event == RUBY_EVENT_C_CALL ||
        p->event == RUBY_EVENT_C_RETURN) {
        rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);

    if (id == ID_ALLOCATOR)

    if (TYPE(klass) == T_ICLASS) {
        klass = RBASIC(klass)->klass;
    else if (FL_TEST(klass, FL_SINGLETON)) {
        klass = rb_iv_get(klass, "__attached__");

    return rb_proc_call(p->proc, rb_ary_new3(6,
                        eventname, filename, INT2FIX(line),
                        id ? ID2SYM(id) : Qnil,
                        p->self ? rb_binding_new() : Qnil,
                        klass ? klass : Qnil));

call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
    struct call_trace_func_args args;

    ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);

ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
    rb_thread_t *th = GET_THREAD();
    int state, raised, tracing;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {

    raised = rb_thread_reset_raised(th);

    if ((state = EXEC_TAG()) == 0) {
        result = (*func)(arg, tracing);

        rb_thread_set_raised(th);

    th->tracing = tracing;

 *  +Thread+ encapsulates the behavior of a thread of
 *  execution, including the main thread of the Ruby script.
 *
 *  In the descriptions of the methods in this class, the parameter _sym_
 *  refers to a symbol, which is either a quoted string or a
 *  +Symbol+ (such as <code>:name</code>).
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);

    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    rb_thread_t *th = GET_THREAD();
    th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
    rb_define_const(cThGroup, "Default", th->thgroup);

    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    Init_native_thread();

    /* main thread setting */
        /* acquire global interpreter lock */
        rb_thread_lock_t *lp = &GET_THREAD()->vm->global_interpreter_lock;
        native_mutex_initialize(lp);
        native_mutex_lock(lp);
        native_mutex_initialize(&GET_THREAD()->interrupt_lock);

    rb_thread_create_timer_thread();

ruby_native_thread_p(void)
    rb_thread_t *rb_thread_check_ptr(rb_thread_t *ptr);
    rb_thread_t *th = ruby_thread_from_native();
    return th ? Qtrue : Qfalse;