/**********************************************************************

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/
/*
  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Giant VM lock
    Uses pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Uses pthreads; Ruby threads run concurrently or in parallel.

------------------------------------------------------------------------

  In model 2, only the thread that holds the mutex (GVL: Global VM Lock)
  can run.  When the scheduler switches threads, the running thread
  releases the GVL.  If the running thread performs a blocking operation,
  it must release the GVL so that another thread can continue; after the
  blocking operation completes, the thread must check for interrupts
  (RUBY_VM_CHECK_INTS).

  Separate VMs can run in parallel.

  Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  In model 3, all threads run concurrently or in parallel, so exclusive
  access control is needed for every shared object.  For example, to
  access a String or an Array object, a fine grained lock must be taken
  each time.
*/
#include "eval_intern.h"

#define THREAD_DEBUG 0
static void sleep_timeval(rb_thread_t *th, struct timeval time);
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
static void sleep_forever(rb_thread_t *th);
static double timeofday(void);
struct timeval rb_time_interval(VALUE);
static int rb_thread_dead(rb_thread_t *th);

void rb_signal_exec(rb_thread_t *th, int sig);
void rb_disable_interrupt(void);

static VALUE eKillSignal = INT2FIX(0);
static VALUE eTerminateSignal = INT2FIX(1);
static volatile int system_working = 1;

static void
st_delete_wrap(st_table *table, VALUE key)
{
    st_delete(table, (st_data_t *) &key, 0);
}
/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *ptr,
                                 rb_unblock_function_t **oldfunc, void **oldptr);
#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  rb_gc_save_machine_context(_th_stored); \
  native_mutex_unlock(&_th_stored->vm->global_interpreter_lock)

#define GVL_UNLOCK_END() \
  native_mutex_lock(&_th_stored->vm->global_interpreter_lock); \
  rb_thread_set_current(_th_stored); \
#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *__th = GET_THREAD(); \
    int __prev_status = __th->status; \
    rb_unblock_function_t *__oldubf; \
    void *__oldubfarg; \
    set_unblock_function(__th, ubf, ubfarg, &__oldubf, &__oldubfarg); \
    __th->status = THREAD_STOPPED; \
    thread_debug("enter blocking region (%p)\n", __th); \
    GVL_UNLOCK_BEGIN(); {\
        exec; \
    } \
    GVL_UNLOCK_END(); \
    thread_debug("leave blocking region (%p)\n", __th); \
    remove_signal_thread_list(__th); \
    set_unblock_function(__th, __oldubf, __oldubfarg, 0, 0); \
    if (__th->status == THREAD_STOPPED) { \
        __th->status = __prev_status; \
    } \
    RUBY_VM_CHECK_INTS(); \
} while(0)
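/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): BLOCKING_REGION wraps a single blocking call so that the GVL
 * is released while the call runs and is reacquired afterwards.  A
 * typical use, modeled on how do_select() uses the macro later in this
 * file, looks like:
 *
 *     ssize_t r;
 *     BLOCKING_REGION({
 *         r = read(fd, buf, len);    <- runs without the GVL
 *     }, ubf_select, GET_THREAD());
 *                                    <- GVL is held again here
 *
 * fd, buf and len are placeholder variables; ubf_select is the unblock
 * function provided by the platform-specific layer.
 */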
void rb_thread_debug(const char *fmt, ...);

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val);
}

# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif

#define thread_debug rb_thread_debug
#else
#define thread_debug if(0)printf
#endif
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)

NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));

#if defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf("%p - %s", GetCurrentThreadId(), buf); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf("%p - %s", pthread_self(), buf); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

rb_thread_debug(const char *fmt, ...)
{
    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    vsnprintf(buf, BUFSIZ, fmt, args);
}
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     rb_unblock_function_t **oldfunc, void **oldarg)
{
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        native_mutex_unlock(&th->interrupt_lock);
    }

    if (oldfunc) *oldfunc = th->unblock_function;
    if (oldarg) *oldarg = th->unblock_function_arg;
    th->unblock_function = func;
    th->unblock_function_arg = arg;

    native_mutex_unlock(&th->interrupt_lock);
}
rb_thread_interrupt(rb_thread_t *th)
{
    native_mutex_lock(&th->interrupt_lock);
    RUBY_VM_SET_INTERRUPT(th);
    if (th->unblock_function) {
        (th->unblock_function)(th->unblock_function_arg);
    }
    native_mutex_unlock(&th->interrupt_lock);
}
terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
{
    GetThreadPtr(thval, th);

    if (th != main_thread) {
        thread_debug("terminate_i: %p\n", th);
        rb_thread_interrupt(th);
        th->thrown_errinfo = eTerminateSignal;
        th->status = THREAD_TO_KILL;
    }
    else {
        thread_debug("terminate_i: main thread (%p)\n", th);
    }
}
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;
    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    while (!rb_thread_alone()) {
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        /* ignore exception */
    }
}
thread_cleanup_func(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
    native_thread_destroy(th);
}
extern void ruby_error_print(void);
static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
void rb_thread_recycle_stack_release(VALUE *);
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    VALUE args = th->first_args;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;

    th->machine_stack_start = stack_start;
    th->machine_register_stack_start = register_stack_start;

    thread_debug("thread start: %p\n", th);

    native_mutex_lock(&th->vm->global_interpreter_lock);

    thread_debug("thread start (get lock): %p\n", th);
    rb_thread_set_current(th);

    if ((state = EXEC_TAG()) == 0) {
        SAVE_ROOT_JMPBUF(th, {
            if (th->first_proc) {
                GetProcPtr(th->first_proc, proc);
                th->local_lfp = proc->block.lfp;
                th->local_svar = Qnil;
                th->value = vm_invoke_proc(th, proc, proc->block.self,
                                           RARRAY_LEN(args), RARRAY_PTR(args), 0);
            }
            else {
                th->value = (*th->first_func)((void *)th->first_args);
            }
        });
    }
    else {
        if (th->safe_level < 4 &&
            (th->vm->thread_abort_on_exception ||
             th->abort_on_exception || RTEST(ruby_debug))) {
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
        }
    }

    th->status = THREAD_KILLED;
    thread_debug("thread end: %p\n", th);

    main_th = th->vm->main_thread;
    if (TYPE(errinfo) == T_OBJECT) {
        /* treat with normal error object */
        rb_thread_raise(1, &errinfo, main_th);
    }

    st_delete_wrap(th->vm->living_threads, th->self);

    /* wake up joining threads */
    join_th = th->join_list_head;
    while (join_th) {
        if (join_th == main_th) errinfo = Qnil;
        rb_thread_interrupt(join_th);
        join_th = join_th->join_list_next;
    }
    st_delete_wrap(th->vm->living_threads, th->self);

    if (!th->root_fiber) {
        rb_thread_recycle_stack_release(th->stack);
    }

    thread_cleanup_func(th);
    native_mutex_unlock(&th->vm->global_interpreter_lock);
}
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_args = args;
    th->first_proc = fn ? Qfalse : rb_block_proc();

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);

    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    native_thread_create(th);
}
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    VALUE thread = rb_thread_alloc(klass);
    rb_obj_call_init(thread, argc, argv);
    GetThreadPtr(thread, th);
    if (!th->first_args) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
                 rb_class2name(klass));
    }
}
/*
 *  Thread.start([args]*) {|args| block }   => thread
 *  Thread.fork([args]*) {|args| block }    => thread
 *
 *  Basically the same as <code>Thread::new</code>. However, if class
 *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
 *  subclass will not invoke the subclass's <code>initialize</code> method.
 */

thread_start(VALUE klass, VALUE args)
{
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}
thread_initialize(VALUE thread, VALUE args)
{
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;

        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}

/* +infty, for this purpose */
#define DELAY_INFTY 1E30
struct join_arg {
    rb_thread_t *target, *waiting;
    double limit;
    int forever;
};

remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        rb_thread_t **pth = &target_th->join_list_head;

        *pth = th->join_list_next;
        pth = &(*pth)->join_list_next;
    }
}
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        thread_debug("thread_join: timeout (thid: %p)\n",
                     (void *)target_th->thread_id);
        sleep_wait_for_interrupt(th, limit - now);
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
}
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;
    }
    else if (TYPE(target_th->errinfo) == T_NODE) {
        rb_exc_raise(vm_make_jump_tag_but_local_jump(
            GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
    }
    /* normal exception */

    return target_th->self;
}
/*
 *  thr.join(limit)   => thr
 *
 *  The calling thread will suspend execution and run <i>thr</i>. Does not
 *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
 *  the time limit expires, <code>nil</code> will be returned, otherwise
 *  <i>thr</i> is returned.
 *
 *  Any threads not joined will be killed when the main program exits. If
 *  <i>thr</i> had previously raised an exception and the
 *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
 *  (so the exception has not yet been processed) it will be processed at this
 *  time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let x thread finish, a will be killed on exit.
 *
 *  The following example illustrates the <i>limit</i> parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 */
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *target_th;
    double delay = DELAY_INFTY;

    GetThreadPtr(self, target_th);

    rb_scan_args(argc, argv, "01", &limit);
    delay = rb_num2dbl(limit);

    return thread_join(target_th, delay);
}
/*
 *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
 *  its value.
 *
 *     a = Thread.new { 2 + 2 }
 */

thread_value(VALUE self)
{
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);
}
static struct timeval
double2timeval(double d)
{
    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);

    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
    }
}
sleep_forever(rb_thread_t *th)

sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    native_sleep(th, &tv);
}

rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD());
}

timeofday(void)
{
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
}
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
{
    sleep_timeval(th, double2timeval(sleepsec));
}

sleep_for_polling(rb_thread_t *th)
{
    time.tv_usec = 100 * 1000;  /* 0.1 sec */
    sleep_timeval(th, time);
}

rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time);
}

rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        sleep_for_polling(th);
    }
}

struct timeval rb_time_timeval();

rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}
rb_thread_schedule(void)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        rb_gc_save_machine_context(th);
        native_mutex_unlock(&th->vm->global_interpreter_lock);
        native_thread_yield();
        native_mutex_lock(&th->vm->global_interpreter_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        RUBY_VM_CHECK_INTS();
    }
}

int rb_thread_critical;  /* TODO: dummy variable */
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    rb_thread_t *th = GET_THREAD();

    if (ubf == RB_UBF_DFL) {
    }
}
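/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): rb_thread_blocking_region() is the public entry point an
 * extension uses to run a blocking function with the GVL released.
 * Assuming a worker that matches the rb_blocking_function_t signature,
 * a call might look like the following; my_blocking_call and args are
 * hypothetical names used only for the example.
 *
 *     VALUE result = rb_thread_blocking_region(my_blocking_call, &args,
 *                                              RB_UBF_DFL, 0);
 *
 * Passing RB_UBF_DFL asks this file to substitute its default unblock
 * function, so the blocking call can still be interrupted by signals or
 * Thread#kill.
 */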
/*
 *  Invokes the thread scheduler to pass execution to another thread.
 *
 *     a = Thread.new { print "a"; Thread.pass;
 *                      print "b"; Thread.pass;
 *                      print "c" }
 *     b = Thread.new { print "x"; Thread.pass;
 *                      print "y"; Thread.pass;
 *                      print "z" }
 */

thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
}
rb_thread_execute_interrupts(rb_thread_t *th)
{
    while (th->interrupt_flag) {
        int status = th->status;
        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            rb_signal_exec(th, sig);
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
        }

        rb_thread_schedule();
        EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
    }
}
rb_gc_mark_threads(void)

/*****************************************************/

rb_thread_ready(rb_thread_t *th)
{
    rb_thread_interrupt(th);
}
rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
{
    if (rb_thread_dead(th)) {
    }
    if (th->thrown_errinfo != 0 || th->raised_flag) {
        rb_thread_schedule();
    }
    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;
}
rb_thread_signal_raise(void *thptr, int sig)
{
    rb_thread_t *th = thptr;

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_thread_raise(2, argv, th->vm->main_thread);
}

rb_thread_signal_exit(void *thptr)
{
    rb_thread_t *th = thptr;

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_thread_raise(2, argv, th->vm->main_thread);
}
rb_thread_set_raised(rb_thread_t *th)
{
    if (th->raised_flag & RAISED_EXCEPTION) {
    }
    th->raised_flag |= RAISED_EXCEPTION;
}

rb_thread_reset_raised(rb_thread_t *th)
{
    if (!(th->raised_flag & RAISED_EXCEPTION)) {
    }
    th->raised_flag &= ~RAISED_EXCEPTION;
}
rb_thread_fd_close(int fd)

/*
 *  thr.raise(exception)
 *
 *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
 *  caller does not have to be <i>thr</i>.
 *
 *     Thread.abort_on_exception = true
 *     a = Thread.new { sleep(200) }
 *
 *     prog.rb:3: Gotcha (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 */

thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    GetThreadPtr(self, th);
    rb_thread_raise(argc, argv, th);
}
/*
 *  thr.exit        => thr or nil
 *  thr.kill        => thr or nil
 *  thr.terminate   => thr or nil
 *
 *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
 *  is already marked to be killed, <code>exit</code> returns the
 *  <code>Thread</code>. If this is the main thread, or the last thread, exits
 *  the process.
 */

rb_thread_kill(VALUE thread)
{
    GetThreadPtr(thread, th);

    if (th != GET_THREAD() && th->safe_level < 4) {
    }
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
    }
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);

    rb_thread_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;
}
/*
 *  Thread.kill(thread)   => thread
 *
 *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
 *
 *     a = Thread.new { loop { count += 1 } }
 *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
 */

rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}
/*
 *  Thread.exit   => thread
 *
 *  Terminates the currently running thread and schedules another thread to be
 *  run. If this thread is already marked to be killed, <code>exit</code>
 *  returns the <code>Thread</code>. If this is the main thread, or the last
 *  thread, exits the process.
 */

rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}
/*
 *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
 *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
 *
 *     c = Thread.new { Thread.stop; puts "hey!" }
 *     c.wakeup
 *
 *  <em>produces:</em>
 *
 *     hey!
 */

rb_thread_wakeup(VALUE thread)
{
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    rb_thread_ready(th);
}
/*
 *  Wakes up <i>thr</i>, making it eligible for scheduling.
 *
 *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
 *
 *  <em>produces:</em>
 */

rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
}
/*
 *  Thread.stop   => nil
 *
 *  Stops execution of the current thread, putting it into a ``sleep'' state,
 *  and schedules execution of another thread.
 *
 *     a = Thread.new { print "a"; Thread.stop; print "c" }
 *
 *  <em>produces:</em>
 */

rb_thread_stop(void)
{
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_forever();
}
thread_list_i(st_data_t key, st_data_t val, void *data)
{
    VALUE ary = (VALUE)data;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_TO_KILL:
        rb_ary_push(ary, th->self);
    }
}
/********************************************************************/

/*
 *  Thread.list   => array
 *
 *  Returns an array of <code>Thread</code> objects for all threads that are
 *  either runnable or stopped.
 *
 *     Thread.new { sleep(200) }
 *     Thread.new { 1000000.times {|i| i*i } }
 *     Thread.new { Thread.stop }
 *     Thread.list.each {|t| p t}
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3e84 sleep>
 *     #<Thread:0x401b3f38 run>
 *     #<Thread:0x401b3fb0 sleep>
 *     #<Thread:0x401bdf4c run>
 */

rb_thread_list(void)
{
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
}
rb_thread_current(void)
{
    return GET_THREAD()->self;
}

/*
 *  Thread.current   => thread
 *
 *  Returns the currently executing thread.
 *
 *     Thread.current   #=> #<Thread:0x401bdf4c run>
 */

thread_s_current(VALUE klass)
{
    return rb_thread_current();
}

rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}

rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}
/*
 *  Thread.abort_on_exception   => true or false
 *
 *  Returns the status of the global ``abort on exception'' condition. The
 *  default is <code>false</code>. When set to <code>true</code>, or if the
 *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
 *  command line option <code>-d</code> was specified) all threads will abort
 *  (the process will <code>exit(0)</code>) if an exception is raised in any
 *  thread. See also <code>Thread::abort_on_exception=</code>.
 */

rb_thread_s_abort_exc(void)
{
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}
/*
 *  Thread.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, all threads will abort if an exception is
 *  raised. Returns the new state.
 *
 *     Thread.abort_on_exception = true
 *     t1 = Thread.new do
 *       puts  "In new thread"
 *       raise "Exception from thread"
 *     end
 *     puts "not reached"
 *
 *  <em>produces:</em>
 *
 *     prog.rb:4: Exception from thread (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 */

rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
}
/*
 *  thr.abort_on_exception   => true or false
 *
 *  Returns the status of the thread-local ``abort on exception'' condition for
 *  <i>thr</i>. The default is <code>false</code>. See also
 *  <code>Thread::abort_on_exception=</code>.
 */

rb_thread_abort_exc(VALUE thread)
{
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}
/*
 *  thr.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, causes all threads (including the main
 *  program) to abort if an exception is raised in <i>thr</i>. The process will
 *  effectively <code>exit(0)</code>.
 */

rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
}
/*
 *  thr.group   => thgrp or nil
 *
 *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
 *  the thread is not a member of any group.
 *
 *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
 */

rb_thread_group(VALUE thread)
{
    GetThreadPtr(thread, th);
    group = th->thgroup;
}
thread_status_name(enum rb_thread_status status)
{
    switch (status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_TO_KILL:
    }
}

rb_thread_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}
/*
 *  thr.status   => string, false or nil
 *
 *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
 *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
 *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
 *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
 *  terminated with an exception.
 *
 *     a = Thread.new { raise("die now") }
 *     b = Thread.new { Thread.stop }
 *     c = Thread.new { Thread.exit }
 *     d = Thread.new { sleep }
 *     d.kill                  #=> #<Thread:0x401b3678 aborting>
 *     b.status                #=> "sleep"
 *     c.status                #=> false
 *     d.status                #=> "aborting"
 *     Thread.current.status   #=> "run"
 */

rb_thread_status(VALUE thread)
{
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th)) {
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)) {
        }
    }
    return rb_str_new2(thread_status_name(th->status));
}
/*
 *  thr.alive?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
 *
 *     thr = Thread.new { }
 *     thr.join                #=> #<Thread:0x401b3fb0 dead>
 *     Thread.current.alive?   #=> true
 *     thr.alive?              #=> false
 */

rb_thread_alive_p(VALUE thread)
{
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))
        return Qfalse;
    return Qtrue;
}
/*
 *  thr.stop?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
 *
 *     a = Thread.new { Thread.stop }
 *     b = Thread.current
 */

rb_thread_stop_p(VALUE thread)
{
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))
        return Qtrue;
    if (th->status == THREAD_STOPPED)
        return Qtrue;
    return Qfalse;
}
/*
 *  thr.safe_level   => integer
 *
 *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
 *  levels can help when implementing sandboxes which run insecure code.
 *
 *     thr = Thread.new { $SAFE = 3; sleep }
 *     Thread.current.safe_level   #=> 0
 *     thr.safe_level              #=> 3
 */

rb_thread_safe_level(VALUE thread)
{
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}
/*
 *  thr.inspect   => string
 *
 *  Dump the name, id, and status of _thr_ to a string.
 */

rb_thread_inspect(VALUE thread)
{
    char *cname = rb_obj_classname(thread);

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);
}
rb_thread_local_aref(VALUE thread, ID id)
{
    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    if (!th->local_storage) {
    }
    if (st_lookup(th->local_storage, id, &val)) {
    }
}
/*
 *  thr[sym]   => obj or nil
 *
 *  Attribute Reference---Returns the value of a thread-local variable, using
 *  either a symbol or a string name. If the specified variable does not exist,
 *  returns <code>nil</code>.
 *
 *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
 *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
 *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
 *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3b3c sleep>: C
 *     #<Thread:0x401b3bc8 sleep>: B
 *     #<Thread:0x401b3c68 sleep>: A
 *     #<Thread:0x401bdf4c run>:
 */

rb_thread_aref(VALUE thread, VALUE id)
{
    return rb_thread_local_aref(thread, rb_to_id(id));
}
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    st_delete(th->local_storage, (st_data_t *) &id, 0);
    st_insert(th->local_storage, id, val);
}
/*
 *  thr[sym] = obj   => obj
 *
 *  Attribute Assignment---Sets or creates the value of a thread-local variable,
 *  using either a symbol or a string. See also <code>Thread#[]</code>.
 */

rb_thread_aset(VALUE self, ID id, VALUE val)
{
    return rb_thread_local_aset(self, rb_to_id(id), val);
}
/*
 *  thr.key?(sym)   => true or false
 *
 *  Returns <code>true</code> if the given string (or symbol) exists as a
 *  thread-local variable.
 *
 *     me = Thread.current
 *     me.key?(:oliver)    #=> true
 *     me.key?(:stanley)   #=> false
 */

rb_thread_key_p(VALUE self, ID id)
{
    GetThreadPtr(self, th);

    if (!th->local_storage) {
    }
    if (st_lookup(th->local_storage, rb_to_id(id), 0)) {
    }
}
thread_keys_i(ID key, VALUE value, VALUE ary)
{
    rb_ary_push(ary, ID2SYM(key));
}

int
rb_thread_alone(void)
{
    if (GET_THREAD()->vm->living_threads) {
        num = GET_THREAD()->vm->living_threads->num_entries;
        thread_debug("rb_thread_alone: %d\n", num);
    }
}
/*
 *  Returns an array of the names of the thread-local variables (as Symbols).
 *
 *     thr = Thread.new do
 *       Thread.current[:cat] = 'meow'
 *       Thread.current["dog"] = 'woof'
 *     end
 *     thr.join   #=> #<Thread:0x401b3f10 dead>
 *     thr.keys   #=> [:dog, :cat]
 */

rb_thread_keys(VALUE self)
{
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);
    }
}
/*
 *  thr.priority   => integer
 *
 *  Returns the priority of <i>thr</i>. The default is inherited from the
 *  thread that created the new thread, or zero for the initial main thread;
 *  higher-priority threads will run before lower-priority threads.
 *
 *     Thread.current.priority   #=> 0
 */

rb_thread_priority(VALUE thread)
{
    GetThreadPtr(thread, th);
    return INT2NUM(th->priority);
}
/*
 *  thr.priority= integer   => thr
 *
 *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
 *  will run before lower-priority threads.
 *
 *     count1 = count2 = 0
 *     loop { count1 += 1 }
 *     loop { count2 += 1 }
 */

rb_thread_priority_set(VALUE thread, VALUE prio)
{
    GetThreadPtr(thread, th);

    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
}
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
rb_fd_init(volatile rb_fdset_t *fds)
{
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}
rb_fd_term(rb_fdset_t *fds)
{
    if (fds->fdset) free(fds->fdset);
}

rb_fd_zero(rb_fdset_t *fds)
{
    MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
    FD_ZERO(fds->fdset);
}
rb_fd_resize(int n, rb_fdset_t *fds)
{
    int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    fds->fdset = realloc(fds->fdset, m);
    memset((char *)fds->fdset + o, 0, m - o);

    if (n >= fds->maxfd) fds->maxfd = n + 1;
}
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}

rb_fd_clr(int n, rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);
}

rb_fd_isset(int n, const rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
}
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    int size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->fdset = realloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}
rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;

    rb_fd_resize(n - 1, readfds);
    r = rb_fd_ptr(readfds);
    rb_fd_resize(n - 1, writefds);
    w = rb_fd_ptr(writefds);
    rb_fd_resize(n - 1, exceptfds);
    e = rb_fd_ptr(exceptfds);

    return select(n, r, w, e, timeout);
}
#define FD_ZERO(f)	rb_fd_zero(f)
#define FD_SET(i, f)	rb_fd_set(i, f)
#define FD_CLR(i, f)	rb_fd_clr(i, f)
#define FD_ISSET(i, f)	rb_fd_isset(i, f)

#endif

#if defined(__CYGWIN__) || defined(_WIN32)
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long d = (a->tv_sec - b->tv_sec);
    return (d != 0) ? d : (a->tv_usec - b->tv_usec);
}
subst(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) {
        }
        rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
}
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    fd_set orig_read, orig_write, orig_except;
    struct timeval wait_rest;

    if (timeout) {
        limit = timeofday() +
            (double)timeout->tv_sec + (double)timeout->tv_usec * 1e-6;
        wait_rest = *timeout;
        timeout = &wait_rest;
    }

    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        do {
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                } while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
            }, ubf_select, GET_THREAD());
        } while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    if (errno == EINTR
#ifdef ERESTART
        || errno == ERESTART
#endif
        ) {
        if (read) *read = orig_read;
        if (write) *write = orig_write;
        if (except) *except = orig_except;

        if (timeout) {
            double d = limit - timeofday();

            wait_rest.tv_sec = (unsigned int)d;
            wait_rest.tv_usec = (long)((d - (double)wait_rest.tv_sec) * 1e6);
            if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
            if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
        }
    }
}
rb_thread_wait_fd_rw(int fd, int read)
{
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");

    while (result <= 0) {
        if (read) {
            result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
        }
        else {
            result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
        }
    }

    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
}

rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}

rb_thread_fd_writable(int fd)
{
    rb_thread_wait_fd_rw(fd, 0);
}
rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
                 struct timeval *timeout)
{
    if (!read && !write && !except) {
        if (!timeout) {
            rb_thread_sleep_forever();
            return 0;
        }
        rb_thread_wait_for(*timeout);
        return 0;
    }

    return do_select(max, read, write, except, timeout);
}
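/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): extension code that wants to wait on a descriptor without
 * stalling every Ruby thread should go through the wrappers above rather
 * than calling select(2) directly, e.g.
 *
 *     rb_thread_wait_fd(fd);        wait until fd is readable
 *     rb_thread_fd_writable(fd);    wait until fd is writable
 *
 * Both end up in do_select(), so the GVL is released while waiting and
 * interrupts are honoured.  fd here is just a placeholder descriptor.
 */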
#ifdef USE_CONSERVATIVE_STACK_END
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    *stack_end_p = &stack_end;
}
#endif

rb_gc_save_machine_context(rb_thread_t *th)
{
    SET_MACHINE_STACK_END(&th->machine_stack_end);
    th->machine_register_stack_end = rb_ia64_bsp();
    setjmp(th->machine_regs);
}
int rb_get_next_signal(rb_vm_t *vm);

timer_thread_function(void)
{
    rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
        vm->main_thread->exec_signal = rb_get_next_signal(vm);
        thread_debug("buffered_signal_size: %d, sig: %d\n",
                     vm->buffered_signal_size, vm->main_thread->exec_signal);
        rb_thread_interrupt(vm->main_thread);
    }

    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
}
rb_thread_stop_timer_thread(void)
{
    if (timer_thread_id) {
        native_thread_join(timer_thread_id);
        timer_thread_id = 0;
    }
}

rb_thread_reset_timer_thread(void)
{
    timer_thread_id = 0;
}

rb_thread_start_timer_thread(void)
{
    rb_thread_create_timer_thread();
}
terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
{
    GetThreadPtr(thval, th);

    if (th != current_th) {
        thread_cleanup_func(th);
    }
}
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
}
/*
 *  Document-class: ThreadGroup
 *
 *  <code>ThreadGroup</code> provides a means of keeping track of a number of
 *  threads as a group. A <code>Thread</code> can belong to only one
 *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
 *  remove it from any previous group.
 *
 *  Newly created threads belong to the same group as the thread from which they
 *  were created.
 */

static VALUE thgroup_s_alloc(VALUE);
static VALUE
thgroup_s_alloc(VALUE klass)
{
    struct thgroup *data;

    group = Data_Make_Struct(klass, struct thgroup, 0, free, data);
    data->group = group;
}
struct thgroup_list_params {
    VALUE ary;
    VALUE group;
};

thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
{
    VALUE thread = (VALUE)key;
    VALUE ary = ((struct thgroup_list_params *)data)->ary;
    VALUE group = ((struct thgroup_list_params *)data)->group;

    GetThreadPtr(thread, th);

    if (th->thgroup == group) {
        rb_ary_push(ary, thread);
    }
}
/*
 *  thgrp.list   => array
 *
 *  Returns an array of all existing <code>Thread</code> objects that belong to
 *  this group.
 *
 *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
 */

thgroup_list(VALUE group)
{
    VALUE ary = rb_ary_new();
    struct thgroup_list_params param;

    param.group = group;
    st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
}
/*
 *  thgrp.enclose   => thgrp
 *
 *  Prevents threads from being added to or removed from the receiving
 *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
 *  <code>ThreadGroup</code>.
 *
 *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
 *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
 *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
 *
 *  <em>produces:</em>
 *
 *     ThreadError: can't move from the enclosed thread group
 */

thgroup_enclose(VALUE group)
{
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);
}
/*
 *  thgrp.enclosed?   => true or false
 *
 *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
 *  ThreadGroup#enclose.
 */

thgroup_enclosed_p(VALUE group)
{
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);
}
/*
 *  thgrp.add(thread)   => thgrp
 *
 *  Adds the given <em>thread</em> to this group, removing it from any other
 *  group to which it may have previously belonged.
 *
 *     puts "Initial group is #{ThreadGroup::Default.list}"
 *     tg = ThreadGroup.new
 *     t1 = Thread.new { sleep }
 *     t2 = Thread.new { sleep }
 *     puts "t1 is #{t1}"
 *     puts "t2 is #{t2}"
 *     puts "Initial group now #{ThreadGroup::Default.list}"
 *     puts "tg group now #{tg.list}"
 *
 *  <em>produces:</em>
 *
 *     Initial group is #<Thread:0x401bdf4c>
 *     t1 is #<Thread:0x401b3c90>
 *     t2 is #<Thread:0x401b3c18>
 *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
 *     tg group now #<Thread:0x401b3c90>
 */

thgroup_add(VALUE group, VALUE thread)
{
    struct thgroup *data;

    GetThreadPtr(thread, th);

    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    Data_Get_Struct(group, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    Data_Get_Struct(th->thgroup, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
}
/*
 *  Document-class: Mutex
 *
 *  Mutex implements a simple semaphore that can be used to coordinate access to
 *  shared data from multiple concurrent threads.
 *
 *     semaphore = Mutex.new
 *
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 *
 *     semaphore.synchronize {
 *       # access shared resource
 *     }
 */

typedef struct mutex_struct {
    rb_thread_lock_t lock;
    rb_thread_cond_t cond;
    rb_thread_t volatile *th;
    volatile int cond_waiting;
} mutex_t;
#define GetMutexPtr(obj, tobj) \
  Data_Get_Struct(obj, mutex_t, tobj)

mutex_mark(void *ptr)
{
    mutex_t *mutex = ptr;
    rb_gc_mark(mutex->th->self);
}

mutex_free(void *ptr)
{
    mutex_t *mutex = ptr;
    native_mutex_destroy(&mutex->lock);
    native_cond_destroy(&mutex->cond);
}
mutex_alloc(VALUE klass)
{
    obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
    native_mutex_initialize(&mutex->lock);
    native_cond_initialize(&mutex->cond);
}

/*
 *  Mutex.new   => mutex
 *
 *  Creates a new Mutex
 */
mutex_initialize(VALUE self)
{
}

rb_mutex_new(void)
{
    return mutex_alloc(rb_cMutex);
}
/*
 *  mutex.locked?   => true or false
 *
 *  Returns +true+ if this lock is currently held by some thread.
 */
rb_mutex_locked_p(VALUE self)
{
    GetMutexPtr(self, mutex);
    return mutex->th ? Qtrue : Qfalse;
}
/*
 *  mutex.try_lock   => true or false
 *
 *  Attempts to obtain the lock and returns immediately. Returns +true+ if the
 *  lock was granted.
 */
rb_mutex_trylock(VALUE self)
{
    VALUE locked = Qfalse;
    GetMutexPtr(self, mutex);

    if (mutex->th == GET_THREAD()) {
        rb_raise(rb_eThreadError, "deadlock; recursive locking");
    }

    native_mutex_lock(&mutex->lock);
    if (mutex->th == 0) {
        mutex->th = GET_THREAD();
        locked = Qtrue;
    }
    native_mutex_unlock(&mutex->lock);

    return locked;
}
lock_func(rb_thread_t *th, mutex_t *mutex)
{
    int interrupted = Qfalse;

    native_mutex_lock(&mutex->lock);
    while (mutex->th || (mutex->th = th, 0)) {
        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);

        if (th->interrupt_flag) {
            interrupted = Qtrue;
        }
    }
    native_mutex_unlock(&mutex->lock);
}
lock_interrupt(void *ptr)
{
    mutex_t *mutex = (mutex_t *)ptr;
    native_mutex_lock(&mutex->lock);
    if (mutex->cond_waiting > 0) {
        native_cond_broadcast(&mutex->cond);
        mutex->cond_waiting = 0;
    }
    native_mutex_unlock(&mutex->lock);
}
/*
 *  mutex.lock   => true or false
 *
 *  Attempts to grab the lock and waits if it isn't available.
 *  Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
rb_mutex_lock(VALUE self)
{
    if (rb_mutex_trylock(self) == Qfalse) {
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        while (mutex->th != th) {
            BLOCKING_REGION({
                interrupted = lock_func(th, mutex);
            }, lock_interrupt, mutex);

            RUBY_VM_CHECK_INTS();
        }
    }
}
/*
 *  mutex.unlock   => self
 *
 *  Releases the lock.
 *  Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
 */
rb_mutex_unlock(VALUE self)
{
    GetMutexPtr(self, mutex);

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != GET_THREAD()) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }

    if (mutex->cond_waiting > 0) {
        /* waiting thread */
        native_cond_signal(&mutex->cond);
        mutex->cond_waiting--;
    }

    native_mutex_unlock(&mutex->lock);

    if (err) rb_raise(rb_eThreadError, err);
}
rb_mutex_sleep_forever(VALUE time)
{
    rb_thread_sleep_forever();
}

rb_mutex_wait_for(VALUE time)
{
    const struct timeval *t = (struct timeval *)time;
    rb_thread_wait_for(*t);
}

rb_mutex_sleep(VALUE self, VALUE timeout)
{
    if (!NIL_P(timeout)) {
        t = rb_time_interval(timeout);
    }
    rb_mutex_unlock(self);
    if (NIL_P(timeout)) {
        rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
    }
    else {
        rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
    }
    end = time(0) - beg;
    return INT2FIX(end);
}
/*
 *  mutex.sleep(timeout = nil)   => number
 *
 *  Releases the lock and sleeps +timeout+ seconds if it is given and
 *  non-nil or forever.  Raises +ThreadError+ if +mutex+ wasn't locked by
 *  the current thread.
 */
mutex_sleep(int argc, VALUE *argv, VALUE self)
{
    rb_scan_args(argc, argv, "01", &timeout);
    return rb_mutex_sleep(self, timeout);
}
/*
 *  mutex.synchronize { ... }   => result of the block
 *
 *  Obtains a lock, runs the block, and releases the lock when the block
 *  completes.  See the example under +Mutex+.
 */
rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
{
    rb_mutex_lock(mutex);
    return rb_ensure(func, arg, rb_mutex_unlock, mutex);
}
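/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): rb_thread_synchronize() is the C-level counterpart of
 * Mutex#synchronize -- it takes the lock, calls func(arg), and unlocks
 * through rb_ensure() even if func raises.  A hypothetical caller
 * protecting a shared structure might look like:
 *
 *     static VALUE
 *     update_shared_table(VALUE arg)
 *     {
 *         // mutate the shared structure here
 *         return Qnil;
 *     }
 *
 *     rb_thread_synchronize(mutex, update_shared_table, data);
 *
 * where mutex is a Mutex object (e.g. from rb_mutex_new()) and
 * update_shared_table/data are placeholder names for this example.
 */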
/*
 *  Document-class: Barrier
 */
typedef struct rb_thread_list_struct rb_thread_list_t;

struct rb_thread_list_struct {
    rb_thread_t *th;
    rb_thread_list_t *next;
};
thlist_mark(void *ptr)
{
    rb_thread_list_t *q = ptr;

    for (; q; q = q->next) {
        rb_gc_mark(q->th->self);
    }
}

thlist_free(void *ptr)
{
    rb_thread_list_t *q = ptr, *next;

    for (; q; q = next) {
    }
}
thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
{
    rb_thread_list_t *q;

    while ((q = *list) != NULL) {
        rb_thread_t *th = q->th;

        if (th->status != THREAD_KILLED) {
            rb_thread_ready(th);
            if (!woken && woken_thread) *woken_thread = th;
            if (++woken >= maxth && maxth) break;
        }
    }
}
typedef struct {
    rb_thread_t *owner;
    rb_thread_list_t *waiting, **tail;
} rb_barrier_t;

barrier_mark(void *ptr)
{
    rb_barrier_t *b = ptr;

    if (b->owner) rb_gc_mark(b->owner->self);
    thlist_mark(b->waiting);
}

barrier_free(void *ptr)
{
    rb_barrier_t *b = ptr;

    thlist_free(b->waiting);
}
barrier_alloc(VALUE klass)
{
    rb_barrier_t *barrier;

    obj = Data_Make_Struct(klass, rb_barrier_t,
                           barrier_mark, barrier_free, barrier);
    barrier->owner = GET_THREAD();
    barrier->waiting = 0;
    barrier->tail = &barrier->waiting;
}

rb_barrier_new(void)
{
    return barrier_alloc(rb_cBarrier);
}
rb_barrier_wait(VALUE self)
{
    rb_barrier_t *barrier;
    rb_thread_list_t *q;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
        if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
    }
    else if (barrier->owner == GET_THREAD()) {
    }
    else {
        *barrier->tail = q = ALLOC(rb_thread_list_t);
        q->th = GET_THREAD();
        barrier->tail = &q->next;
        rb_thread_sleep_forever();
        return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
    }
}
rb_barrier_release(VALUE self)
{
    rb_barrier_t *barrier;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (barrier->owner != GET_THREAD()) {
        rb_raise(rb_eThreadError, "not owned");
    }
    n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
    return n ? UINT2NUM(n) : Qfalse;
}
/* variables for recursive traversals */
static ID recursive_key;

recursive_check(VALUE hash, VALUE obj)
{
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        return Qfalse;
    }
    else {
        VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));

        if (NIL_P(list) || TYPE(list) != T_HASH)
            return Qfalse;
        if (NIL_P(rb_hash_lookup(list, obj)))
            return Qfalse;
        return Qtrue;
    }
}
recursive_push(VALUE hash, VALUE obj)
{
    sym = ID2SYM(rb_frame_this_func());
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        hash = rb_hash_new();
        rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
    }
    list = rb_hash_aref(hash, sym);
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        list = rb_hash_new();
        rb_hash_aset(hash, sym, list);
    }
    rb_hash_aset(list, obj, Qtrue);
}
recursive_pop(VALUE hash, VALUE obj)
{
    sym = ID2SYM(rb_frame_this_func());
    if (NIL_P(hash) || TYPE(hash) != T_HASH) {
        symname = rb_inspect(sym);
        thrname = rb_inspect(rb_thread_current());

        rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
                 StringValuePtr(symname), StringValuePtr(thrname));
    }
    list = rb_hash_aref(hash, sym);
    if (NIL_P(list) || TYPE(list) != T_HASH) {
        VALUE symname = rb_inspect(sym);
        VALUE thrname = rb_inspect(rb_thread_current());
        rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
                 StringValuePtr(symname), StringValuePtr(thrname));
    }
    rb_hash_delete(list, obj);
}
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE objid = rb_obj_id(obj);

    if (recursive_check(hash, objid)) {
        return (*func) (obj, arg, Qtrue);
    }
    else {
        VALUE result = Qundef;

        hash = recursive_push(hash, objid);
        if ((state = EXEC_TAG()) == 0) {
            result = (*func) (obj, arg, Qfalse);
        }
        recursive_pop(hash, objid);
        return result;
    }
}
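/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): rb_exec_recursive() is the guard used by methods such as
 * Array#inspect to avoid infinite recursion on self-referencing
 * structures.  The callback receives (obj, arg, recursive); when
 * `recursive' is true the object has already been seen on this thread
 * and the callback should return a cut-off value instead of descending.
 * A hypothetical caller:
 *
 *     static VALUE
 *     inspect_body(VALUE obj, VALUE dummy, int recursive)
 *     {
 *         if (recursive) return rb_str_new2("[...]");
 *         return real_inspect(obj);   // placeholder helper
 *     }
 *
 *     rb_exec_recursive(inspect_body, obj, 0);
 */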
static rb_event_hook_t *
alloc_event_fook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
    hook->flag = events;
}
thread_reset_event_flags(rb_thread_t *th)
{
    rb_event_hook_t *hook = th->event_hooks;
    rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
}
rb_thread_add_event_hook(rb_thread_t *th,
                         rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_fook(func, events, data);
    hook->next = th->event_hooks;
    th->event_hooks = hook;
    thread_reset_event_flags(th);
}
set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
{
    GetThreadPtr(thval, th);

    if (flag) {
        th->event_flags |= RUBY_EVENT_VM;
    }
    else {
        th->event_flags &= (~RUBY_EVENT_VM);
    }
}

set_threads_event_flags(int flag)
{
    st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
}
rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_event_hook_t *hook = alloc_event_fook(func, events, data);
    rb_vm_t *vm = GET_VM();

    hook->next = vm->event_hooks;
    vm->event_hooks = hook;

    set_threads_event_flags(1);
}
remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
{
    rb_event_hook_t *prev = NULL, *hook = *root, *next;

    if (func == 0 || hook->func == func) {
        prev->next = hook->next;
    }
}
rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
{
    int ret = remove_event_hook(&th->event_hooks, func);
    thread_reset_event_flags(th);
}
rb_remove_event_hook(rb_event_hook_func_t func)
{
    rb_vm_t *vm = GET_VM();
    rb_event_hook_t *hook = vm->event_hooks;
    int ret = remove_event_hook(&vm->event_hooks, func);

    if (hook != NULL && vm->event_hooks == NULL) {
        set_threads_event_flags(0);
    }
}
clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
{
    GetThreadPtr((VALUE)key, th);
    rb_thread_remove_event_hook(th, 0);
}

rb_clear_trace_func(void)
{
    st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
    rb_remove_event_hook(0);
}
static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);

/*
 *  set_trace_func(proc)   => proc
 *  set_trace_func(nil)    => nil
 *
 *  Establishes _proc_ as the handler for tracing, or disables
 *  tracing if the parameter is +nil+. _proc_ takes up
 *  to six parameters: an event name, a filename, a line number, an
 *  object id, a binding, and the name of a class. _proc_ is
 *  invoked whenever an event occurs. Events are: <code>c-call</code>
 *  (call a C-language routine), <code>c-return</code> (return from a
 *  C-language routine), <code>call</code> (call a Ruby method),
 *  <code>class</code> (start a class or module definition),
 *  <code>end</code> (finish a class or module definition),
 *  <code>line</code> (execute code on a new line), <code>raise</code>
 *  (raise an exception), and <code>return</code> (return from a Ruby
 *  method). Tracing is disabled within the context of _proc_.
 *
 *      set_trace_func proc { |event, file, line, id, binding, classname|
 *         printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
 *      }
 *
 *        line prog.rb:11               false
 *      c-call prog.rb:11        new    Class
 *      c-call prog.rb:11 initialize   Object
 *    c-return prog.rb:11 initialize   Object
 *    c-return prog.rb:11        new    Class
 *        line prog.rb:12               false
 *        call prog.rb:2        test     Test
 *        line prog.rb:3        test     Test
 *        line prog.rb:4        test     Test
 *      return prog.rb:4        test     Test
 */

set_trace_func(VALUE obj, VALUE trace)
{
    rb_remove_event_hook(call_trace_func);

    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
}
thread_add_trace_func(rb_thread_t *th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }
    rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
}

thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    GetThreadPtr(obj, th);
    thread_add_trace_func(th, trace);
}

thread_set_trace_func_m(VALUE obj, VALUE trace)
{
    GetThreadPtr(obj, th);
    rb_thread_remove_event_hook(th, call_trace_func);

    if (!NIL_P(trace)) {
        thread_add_trace_func(th, trace);
    }
}
get_event_name(rb_event_flag_t event)
{
    switch (event) {
      case RUBY_EVENT_LINE:
        return "line";
      case RUBY_EVENT_CLASS:
        return "class";
      case RUBY_EVENT_END:
        return "end";
      case RUBY_EVENT_CALL:
        return "call";
      case RUBY_EVENT_RETURN:
        return "return";
      case RUBY_EVENT_C_CALL:
        return "c-call";
      case RUBY_EVENT_C_RETURN:
        return "c-return";
      case RUBY_EVENT_RAISE:
        return "raise";
    }
}
VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);

struct call_trace_func_args {
    rb_event_flag_t event;
    VALUE proc;
    VALUE self;
};
call_trace_proc(VALUE args, int tracing)
{
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = rb_str_new2(rb_sourcefile());
    int line = rb_sourceline();

    if (p->event == RUBY_EVENT_C_CALL ||
        p->event == RUBY_EVENT_C_RETURN) {
    }
    else {
        rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
    }

    if (id == ID_ALLOCATOR)
        return Qnil;

    if (TYPE(klass) == T_ICLASS) {
        klass = RBASIC(klass)->klass;
    }
    else if (FL_TEST(klass, FL_SINGLETON)) {
        klass = rb_iv_get(klass, "__attached__");
    }

    return rb_proc_call(p->proc, rb_ary_new3(6,
                                             eventname, filename, INT2FIX(line),
                                             id ? ID2SYM(id) : Qnil,
                                             p->self ? rb_binding_new() : Qnil,
                                             klass ? klass : Qnil));
}
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    struct call_trace_func_args args;

    ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
}
ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
{
    rb_thread_t *th = GET_THREAD();
    int state, raised, tracing;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {
        return Qnil;
    }

    raised = rb_thread_reset_raised(th);

    if ((state = EXEC_TAG()) == 0) {
        result = (*func)(arg, tracing);
    }

    if (raised) {
        rb_thread_set_raised(th);
    }

    th->tracing = tracing;

    return result;
}
/*
 *  +Thread+ encapsulates the behavior of a thread of
 *  execution, including the main thread of the Ruby script.
 *
 *  In the descriptions of the methods in this class, the parameter _sym_
 *  refers to a symbol, which is either a quoted string or a
 *  +Symbol+ (such as <code>:name</code>).
 */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif

    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    rb_thread_t *th = GET_THREAD();
    th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
    rb_define_const(cThGroup, "Default", th->thgroup);

    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    Init_native_thread();

    /* main thread setting */
    /* acquire global interpreter lock */
    rb_thread_lock_t *lp = &GET_THREAD()->vm->global_interpreter_lock;
    native_mutex_initialize(lp);
    native_mutex_lock(lp);
    native_mutex_initialize(&GET_THREAD()->interrupt_lock);

    rb_thread_create_timer_thread();

ruby_native_thread_p(void)
{
    rb_thread_t *rb_thread_check_ptr(rb_thread_t *ptr);
    rb_thread_t *th = ruby_thread_from_native();

    return th ? Qtrue : Qfalse;
}