* process.c (rb_spawn_internal): new function to specify
[ruby-svn.git] / thread.c
blob2f1ff92e8f86b10e480d9fca3758289c74bb7678
1 /**********************************************************************
3 thread.c -
5 $Author$
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
12   YARV Thread Design
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18     Using pthread (or Windows threads); Ruby threads run concurrently.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
25 model 2:
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
38 model 3:
39   Every thread runs concurrently or in parallel, and exclusive access
40   control is needed to access shared objects. For example, to access a
41   String or Array object, a fine-grained lock must be acquired every time.
45 /* for model 2 */
47 #include "eval_intern.h"
48 #include "vm.h"
49 #include "gc.h"
51 #ifndef THREAD_DEBUG
52 #define THREAD_DEBUG 0
53 #endif
55 VALUE rb_cMutex;
56 VALUE rb_cBarrier;
58 static void sleep_timeval(rb_thread_t *th, struct timeval time);
59 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
60 static void sleep_forever(rb_thread_t *th);
61 static double timeofday(void);
62 struct timeval rb_time_interval(VALUE);
63 static int rb_thread_dead(rb_thread_t *th);
65 void rb_signal_exec(rb_thread_t *th, int sig);
66 void rb_disable_interrupt(void);
68 static VALUE eKillSignal = INT2FIX(0);
69 static VALUE eTerminateSignal = INT2FIX(1);
70 static volatile int system_working = 1;
72 inline static void
73 st_delete_wrap(st_table * table, VALUE key)
75 st_delete(table, (st_data_t *) & key, 0);
78 /********************************************************************************/
80 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
82 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *ptr,
83 rb_unblock_function_t **oldfunc, void **oldptr);
85 #define GVL_UNLOCK_BEGIN() do { \
86 rb_thread_t *_th_stored = GET_THREAD(); \
87 rb_gc_save_machine_context(_th_stored); \
88 native_mutex_unlock(&_th_stored->vm->global_interpreter_lock)
90 #define GVL_UNLOCK_END() \
91 native_mutex_lock(&_th_stored->vm->global_interpreter_lock); \
92 rb_thread_set_current(_th_stored); \
93 } while(0)
95 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
96 rb_thread_t *__th = GET_THREAD(); \
97 int __prev_status = __th->status; \
98 rb_unblock_function_t *__oldubf; \
99 void *__oldubfarg; \
100 set_unblock_function(__th, ubf, ubfarg, &__oldubf, &__oldubfarg); \
101 __th->status = THREAD_STOPPED; \
102 thread_debug("enter blocking region (%p)\n", __th); \
103 GVL_UNLOCK_BEGIN(); {\
104 exec; \
106 GVL_UNLOCK_END(); \
107 thread_debug("leave blocking region (%p)\n", __th); \
108 remove_signal_thread_list(__th); \
109 set_unblock_function(__th, __oldubf, __oldubfarg, 0, 0); \
110 if (__th->status == THREAD_STOPPED) { \
111 __th->status = __prev_status; \
113 RUBY_VM_CHECK_INTS(); \
114 } while(0)
116 #if THREAD_DEBUG
117 void rb_thread_debug(const char *fmt, ...);
119 # if THREAD_DEBUG < 0
120 static int rb_thread_debug_enabled;
122 static VALUE
123 rb_thread_s_debug(void)
125 return INT2NUM(rb_thread_debug_enabled);
128 static VALUE
129 rb_thread_s_debug_set(VALUE self, VALUE val)
131 rb_thread_debug_enabled = RTEST(val);
132 return val;
134 # else
135 # define rb_thread_debug_enabled THREAD_DEBUG
136 # endif
137 #define thread_debug rb_thread_debug
138 #else
139 #define thread_debug if(0)printf
140 #endif
142 #ifndef __ia64
143 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
144 #endif
145 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
146 VALUE *register_stack_start));
148 #if defined(_WIN32)
149 #include "thread_win32.c"
151 #define DEBUG_OUT() \
152 WaitForSingleObject(&debug_mutex, INFINITE); \
153 printf("%p - %s", GetCurrentThreadId(), buf); \
154 fflush(stdout); \
155 ReleaseMutex(&debug_mutex);
157 #elif defined(HAVE_PTHREAD_H)
158 #include "thread_pthread.c"
160 #define DEBUG_OUT() \
161 pthread_mutex_lock(&debug_mutex); \
162 printf("%p - %s", pthread_self(), buf); \
163 fflush(stdout); \
164 pthread_mutex_unlock(&debug_mutex);
166 #else
167 #error "unsupported thread type"
168 #endif
170 #if THREAD_DEBUG
171 static int debug_mutex_initialized = 1;
172 static rb_thread_lock_t debug_mutex;
174 void
175 rb_thread_debug(const char *fmt, ...)
177 va_list args;
178 char buf[BUFSIZ];
180 if (!rb_thread_debug_enabled) return;
182 if (debug_mutex_initialized == 1) {
183 debug_mutex_initialized = 0;
184 native_mutex_initialize(&debug_mutex);
187 va_start(args, fmt);
188 vsnprintf(buf, BUFSIZ, fmt, args);
189 va_end(args);
191 DEBUG_OUT();
193 #endif
196 static void
197 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
198 rb_unblock_function_t **oldfunc, void **oldarg)
200 check_ints:
201 RUBY_VM_CHECK_INTS(); /* check signal or so */
202 native_mutex_lock(&th->interrupt_lock);
203 if (th->interrupt_flag) {
204 native_mutex_unlock(&th->interrupt_lock);
205 goto check_ints;
207 else {
208 if (oldfunc) *oldfunc = th->unblock_function;
209 if (oldarg) *oldarg = th->unblock_function_arg;
210 th->unblock_function = func;
211 th->unblock_function_arg = arg;
213 native_mutex_unlock(&th->interrupt_lock);
216 static void
217 rb_thread_interrupt(rb_thread_t *th)
219 native_mutex_lock(&th->interrupt_lock);
220 RUBY_VM_SET_INTERRUPT(th);
221 if (th->unblock_function) {
222 (th->unblock_function)(th->unblock_function_arg);
224 else {
225 /* none */
227 native_mutex_unlock(&th->interrupt_lock);
231 static int
232 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
234 VALUE thval = key;
235 rb_thread_t *th;
236 GetThreadPtr(thval, th);
238 if (th != main_thread) {
239 thread_debug("terminate_i: %p\n", th);
240 rb_thread_interrupt(th);
241 th->thrown_errinfo = eTerminateSignal;
242 th->status = THREAD_TO_KILL;
244 else {
245 thread_debug("terminate_i: main thread (%p)\n", th);
247 return ST_CONTINUE;
250 void
251 rb_thread_terminate_all(void)
253 rb_thread_t *th = GET_THREAD(); /* main thread */
254 rb_vm_t *vm = th->vm;
255 if (vm->main_thread != th) {
256 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
259 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
260 st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
262 while (!rb_thread_alone()) {
263 PUSH_TAG();
264 if (EXEC_TAG() == 0) {
265 rb_thread_schedule();
267 else {
268 /* ignore exception */
270 POP_TAG();
272 system_working = 0;
275 static void
276 thread_cleanup_func(void *th_ptr)
278 rb_thread_t *th = th_ptr;
279 th->status = THREAD_KILLED;
280 th->machine_stack_start = th->machine_stack_end = 0;
281 #ifdef __ia64
282 th->machine_register_stack_start = th->machine_register_stack_end = 0;
283 #endif
284 native_thread_destroy(th);
287 extern void ruby_error_print(void);
288 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
289 void rb_thread_recycle_stack_release(VALUE *);
291 static int
292 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
294 int state;
295 VALUE args = th->first_args;
296 rb_proc_t *proc;
297 rb_thread_t *join_th;
298 rb_thread_t *main_th;
299 VALUE errinfo = Qnil;
301 th->machine_stack_start = stack_start;
302 #ifdef __ia64
303 th->machine_register_stack_start = register_stack_start;
304 #endif
305 thread_debug("thread start: %p\n", th);
307 native_mutex_lock(&th->vm->global_interpreter_lock);
309 thread_debug("thread start (get lock): %p\n", th);
310 rb_thread_set_current(th);
312 TH_PUSH_TAG(th);
313 if ((state = EXEC_TAG()) == 0) {
314 SAVE_ROOT_JMPBUF(th, {
315 if (th->first_proc) {
316 GetProcPtr(th->first_proc, proc);
317 th->errinfo = Qnil;
318 th->local_lfp = proc->block.lfp;
319 th->local_svar = Qnil;
320 th->value = vm_invoke_proc(th, proc, proc->block.self,
321 RARRAY_LEN(args), RARRAY_PTR(args), 0);
323 else {
324 th->value = (*th->first_func)((void *)th->first_args);
328 else {
329 if (th->safe_level < 4 &&
330 (th->vm->thread_abort_on_exception ||
331 th->abort_on_exception || RTEST(ruby_debug))) {
332 errinfo = th->errinfo;
333 if (NIL_P(errinfo)) errinfo = rb_errinfo();
335 th->value = Qnil;
338 th->status = THREAD_KILLED;
339 thread_debug("thread end: %p\n", th);
341 main_th = th->vm->main_thread;
342 if (th != main_th) {
343 if (TYPE(errinfo) == T_OBJECT) {
344 /* treat with normal error object */
345 rb_thread_raise(1, &errinfo, main_th);
348 TH_POP_TAG();
350 st_delete_wrap(th->vm->living_threads, th->self);
352 /* wake up joinning threads */
353 join_th = th->join_list_head;
354 while (join_th) {
355 if (join_th == main_th) errinfo = Qnil;
356 rb_thread_interrupt(join_th);
357 join_th = join_th->join_list_next;
359 st_delete_wrap(th->vm->living_threads, th->self);
361 if (!th->root_fiber) {
362 rb_thread_recycle_stack_release(th->stack);
363 th->stack = 0;
366 thread_cleanup_func(th);
367 native_mutex_unlock(&th->vm->global_interpreter_lock);
369 return 0;
372 static VALUE
373 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
375 rb_thread_t *th;
377 if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
378 rb_raise(rb_eThreadError,
379 "can't start a new thread (frozen ThreadGroup)");
381 GetThreadPtr(thval, th);
383 /* setup thread environment */
384 th->first_args = args;
385 th->first_proc = fn ? Qfalse : rb_block_proc();
386 th->first_func = fn;
388 th->priority = GET_THREAD()->priority;
389 th->thgroup = GET_THREAD()->thgroup;
391 native_mutex_initialize(&th->interrupt_lock);
392 /* kick thread */
393 st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
394 native_thread_create(th);
395 return thval;
398 static VALUE
399 thread_s_new(int argc, VALUE *argv, VALUE klass)
401 rb_thread_t *th;
402 VALUE thread = rb_thread_alloc(klass);
403 rb_obj_call_init(thread, argc, argv);
404 GetThreadPtr(thread, th);
405 if (!th->first_args) {
406 rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
407 rb_class2name(klass));
409 return thread;
413 * call-seq:
414 * Thread.start([args]*) {|args| block } => thread
415 * Thread.fork([args]*) {|args| block } => thread
417 * Basically the same as <code>Thread::new</code>. However, if class
418 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
419 * subclass will not invoke the subclass's <code>initialize</code> method.
422 static VALUE
423 thread_start(VALUE klass, VALUE args)
425 return thread_create_core(rb_thread_alloc(klass), args, 0);
428 static VALUE
429 thread_initialize(VALUE thread, VALUE args)
431 rb_thread_t *th;
432 if (!rb_block_given_p()) {
433 rb_raise(rb_eThreadError, "must be called with a block");
435 GetThreadPtr(thread, th);
436 if (th->first_args) {
437 VALUE rb_proc_location(VALUE self);
438 VALUE proc = th->first_proc, line, loc;
439 const char *file;
440 if (!proc || !RTEST(loc = rb_proc_location(proc))) {
441 rb_raise(rb_eThreadError, "already initialized thread");
443 file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
444 if (NIL_P(line = RARRAY_PTR(loc)[1])) {
445 rb_raise(rb_eThreadError, "already initialized thread - %s",
446 file);
448 rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
449 file, NUM2INT(line));
451 return thread_create_core(thread, args, 0);
454 VALUE
455 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
457 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
461 /* +infty, for this purpose */
462 #define DELAY_INFTY 1E30
464 struct join_arg {
465 rb_thread_t *target, *waiting;
466 double limit;
467 int forever;
470 static VALUE
471 remove_from_join_list(VALUE arg)
473 struct join_arg *p = (struct join_arg *)arg;
474 rb_thread_t *target_th = p->target, *th = p->waiting;
476 if (target_th->status != THREAD_KILLED) {
477 rb_thread_t **pth = &target_th->join_list_head;
479 while (*pth) {
480 if (*pth == th) {
481 *pth = th->join_list_next;
482 break;
484 pth = &(*pth)->join_list_next;
488 return Qnil;
491 static VALUE
492 thread_join_sleep(VALUE arg)
494 struct join_arg *p = (struct join_arg *)arg;
495 rb_thread_t *target_th = p->target, *th = p->waiting;
496 double now, limit = p->limit;
498 while (target_th->status != THREAD_KILLED) {
499 if (p->forever) {
500 sleep_forever(th);
502 else {
503 now = timeofday();
504 if (now > limit) {
505 thread_debug("thread_join: timeout (thid: %p)\n",
506 (void *)target_th->thread_id);
507 return Qfalse;
509 sleep_wait_for_interrupt(th, limit - now);
511 thread_debug("thread_join: interrupted (thid: %p)\n",
512 (void *)target_th->thread_id);
514 return Qtrue;
517 static VALUE
518 thread_join(rb_thread_t *target_th, double delay)
520 rb_thread_t *th = GET_THREAD();
521 struct join_arg arg;
523 arg.target = target_th;
524 arg.waiting = th;
525 arg.limit = timeofday() + delay;
526 arg.forever = delay == DELAY_INFTY;
528 thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
530 if (target_th->status != THREAD_KILLED) {
531 th->join_list_next = target_th->join_list_head;
532 target_th->join_list_head = th;
533 if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
534 remove_from_join_list, (VALUE)&arg)) {
535 return Qnil;
539 thread_debug("thread_join: success (thid: %p)\n",
540 (void *)target_th->thread_id);
542 if (target_th->errinfo != Qnil) {
543 VALUE err = target_th->errinfo;
545 if (FIXNUM_P(err)) {
546 /* */
548 else if (TYPE(target_th->errinfo) == T_NODE) {
549 rb_exc_raise(vm_make_jump_tag_but_local_jump(
550 GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
552 else {
553 /* normal exception */
554 rb_exc_raise(err);
557 return target_th->self;
561 * call-seq:
562 * thr.join => thr
563 * thr.join(limit) => thr
565 * The calling thread will suspend execution and run <i>thr</i>. Does not
566 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
567 * the time limit expires, <code>nil</code> will be returned, otherwise
568 * <i>thr</i> is returned.
570 * Any threads not joined will be killed when the main program exits. If
571 * <i>thr</i> had previously raised an exception and the
572 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
573 * (so the exception has not yet been processed) it will be processed at this
574 * time.
576 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
577 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
578 * x.join # Let x thread finish, a will be killed on exit.
580 * <em>produces:</em>
582 * axyz
584 * The following example illustrates the <i>limit</i> parameter.
586 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
587 * puts "Waiting" until y.join(0.15)
589 * <em>produces:</em>
591 * tick...
592 * Waiting
593 * tick...
594 * Waitingtick...
597 * tick...
600 static VALUE
601 thread_join_m(int argc, VALUE *argv, VALUE self)
603 rb_thread_t *target_th;
604 double delay = DELAY_INFTY;
605 VALUE limit;
607 GetThreadPtr(self, target_th);
609 rb_scan_args(argc, argv, "01", &limit);
610 if (!NIL_P(limit)) {
611 delay = rb_num2dbl(limit);
614 return thread_join(target_th, delay);
618 * call-seq:
619 * thr.value => obj
621 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
622 * its value.
624 * a = Thread.new { 2 + 2 }
625 * a.value #=> 4
628 static VALUE
629 thread_value(VALUE self)
631 rb_thread_t *th;
632 GetThreadPtr(self, th);
633 thread_join(th, DELAY_INFTY);
634 return th->value;
638 * Thread Scheduling
/* Convert a duration in (possibly fractional, possibly negative) seconds
 * to a normalized struct timeval (0 <= tv_usec < 1e6).
 *
 * Fix: the original cast through (int), which both truncates seconds on
 * platforms where time_t is wider than int (values past 2^31 seconds are
 * UB on double->int overflow) and mismatches tv_usec's long type. Use
 * time_t / long casts matching the field types. */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (time_t)d;
    time.tv_usec = (long)((d - (double)time.tv_sec) * 1e6);
    if (time.tv_usec < 0) {
        /* d was negative with a fractional part: renormalize */
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
655 static void
656 sleep_forever(rb_thread_t *th)
658 native_sleep(th, 0);
661 static void
662 sleep_timeval(rb_thread_t *th, struct timeval tv)
664 native_sleep(th, &tv);
/* Public API: put the current thread to sleep until interrupted.
 * Fix: the definition used a K&R empty parameter list `()`; use a
 * proper `(void)` prototype so the definition matches modern C usage. */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD());
}
674 static double
675 timeofday(void)
677 struct timeval tv;
678 gettimeofday(&tv, NULL);
679 return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
682 static void
683 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
685 sleep_timeval(th, double2timeval(sleepsec));
688 static void
689 sleep_for_polling(rb_thread_t *th)
691 struct timeval time;
692 time.tv_sec = 0;
693 time.tv_usec = 100 * 1000; /* 0.1 sec */
694 sleep_timeval(th, time);
697 void
698 rb_thread_wait_for(struct timeval time)
700 rb_thread_t *th = GET_THREAD();
701 sleep_timeval(th, time);
/* Check interrupts; if other threads exist, yield one polling interval. */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        sleep_for_polling(GET_THREAD());
    }
}
714 struct timeval rb_time_timeval();
716 void
717 rb_thread_sleep(int sec)
719 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
722 void
723 rb_thread_schedule(void)
725 thread_debug("rb_thread_schedule\n");
726 if (!rb_thread_alone()) {
727 rb_thread_t *th = GET_THREAD();
729 thread_debug("rb_thread_schedule/switch start\n");
731 rb_gc_save_machine_context(th);
732 native_mutex_unlock(&th->vm->global_interpreter_lock);
734 native_thread_yield();
736 native_mutex_lock(&th->vm->global_interpreter_lock);
738 rb_thread_set_current(th);
739 thread_debug("rb_thread_schedule/switch done\n");
741 RUBY_VM_CHECK_INTS();
745 int rb_thread_critical; /* TODO: dummy variable */
747 VALUE
748 rb_thread_blocking_region(
749 rb_blocking_function_t *func, void *data1,
750 rb_unblock_function_t *ubf, void *data2)
752 VALUE val;
753 rb_thread_t *th = GET_THREAD();
755 if (ubf == RB_UBF_DFL) {
756 ubf = ubf_select;
757 data2 = th;
760 BLOCKING_REGION({
761 val = func(data1);
762 }, ubf, data2);
764 return val;
768 * call-seq:
769 * Thread.pass => nil
771 * Invokes the thread scheduler to pass execution to another thread.
773 * a = Thread.new { print "a"; Thread.pass;
774 * print "b"; Thread.pass;
775 * print "c" }
776 * b = Thread.new { print "x"; Thread.pass;
777 * print "y"; Thread.pass;
778 * print "z" }
779 * a.join
780 * b.join
782 * <em>produces:</em>
784 * axbycz
787 static VALUE
788 thread_s_pass(VALUE klass)
790 rb_thread_schedule();
791 return Qnil;
798 void
799 rb_thread_execute_interrupts(rb_thread_t *th)
801 while (th->interrupt_flag) {
802 int status = th->status;
803 th->status = THREAD_RUNNABLE;
804 th->interrupt_flag = 0;
806 /* signal handling */
807 if (th->exec_signal) {
808 int sig = th->exec_signal;
809 th->exec_signal = 0;
810 rb_signal_exec(th, sig);
813 /* exception from another thread */
814 if (th->thrown_errinfo) {
815 VALUE err = th->thrown_errinfo;
816 th->thrown_errinfo = 0;
817 thread_debug("rb_thread_execute_interrupts: %ld\n", err);
819 if (err == eKillSignal || err == eTerminateSignal) {
820 th->errinfo = INT2FIX(TAG_FATAL);
821 TH_JUMP_TAG(th, TAG_FATAL);
823 else {
824 rb_exc_raise(err);
827 th->status = status;
829 /* thread pass */
830 rb_thread_schedule();
832 EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
/* Obsolete GC hook kept for link compatibility. */
void
rb_gc_mark_threads(void)
{
    /* TODO: remove */
}
842 /*****************************************************/
844 static void
845 rb_thread_ready(rb_thread_t *th)
847 rb_thread_interrupt(th);
850 static VALUE
851 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
853 VALUE exc;
855 again:
856 if (rb_thread_dead(th)) {
857 return Qnil;
860 if (th->thrown_errinfo != 0 || th->raised_flag) {
861 rb_thread_schedule();
862 goto again;
865 exc = rb_make_exception(argc, argv);
866 th->thrown_errinfo = exc;
867 rb_thread_ready(th);
868 return Qnil;
871 void
872 rb_thread_signal_raise(void *thptr, int sig)
874 VALUE argv[2];
875 rb_thread_t *th = thptr;
877 argv[0] = rb_eSignal;
878 argv[1] = INT2FIX(sig);
879 rb_thread_raise(2, argv, th->vm->main_thread);
882 void
883 rb_thread_signal_exit(void *thptr)
885 VALUE argv[2];
886 rb_thread_t *th = thptr;
888 argv[0] = rb_eSystemExit;
889 argv[1] = rb_str_new2("exit");
890 rb_thread_raise(2, argv, th->vm->main_thread);
894 rb_thread_set_raised(rb_thread_t *th)
896 if (th->raised_flag & RAISED_EXCEPTION) {
897 return 1;
899 th->raised_flag |= RAISED_EXCEPTION;
900 return 0;
904 rb_thread_reset_raised(rb_thread_t *th)
906 if (!(th->raised_flag & RAISED_EXCEPTION)) {
907 return 0;
909 th->raised_flag &= ~RAISED_EXCEPTION;
910 return 1;
/* Notify threads blocked on FD that it is being closed. Unimplemented. */
void
rb_thread_fd_close(int fd)
{
    /* TODO: fix me */
}
920 * call-seq:
921 * thr.raise(exception)
923 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
924 * caller does not have to be <i>thr</i>.
926 * Thread.abort_on_exception = true
927 * a = Thread.new { sleep(200) }
928 * a.raise("Gotcha")
930 * <em>produces:</em>
932 * prog.rb:3: Gotcha (RuntimeError)
933 * from prog.rb:2:in `initialize'
934 * from prog.rb:2:in `new'
935 * from prog.rb:2
938 static VALUE
939 thread_raise_m(int argc, VALUE *argv, VALUE self)
941 rb_thread_t *th;
942 GetThreadPtr(self, th);
943 rb_thread_raise(argc, argv, th);
944 return Qnil;
949 * call-seq:
950 * thr.exit => thr or nil
951 * thr.kill => thr or nil
952 * thr.terminate => thr or nil
954 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
955 * is already marked to be killed, <code>exit</code> returns the
956 * <code>Thread</code>. If this is the main thread, or the last thread, exits
957 * the process.
960 VALUE
961 rb_thread_kill(VALUE thread)
963 rb_thread_t *th;
965 GetThreadPtr(thread, th);
967 if (th != GET_THREAD() && th->safe_level < 4) {
968 rb_secure(4);
970 if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
971 return thread;
973 if (th == th->vm->main_thread) {
974 rb_exit(EXIT_SUCCESS);
977 thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);
979 rb_thread_interrupt(th);
980 th->thrown_errinfo = eKillSignal;
981 th->status = THREAD_TO_KILL;
983 return thread;
988 * call-seq:
989 * Thread.kill(thread) => thread
991 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
993 * count = 0
994 * a = Thread.new { loop { count += 1 } }
995 * sleep(0.1) #=> 0
996 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
997 * count #=> 93947
998 * a.alive? #=> false
1001 static VALUE
1002 rb_thread_s_kill(VALUE obj, VALUE th)
1004 return rb_thread_kill(th);
1009 * call-seq:
1010 * Thread.exit => thread
1012 * Terminates the currently running thread and schedules another thread to be
1013 * run. If this thread is already marked to be killed, <code>exit</code>
1014 * returns the <code>Thread</code>. If this is the main thread, or the last
1015 * thread, exit the process.
1018 static VALUE
1019 rb_thread_exit(void)
1021 return rb_thread_kill(GET_THREAD()->self);
1026 * call-seq:
1027 * thr.wakeup => thr
1029 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1030 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1032 * c = Thread.new { Thread.stop; puts "hey!" }
1033 * c.wakeup
1035 * <em>produces:</em>
1037 * hey!
1040 VALUE
1041 rb_thread_wakeup(VALUE thread)
1043 rb_thread_t *th;
1044 GetThreadPtr(thread, th);
1046 if (th->status == THREAD_KILLED) {
1047 rb_raise(rb_eThreadError, "killed thread");
1049 rb_thread_ready(th);
1050 return thread;
1055 * call-seq:
1056 * thr.run => thr
1058 * Wakes up <i>thr</i>, making it eligible for scheduling.
1060 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1061 * Thread.pass
1062 * puts "Got here"
1063 * a.run
1064 * a.join
1066 * <em>produces:</em>
1069 * Got here
1073 VALUE
1074 rb_thread_run(VALUE thread)
1076 rb_thread_wakeup(thread);
1077 rb_thread_schedule();
1078 return thread;
1083 * call-seq:
1084 * Thread.stop => nil
1086 * Stops execution of the current thread, putting it into a ``sleep'' state,
1087 * and schedules execution of another thread.
1089 * a = Thread.new { print "a"; Thread.stop; print "c" }
1090 * Thread.pass
1091 * print "b"
1092 * a.run
1093 * a.join
1095 * <em>produces:</em>
1097 * abc
1100 VALUE
1101 rb_thread_stop(void)
1103 if (rb_thread_alone()) {
1104 rb_raise(rb_eThreadError,
1105 "stopping only thread\n\tnote: use sleep to stop forever");
1107 rb_thread_sleep_forever();
1108 return Qnil;
1111 static int
1112 thread_list_i(st_data_t key, st_data_t val, void *data)
1114 VALUE ary = (VALUE)data;
1115 rb_thread_t *th;
1116 GetThreadPtr((VALUE)key, th);
1118 switch (th->status) {
1119 case THREAD_RUNNABLE:
1120 case THREAD_STOPPED:
1121 case THREAD_TO_KILL:
1122 rb_ary_push(ary, th->self);
1123 default:
1124 break;
1126 return ST_CONTINUE;
1129 /********************************************************************/
1132 * call-seq:
1133 * Thread.list => array
1135 * Returns an array of <code>Thread</code> objects for all threads that are
1136 * either runnable or stopped.
1138 * Thread.new { sleep(200) }
1139 * Thread.new { 1000000.times {|i| i*i } }
1140 * Thread.new { Thread.stop }
1141 * Thread.list.each {|t| p t}
1143 * <em>produces:</em>
1145 * #<Thread:0x401b3e84 sleep>
1146 * #<Thread:0x401b3f38 run>
1147 * #<Thread:0x401b3fb0 sleep>
1148 * #<Thread:0x401bdf4c run>
1151 VALUE
1152 rb_thread_list(void)
1154 VALUE ary = rb_ary_new();
1155 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1156 return ary;
1159 VALUE
1160 rb_thread_current(void)
1162 return GET_THREAD()->self;
1166 * call-seq:
1167 * Thread.current => thread
1169 * Returns the currently executing thread.
1171 * Thread.current #=> #<Thread:0x401bdf4c run>
1174 static VALUE
1175 thread_s_current(VALUE klass)
1177 return rb_thread_current();
1180 VALUE
1181 rb_thread_main(void)
1183 return GET_THREAD()->vm->main_thread->self;
1186 static VALUE
1187 rb_thread_s_main(VALUE klass)
1189 return rb_thread_main();
1194 * call-seq:
1195 * Thread.abort_on_exception => true or false
1197 * Returns the status of the global ``abort on exception'' condition. The
1198 * default is <code>false</code>. When set to <code>true</code>, or if the
1199 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1200 * command line option <code>-d</code> was specified) all threads will abort
1201 * (the process will <code>exit(0)</code>) if an exception is raised in any
1202 * thread. See also <code>Thread::abort_on_exception=</code>.
1205 static VALUE
1206 rb_thread_s_abort_exc(void)
1208 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1213 * call-seq:
1214 * Thread.abort_on_exception= boolean => true or false
1216 * When set to <code>true</code>, all threads will abort if an exception is
1217 * raised. Returns the new state.
1219 * Thread.abort_on_exception = true
1220 * t1 = Thread.new do
1221 * puts "In new thread"
1222 * raise "Exception from thread"
1223 * end
1224 * sleep(1)
1225 * puts "not reached"
1227 * <em>produces:</em>
1229 * In new thread
1230 * prog.rb:4: Exception from thread (RuntimeError)
1231 * from prog.rb:2:in `initialize'
1232 * from prog.rb:2:in `new'
1233 * from prog.rb:2
1236 static VALUE
1237 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1239 rb_secure(4);
1240 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1241 return val;
1246 * call-seq:
1247 * thr.abort_on_exception => true or false
1249 * Returns the status of the thread-local ``abort on exception'' condition for
1250 * <i>thr</i>. The default is <code>false</code>. See also
1251 * <code>Thread::abort_on_exception=</code>.
1254 static VALUE
1255 rb_thread_abort_exc(VALUE thread)
1257 rb_thread_t *th;
1258 GetThreadPtr(thread, th);
1259 return th->abort_on_exception ? Qtrue : Qfalse;
1264 * call-seq:
1265 * thr.abort_on_exception= boolean => true or false
1267 * When set to <code>true</code>, causes all threads (including the main
1268 * program) to abort if an exception is raised in <i>thr</i>. The process will
1269 * effectively <code>exit(0)</code>.
1272 static VALUE
1273 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1275 rb_thread_t *th;
1276 rb_secure(4);
1278 GetThreadPtr(thread, th);
1279 th->abort_on_exception = RTEST(val);
1280 return val;
1285 * call-seq:
1286 * thr.group => thgrp or nil
1288 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1289 * the thread is not a member of any group.
1291 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1294 VALUE
1295 rb_thread_group(VALUE thread)
1297 rb_thread_t *th;
1298 VALUE group;
1299 GetThreadPtr(thread, th);
1300 group = th->thgroup;
1302 if (!group) {
1303 group = Qnil;
1305 return group;
1308 static const char *
1309 thread_status_name(enum rb_thread_status status)
1311 switch (status) {
1312 case THREAD_RUNNABLE:
1313 return "run";
1314 case THREAD_STOPPED:
1315 return "sleep";
1316 case THREAD_TO_KILL:
1317 return "aborting";
1318 case THREAD_KILLED:
1319 return "dead";
1320 default:
1321 return "unknown";
1325 static int
1326 rb_thread_dead(rb_thread_t *th)
1328 return th->status == THREAD_KILLED;
1333 * call-seq:
1334 * thr.status => string, false or nil
1336 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1337 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1338 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1339 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1340 * terminated with an exception.
1342 * a = Thread.new { raise("die now") }
1343 * b = Thread.new { Thread.stop }
1344 * c = Thread.new { Thread.exit }
1345 * d = Thread.new { sleep }
1346 * d.kill #=> #<Thread:0x401b3678 aborting>
1347 * a.status #=> nil
1348 * b.status #=> "sleep"
1349 * c.status #=> false
1350 * d.status #=> "aborting"
1351 * Thread.current.status #=> "run"
1354 static VALUE
1355 rb_thread_status(VALUE thread)
1357 rb_thread_t *th;
1358 GetThreadPtr(thread, th);
1360 if (rb_thread_dead(th)) {
1361 if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1362 /* TODO */ ) {
1363 return Qnil;
1365 return Qfalse;
1367 return rb_str_new2(thread_status_name(th->status));
1372 * call-seq:
1373 * thr.alive? => true or false
1375 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1377 * thr = Thread.new { }
1378 * thr.join #=> #<Thread:0x401b3fb0 dead>
1379 * Thread.current.alive? #=> true
1380 * thr.alive? #=> false
1383 static VALUE
1384 rb_thread_alive_p(VALUE thread)
1386 rb_thread_t *th;
1387 GetThreadPtr(thread, th);
1389 if (rb_thread_dead(th))
1390 return Qfalse;
1391 return Qtrue;
1395 * call-seq:
1396 * thr.stop? => true or false
1398 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1400 * a = Thread.new { Thread.stop }
1401 * b = Thread.current
1402 * a.stop? #=> true
1403 * b.stop? #=> false
1406 static VALUE
1407 rb_thread_stop_p(VALUE thread)
1409 rb_thread_t *th;
1410 GetThreadPtr(thread, th);
1412 if (rb_thread_dead(th))
1413 return Qtrue;
1414 if (th->status == THREAD_STOPPED)
1415 return Qtrue;
1416 return Qfalse;
1420 * call-seq:
1421 * thr.safe_level => integer
1423 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1424 * levels can help when implementing sandboxes which run insecure code.
1426 * thr = Thread.new { $SAFE = 3; sleep }
1427 * Thread.current.safe_level #=> 0
1428 * thr.safe_level #=> 3
1431 static VALUE
1432 rb_thread_safe_level(VALUE thread)
1434 rb_thread_t *th;
1435 GetThreadPtr(thread, th);
1437 return INT2NUM(th->safe_level);
1441 * call-seq:
1442 * thr.inspect => string
1444 * Dump the name, id, and status of _thr_ to a string.
1447 static VALUE
1448 rb_thread_inspect(VALUE thread)
1450 char *cname = rb_obj_classname(thread);
1451 rb_thread_t *th;
1452 const char *status;
1453 VALUE str;
1455 GetThreadPtr(thread, th);
1456 status = thread_status_name(th->status);
1457 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1458 OBJ_INFECT(str, thread);
1460 return str;
1463 VALUE
1464 rb_thread_local_aref(VALUE thread, ID id)
/* Fetch thread-local `id` from `thread`; Qnil when the thread has no
 * storage table or no such key.  Raises SecurityError for cross-thread
 * access at $SAFE >= 4. */
1466 rb_thread_t *th;
1467 VALUE val;
1469 GetThreadPtr(thread, th);
1470 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1471 rb_raise(rb_eSecurityError, "Insecure: thread locals");
1473 if (!th->local_storage) {
/* storage table is created lazily on first []= */
1474 return Qnil;
1476 if (st_lookup(th->local_storage, id, &val)) {
1477 return val;
1479 return Qnil;
1483 * call-seq:
1484 * thr[sym] => obj or nil
1486 * Attribute Reference---Returns the value of a thread-local variable, using
1487 * either a symbol or a string name. If the specified variable does not exist,
1488 * returns <code>nil</code>.
1490 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1491 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1492 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1493 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1495 * <em>produces:</em>
1497 * #<Thread:0x401b3b3c sleep>: C
1498 * #<Thread:0x401b3bc8 sleep>: B
1499 * #<Thread:0x401b3c68 sleep>: A
1500 * #<Thread:0x401bdf4c run>:
1503 static VALUE
1504 rb_thread_aref(VALUE thread, VALUE id)
1506 return rb_thread_local_aref(thread, rb_to_id(id));
1509 VALUE
1510 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
/* Store thread-local `id` = `val` on `thread`.  Assigning nil deletes the
 * key.  Raises SecurityError at $SAFE >= 4 for other threads, and a
 * frozen error when the thread object is frozen. */
1512 rb_thread_t *th;
1513 GetThreadPtr(thread, th);
1515 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1516 rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
1518 if (OBJ_FROZEN(thread)) {
1519 rb_error_frozen("thread locals");
/* lazily allocate the per-thread st table on first write */
1521 if (!th->local_storage) {
1522 th->local_storage = st_init_numtable();
1524 if (NIL_P(val)) {
1525 st_delete(th->local_storage, (st_data_t *) & id, 0);
1526 return Qnil;
1528 st_insert(th->local_storage, id, val);
1529 return val;
1533 * call-seq:
1534 * thr[sym] = obj => obj
1536 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1537 * using either a symbol or a string. See also <code>Thread#[]</code>.
1540 static VALUE
1541 rb_thread_aset(VALUE self, ID id, VALUE val)
1543 return rb_thread_local_aset(self, rb_to_id(id), val);
1547 * call-seq:
1548 * thr.key?(sym) => true or false
1550 * Returns <code>true</code> if the given string (or symbol) exists as a
1551 * thread-local variable.
1553 * me = Thread.current
1554 * me[:oliver] = "a"
1555 * me.key?(:oliver) #=> true
1556 * me.key?(:stanley) #=> false
1559 static VALUE
1560 rb_thread_key_p(VALUE self, ID id)
/* Thread#key?: true when the given key exists as a thread-local.
 * NOTE(review): the parameter is declared ID but passed through
 * rb_to_id(), which expects a VALUE -- looks like it actually receives
 * the raw Symbol/String argument; confirm against the method binding. */
1562 rb_thread_t *th;
1563 GetThreadPtr(self, th);
1565 if (!th->local_storage) {
1566 return Qfalse;
1568 if (st_lookup(th->local_storage, rb_to_id(id), 0)) {
1569 return Qtrue;
1571 return Qfalse;
1574 static int
1575 thread_keys_i(ID key, VALUE value, VALUE ary)
1577 rb_ary_push(ary, ID2SYM(key));
1578 return ST_CONTINUE;
/* True when the current thread is the only living thread in the VM. */
1582 rb_thread_alone()
1584 int num = 1;
1585 if (GET_THREAD()->vm->living_threads) {
1586 num = GET_THREAD()->vm->living_threads->num_entries;
1587 thread_debug("rb_thread_alone: %d\n", num);
1589 return num == 1;
1593 * call-seq:
1594 * thr.keys => array
1596 * Returns an an array of the names of the thread-local variables (as Symbols).
1598 * thr = Thread.new do
1599 * Thread.current[:cat] = 'meow'
1600 * Thread.current["dog"] = 'woof'
1601 * end
1602 * thr.join #=> #<Thread:0x401b3f10 dead>
1603 * thr.keys #=> [:dog, :cat]
1606 static VALUE
1607 rb_thread_keys(VALUE self)
1609 rb_thread_t *th;
1610 VALUE ary = rb_ary_new();
1611 GetThreadPtr(self, th);
1613 if (th->local_storage) {
1614 st_foreach(th->local_storage, thread_keys_i, ary);
1616 return ary;
1620 * call-seq:
1621 * thr.priority => integer
1623 * Returns the priority of <i>thr</i>. Default is inherited from the
1624 * current thread which creating the new thread, or zero for the
1625 * initial main thread; higher-priority threads will run before
1626 * lower-priority threads.
1628 * Thread.current.priority #=> 0
1631 static VALUE
1632 rb_thread_priority(VALUE thread)
1634 rb_thread_t *th;
1635 GetThreadPtr(thread, th);
1636 return INT2NUM(th->priority);
1641 * call-seq:
1642 * thr.priority= integer => thr
1644 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1645 * will run before lower-priority threads.
1647 * count1 = count2 = 0
1648 * a = Thread.new do
1649 * loop { count1 += 1 }
1650 * end
1651 * a.priority = -1
1653 * b = Thread.new do
1654 * loop { count2 += 1 }
1655 * end
1656 * b.priority = -2
1657 * sleep 1 #=> 1
1658 * count1 #=> 622504
1659 * count2 #=> 5832
1662 static VALUE
1663 rb_thread_priority_set(VALUE thread, VALUE prio)
/* Thread#priority=: store the new priority and push it down to the
 * native thread scheduler.  Requires $SAFE < 4. */
1665 rb_thread_t *th;
1666 GetThreadPtr(thread, th);
1668 rb_secure(4);
1670 th->priority = NUM2INT(prio);
1671 native_thread_apply_priority(th);
1672 return prio;
1675 /* for IO */
1677 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1678 void
1679 rb_fd_init(volatile rb_fdset_t *fds)
/* Initialize a growable fd set: one heap-allocated fd_set, zeroed,
 * tracking no descriptors yet. */
1681 fds->maxfd = 0;
1682 fds->fdset = ALLOC(fd_set);
1683 FD_ZERO(fds->fdset);
1686 void
1687 rb_fd_term(rb_fdset_t *fds)
1689 if (fds->fdset) free(fds->fdset);
1690 fds->maxfd = 0;
1691 fds->fdset = 0;
1694 void
1695 rb_fd_zero(rb_fdset_t *fds)
/* Clear every descriptor bit without shrinking the allocation: zero the
 * full grown region, then FD_ZERO the base fd_set for good measure. */
1697 if (fds->fdset) {
1698 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
1699 FD_ZERO(fds->fdset);
1703 static void
1704 rb_fd_resize(int n, rb_fdset_t *fds)
1706 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
1707 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
1709 if (m < sizeof(fd_set)) m = sizeof(fd_set);
1710 if (o < sizeof(fd_set)) o = sizeof(fd_set);
1712 if (m > o) {
1713 fds->fdset = realloc(fds->fdset, m);
1714 memset((char *)fds->fdset + o, 0, m - o);
1716 if (n >= fds->maxfd) fds->maxfd = n + 1;
1719 void
1720 rb_fd_set(int n, rb_fdset_t *fds)
1722 rb_fd_resize(n, fds);
1723 FD_SET(n, fds->fdset);
1726 void
1727 rb_fd_clr(int n, rb_fdset_t *fds)
1729 if (n >= fds->maxfd) return;
1730 FD_CLR(n, fds->fdset);
/* Membership test; descriptors beyond the tracked range are never set. */
1734 rb_fd_isset(int n, const rb_fdset_t *fds)
1736 if (n >= fds->maxfd) return 0;
1737 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1740 void
1741 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
/* Copy a plain fd_set holding descriptors < max into a growable set,
 * reallocating dst to at least sizeof(fd_set). */
1743 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
1745 if (size < sizeof(fd_set)) size = sizeof(fd_set);
1746 dst->maxfd = max;
1747 dst->fdset = realloc(dst->fdset, size);
1748 memcpy(dst->fdset, src, size);
/* select(2) over growable fd sets: each non-NULL set is resized to hold
 * descriptors up to n-1, then its underlying fd_set pointer is passed
 * straight through to select(). */
1752 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
1754 fd_set *r = NULL, *w = NULL, *e = NULL;
1755 if (readfds) {
1756 rb_fd_resize(n - 1, readfds);
1757 r = rb_fd_ptr(readfds);
1759 if (writefds) {
1760 rb_fd_resize(n - 1, writefds);
1761 w = rb_fd_ptr(writefds);
1763 if (exceptfds) {
1764 rb_fd_resize(n - 1, exceptfds);
1765 e = rb_fd_ptr(exceptfds);
1767 return select(n, r, w, e, timeout);
1770 #undef FD_ZERO
1771 #undef FD_SET
1772 #undef FD_CLR
1773 #undef FD_ISSET
1775 #define FD_ZERO(f) rb_fd_zero(f)
1776 #define FD_SET(i, f) rb_fd_set(i, f)
1777 #define FD_CLR(i, f) rb_fd_clr(i, f)
1778 #define FD_ISSET(i, f) rb_fd_isset(i, f)
1780 #endif
1782 #if defined(__CYGWIN__) || defined(_WIN32)
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    /* Three-way timeval compare: seconds dominate, microseconds break
     * ties.  Negative/zero/positive like strcmp. */
    long sec_diff = a->tv_sec - b->tv_sec;

    if (sec_diff != 0) {
        return sec_diff;
    }
    return a->tv_usec - b->tv_usec;
}
static int
subst(struct timeval *rest, const struct timeval *wait)
{
    /* rest -= wait with microsecond borrow.  Returns 0 (and leaves rest
     * untouched) when wait >= rest, 1 otherwise. */
    time_t sec = rest->tv_sec;
    long usec = rest->tv_usec;

    while (usec < wait->tv_usec) {
        if (sec <= wait->tv_sec) {
            return 0;
        }
        /* borrow one second into the microsecond field */
        sec -= 1;
        usec += 1000 * 1000;
    }
    rest->tv_sec = sec - wait->tv_sec;
    rest->tv_usec = usec - wait->tv_usec;
    return 1;
}
1804 #endif
1806 static int
1807 do_select(int n, fd_set *read, fd_set *write, fd_set *except,
1808 struct timeval *timeout)
/* Core select() wrapper: releases the GVL around the syscall, restores
 * the original fd sets and recomputes the remaining timeout on EINTR,
 * then retries.  On Cygwin/Win32 it instead polls in 100ms slices so
 * thread interrupts are noticed. */
1810 int result, lerrno;
1811 fd_set orig_read, orig_write, orig_except;
1813 #ifndef linux
/* non-Linux select() does not update *timeout; track the deadline
 * manually so retries wait only for the remaining time */
1814 double limit;
1815 struct timeval wait_rest;
1817 if (timeout) {
1818 limit = timeofday() +
1819 (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
1820 wait_rest = *timeout;
1821 timeout = &wait_rest;
1823 #endif
/* keep pristine copies: select() clobbers its fd_set arguments */
1825 if (read) orig_read = *read;
1826 if (write) orig_write = *write;
1827 if (except) orig_except = *except;
1829 retry:
1830 lerrno = 0;
1832 #if defined(__CYGWIN__) || defined(_WIN32)
1834 /* polling duration: 100ms */
1835 struct timeval wait_100ms, *wait;
1836 wait_100ms.tv_sec = 0;
1837 wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
1839 do {
1840 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
1841 BLOCKING_REGION({
1842 do {
1843 result = select(n, read, write, except, wait);
1844 if (result < 0) lerrno = errno;
1845 if (result != 0) break;
/* nothing ready this slice: restore the sets and poll again */
1847 if (read) *read = orig_read;
1848 if (write) *write = orig_write;
1849 if (except) *except = orig_except;
1850 wait = &wait_100ms;
1851 } while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
1852 }, 0, 0);
1853 } while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
1855 #else
/* ubf_select wakes the blocked select() when the thread is interrupted */
1856 BLOCKING_REGION({
1857 result = select(n, read, write, except, timeout);
1858 if (result < 0) lerrno = errno;
1859 }, ubf_select, GET_THREAD());
1860 #endif
/* republish the errno captured inside the blocking region */
1862 errno = lerrno;
1864 if (result < 0) {
1865 if (errno == EINTR
1866 #ifdef ERESTART
1867 || errno == ERESTART
1868 #endif
/* interrupted: restore sets, shrink the timeout, and retry */
1870 if (read) *read = orig_read;
1871 if (write) *write = orig_write;
1872 if (except) *except = orig_except;
1873 #ifndef linux
1874 if (timeout) {
1875 double d = limit - timeofday();
1877 wait_rest.tv_sec = (unsigned int)d;
1878 wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
1879 if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
1880 if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
1882 #endif
1883 goto retry;
1886 return result;
1889 static void
1890 rb_thread_wait_fd_rw(int fd, int read)
/* Block the current Ruby thread until fd is ready for reading (read != 0)
 * or writing (read == 0).  Retries while do_select reports nothing ready;
 * raises a SystemCallError on failure. */
1892 int result = 0;
1893 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
1895 while (result <= 0) {
/* a fresh one-descriptor set per attempt */
1896 rb_fdset_t set;
1897 rb_fd_init(&set);
1898 FD_SET(fd, &set);
1900 if (read) {
1901 result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
1903 else {
1904 result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
1907 rb_fd_term(&set);
1909 if (result < 0) {
1910 rb_sys_fail(0);
1914 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
void
rb_thread_wait_fd(int fd)
{
    /* Block the current thread until fd becomes readable. */
    rb_thread_wait_fd_rw(fd, 1);
}
/* Block the current thread until fd becomes writable; always Qtrue. */
1924 rb_thread_fd_writable(int fd)
1926 rb_thread_wait_fd_rw(fd, 0);
1927 return Qtrue;
/* Thread-aware select(2).  With no fd sets at all this degenerates into a
 * plain sleep (forever, or for *timeout); otherwise defer to do_select. */
1931 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
1932 struct timeval *timeout)
1934 if (!read && !write && !except) {
1935 if (!timeout) {
1936 rb_thread_sleep_forever();
1937 return 0;
1939 rb_thread_wait_for(*timeout);
1940 return 0;
1942 else {
1943 return do_select(max, read, write, except, timeout);
1949 * for GC
1952 #ifdef USE_CONSERVATIVE_STACK_END
1953 void
1954 rb_gc_set_stack_end(VALUE **stack_end_p)
/* Publish the address of a local as the conservative GC stack boundary:
 * &stack_end approximates the current top of the machine stack. */
1956 VALUE stack_end;
1957 *stack_end_p = &stack_end;
1959 #endif
1961 void
1962 rb_gc_save_machine_context(rb_thread_t *th)
/* Snapshot the machine state the conservative GC must scan: the stack
 * end, the ia64 register backing store, and (via setjmp) the callee-saved
 * registers into th->machine_regs. */
1964 SET_MACHINE_STACK_END(&th->machine_stack_end);
1965 #ifdef __ia64
1966 th->machine_register_stack_end = rb_ia64_bsp();
1967 #endif
1968 setjmp(th->machine_regs);
1975 int rb_get_next_signal(rb_vm_t *vm);
1977 static void
1978 timer_thread_function(void)
/* Periodic tick run by the native timer thread: request a timer interrupt
 * on the running thread (time slicing) and forward one buffered signal to
 * the main thread when none is pending. */
1980 rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
1982 /* for time slice */
1983 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
1985 /* check signal */
1986 if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
1987 vm->main_thread->exec_signal = rb_get_next_signal(vm);
1988 thread_debug("buffered_signal_size: %d, sig: %d\n",
1989 vm->buffered_signal_size, vm->main_thread->exec_signal);
1990 rb_thread_interrupt(vm->main_thread);
1993 #if 0
1994 /* prove profiler */
1995 if (vm->prove_profile.enable) {
1996 rb_thread_t *th = vm->running_thread;
1998 if (vm->during_gc) {
1999 /* GC prove profiling */
2002 #endif
2005 void
2006 rb_thread_stop_timer_thread(void)
/* Shut down the native timer thread: drop the run flag, join it, and
 * clear the id so a later start creates a fresh one. */
2008 if (timer_thread_id) {
2009 system_working = 0;
2010 native_thread_join(timer_thread_id);
2011 timer_thread_id = 0;
2015 void
2016 rb_thread_reset_timer_thread(void)
/* Forget the timer thread id without joining -- used after fork(), where
 * the child does not inherit the timer thread. */
2018 timer_thread_id = 0;
void
rb_thread_start_timer_thread(void)
{
    /* (Re)spawn the native timer thread used for time slicing/signals. */
    rb_thread_create_timer_thread();
}
2027 static int
2028 terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2030 VALUE thval = key;
2031 rb_thread_t *th;
2032 GetThreadPtr(thval, th);
2034 if (th != current_th) {
2035 thread_cleanup_func(th);
2037 return ST_CONTINUE;
2040 void
2041 rb_thread_atfork(void)
/* fork() child hook: the calling thread becomes the main thread, every
 * other thread is cleaned up, and the living-thread table is rebuilt to
 * contain only the survivor. */
2043 rb_thread_t *th = GET_THREAD();
2044 rb_vm_t *vm = th->vm;
2045 VALUE thval = th->self;
2046 vm->main_thread = th;
2048 st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
2049 st_clear(vm->living_threads);
2050 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2053 struct thgroup {
2054 int enclosed;
2055 VALUE group;
2059 * Document-class: ThreadGroup
2061 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2062 * threads as a group. A <code>Thread</code> can belong to only one
2063 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2064 * remove it from any previous group.
2066 * Newly created threads belong to the same group as the thread from which they
2067 * were created.
2070 static VALUE thgroup_s_alloc(VALUE);
2071 static VALUE
2072 thgroup_s_alloc(VALUE klass)
2074 VALUE group;
2075 struct thgroup *data;
2077 group = Data_Make_Struct(klass, struct thgroup, 0, free, data);
2078 data->enclosed = 0;
2079 data->group = group;
2081 return group;
2084 struct thgroup_list_params {
2085 VALUE ary;
2086 VALUE group;
2089 static int
2090 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2092 VALUE thread = (VALUE)key;
2093 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2094 VALUE group = ((struct thgroup_list_params *)data)->group;
2095 rb_thread_t *th;
2096 GetThreadPtr(thread, th);
2098 if (th->thgroup == group) {
2099 rb_ary_push(ary, thread);
2101 return ST_CONTINUE;
2105 * call-seq:
2106 * thgrp.list => array
2108 * Returns an array of all existing <code>Thread</code> objects that belong to
2109 * this group.
2111 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2114 static VALUE
2115 thgroup_list(VALUE group)
2117 VALUE ary = rb_ary_new();
2118 struct thgroup_list_params param;
2120 param.ary = ary;
2121 param.group = group;
2122 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2123 return ary;
2128 * call-seq:
2129 * thgrp.enclose => thgrp
2131 * Prevents threads from being added to or removed from the receiving
2132 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2133 * <code>ThreadGroup</code>.
2135 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2136 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2137 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2138 * tg.add thr
2140 * <em>produces:</em>
2142 * ThreadError: can't move from the enclosed thread group
2145 VALUE
2146 thgroup_enclose(VALUE group)
2148 struct thgroup *data;
2150 Data_Get_Struct(group, struct thgroup, data);
2151 data->enclosed = 1;
2153 return group;
2158 * call-seq:
2159 * thgrp.enclosed? => true or false
2161 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2162 * ThreadGroup#enclose.
2165 static VALUE
2166 thgroup_enclosed_p(VALUE group)
2168 struct thgroup *data;
2170 Data_Get_Struct(group, struct thgroup, data);
2171 if (data->enclosed)
2172 return Qtrue;
2173 return Qfalse;
2178 * call-seq:
2179 * thgrp.add(thread) => thgrp
2181 * Adds the given <em>thread</em> to this group, removing it from any other
2182 * group to which it may have previously belonged.
2184 * puts "Initial group is #{ThreadGroup::Default.list}"
2185 * tg = ThreadGroup.new
2186 * t1 = Thread.new { sleep }
2187 * t2 = Thread.new { sleep }
2188 * puts "t1 is #{t1}"
2189 * puts "t2 is #{t2}"
2190 * tg.add(t1)
2191 * puts "Initial group now #{ThreadGroup::Default.list}"
2192 * puts "tg group now #{tg.list}"
2194 * <em>produces:</em>
2196 * Initial group is #<Thread:0x401bdf4c>
2197 * t1 is #<Thread:0x401b3c90>
2198 * t2 is #<Thread:0x401b3c18>
2199 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2200 * tg group now #<Thread:0x401b3c90>
2203 static VALUE
2204 thgroup_add(VALUE group, VALUE thread)
/* ThreadGroup#add: move `thread` into `group`.  Refuses when either the
 * destination or the thread's current group is frozen or enclosed. */
2206 rb_thread_t *th;
2207 struct thgroup *data;
2209 rb_secure(4);
2210 GetThreadPtr(thread, th);
2212 if (OBJ_FROZEN(group)) {
2213 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
2215 Data_Get_Struct(group, struct thgroup, data);
2216 if (data->enclosed) {
2217 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
/* a thread with no group (being torn down) cannot be moved */
2220 if (!th->thgroup) {
2221 return Qnil;
2224 if (OBJ_FROZEN(th->thgroup)) {
2225 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
2227 Data_Get_Struct(th->thgroup, struct thgroup, data);
2228 if (data->enclosed) {
2229 rb_raise(rb_eThreadError,
2230 "can't move from the enclosed thread group");
2233 th->thgroup = group;
2234 return group;
2239 * Document-class: Mutex
2241 * Mutex implements a simple semaphore that can be used to coordinate access to
2242 * shared data from multiple concurrent threads.
2244 * Example:
2246 * require 'thread'
2247 * semaphore = Mutex.new
2249 * a = Thread.new {
2250 * semaphore.synchronize {
2251 * # access shared resource
2255 * b = Thread.new {
2256 * semaphore.synchronize {
2257 * # access shared resource
2263 typedef struct mutex_struct {
2264 rb_thread_lock_t lock;
2265 rb_thread_cond_t cond;
2266 rb_thread_t volatile *th;
2267 volatile int cond_waiting;
2268 } mutex_t;
2270 #define GetMutexPtr(obj, tobj) \
2271 Data_Get_Struct(obj, mutex_t, tobj)
2273 static void
2274 mutex_mark(void *ptr)
2276 if (ptr) {
2277 mutex_t *mutex = ptr;
2278 if (mutex->th) {
2279 rb_gc_mark(mutex->th->self);
2284 static void
2285 mutex_free(void *ptr)
2287 if (ptr) {
2288 mutex_t *mutex = ptr;
2289 native_mutex_destroy(&mutex->lock);
2290 native_cond_destroy(&mutex->cond);
2292 ruby_xfree(ptr);
2295 static VALUE
2296 mutex_alloc(VALUE klass)
/* Allocate a Mutex object and initialize its native lock and condition
 * variable; ownership starts empty (zero-filled by Data_Make_Struct). */
2298 VALUE volatile obj;
2299 mutex_t *mutex;
2301 obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
2302 native_mutex_initialize(&mutex->lock);
2303 native_cond_initialize(&mutex->cond);
2304 return obj;
2308 * call-seq:
2309 * Mutex.new => mutex
2311 * Creates a new Mutex
2313 static VALUE
2314 mutex_initialize(VALUE self)
2316 return self;
2319 VALUE
2320 rb_mutex_new(void)
2322 return mutex_alloc(rb_cMutex);
2326 * call-seq:
2327 * mutex.locked? => true or false
2329 * Returns +true+ if this lock is currently held by some thread.
2331 VALUE
2332 rb_mutex_locked_p(VALUE self)
2334 mutex_t *mutex;
2335 GetMutexPtr(self, mutex);
2336 return mutex->th ? Qtrue : Qfalse;
2340 * call-seq:
2341 * mutex.try_lock => true or false
2343 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2344 * lock was granted.
2346 VALUE
2347 rb_mutex_trylock(VALUE self)
/* Mutex#try_lock: non-blocking acquire.  Qtrue on success, Qfalse when
 * another thread holds the lock; raises on recursive locking. */
2349 mutex_t *mutex;
2350 VALUE locked = Qfalse;
2351 GetMutexPtr(self, mutex);
2353 if (mutex->th == GET_THREAD()) {
2354 rb_raise(rb_eThreadError, "deadlock; recursive locking");
/* ownership test-and-set under the native lock */
2357 native_mutex_lock(&mutex->lock);
2358 if (mutex->th == 0) {
2359 mutex->th = GET_THREAD();
2360 locked = Qtrue;
2362 native_mutex_unlock(&mutex->lock);
2364 return locked;
2367 static int
2368 lock_func(rb_thread_t *th, mutex_t *mutex)
/* Blocking-region body for Mutex#lock: wait on the condvar until the
 * mutex is free, then claim it via the comma-expression in the loop
 * condition.  Returns nonzero when woken by a thread interrupt before
 * the lock was acquired. */
2370 int interrupted = Qfalse;
2372 native_mutex_lock(&mutex->lock);
2373 while (mutex->th || (mutex->th = th, 0)) {
2374 mutex->cond_waiting++;
2375 native_cond_wait(&mutex->cond, &mutex->lock);
2377 if (th->interrupt_flag) {
2378 interrupted = Qtrue;
2379 break;
2382 native_mutex_unlock(&mutex->lock);
2383 return interrupted;
2386 static void
2387 lock_interrupt(void *ptr)
2389 mutex_t *mutex = (mutex_t *)ptr;
2390 native_mutex_lock(&mutex->lock);
2391 if (mutex->cond_waiting > 0) {
2392 native_cond_broadcast(&mutex->cond);
2393 mutex->cond_waiting = 0;
2395 native_mutex_unlock(&mutex->lock);
2399 * call-seq:
2400 * mutex.lock => true or false
2402 * Attempts to grab the lock and waits if it isn't available.
2403 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2405 VALUE
2406 rb_mutex_lock(VALUE self)
/* Mutex#lock: fast path via trylock, otherwise loop -- release the GVL,
 * wait in lock_func, and service interrupts -- until this thread owns
 * the mutex. */
2408 if (rb_mutex_trylock(self) == Qfalse) {
2409 mutex_t *mutex;
2410 rb_thread_t *th = GET_THREAD();
2411 GetMutexPtr(self, mutex);
2413 while (mutex->th != th) {
/* NOTE(review): `interrupted` is assigned inside the BLOCKING_REGION
 * macro body below -- confirm the macro always executes it */
2414 int interrupted;
2416 BLOCKING_REGION({
2417 interrupted = lock_func(th, mutex);
2418 }, lock_interrupt, mutex);
2420 if (interrupted) {
2421 RUBY_VM_CHECK_INTS();
2425 return self;
2429 * call-seq:
2430 * mutex.unlock => self
2432 * Releases the lock.
2433 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2435 VALUE
2436 rb_mutex_unlock(VALUE self)
2438 mutex_t *mutex;
2439 char *err = NULL;
2440 GetMutexPtr(self, mutex);
2442 native_mutex_lock(&mutex->lock);
2444 if (mutex->th == 0) {
2445 err = "Attempt to unlock a mutex which is not locked";
2447 else if (mutex->th != GET_THREAD()) {
2448 err = "Attempt to unlock a mutex which is locked by another thread";
2450 else {
2451 mutex->th = 0;
2452 if (mutex->cond_waiting > 0) {
2453 /* waiting thread */
2454 native_cond_signal(&mutex->cond);
2455 mutex->cond_waiting--;
2459 native_mutex_unlock(&mutex->lock);
2461 if (err) rb_raise(rb_eThreadError, err);
2463 return self;
2466 static VALUE
2467 rb_mutex_sleep_forever(VALUE time)
2469 rb_thread_sleep_forever();
2470 return Qnil;
2473 static VALUE
2474 rb_mutex_wait_for(VALUE time)
/* rb_ensure body: `time` smuggles a struct timeval* through the VALUE
 * parameter; sleep for that interval. */
2476 const struct timeval *t = (struct timeval *)time;
2477 rb_thread_wait_for(*t);
2478 return Qnil;
2481 VALUE
2482 rb_mutex_sleep(VALUE self, VALUE timeout)
/* Core of Mutex#sleep: unlock, sleep (forever when timeout is nil, else
 * for the given interval), re-lock via rb_ensure, and return the elapsed
 * whole seconds as a Fixnum. */
2484 time_t beg, end;
2485 struct timeval t;
2487 if (!NIL_P(timeout)) {
2488 t = rb_time_interval(timeout);
2490 rb_mutex_unlock(self);
2491 beg = time(0);
2492 if (NIL_P(timeout)) {
2493 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2495 else {
2496 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2498 end = time(0) - beg;
2499 return INT2FIX(end);
2503 * call-seq:
2504 * mutex.sleep(timeout = nil) => number
2506 * Releases the lock and sleeps +timeout+ seconds if it is given and
2507 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2508 * the current thread.
2510 static VALUE
2511 mutex_sleep(int argc, VALUE *argv, VALUE self)
2513 VALUE timeout;
2515 rb_scan_args(argc, argv, "01", &timeout);
2516 return rb_mutex_sleep(self, timeout);
2520 * call-seq:
2521 * mutex.synchronize { ... } => result of the block
2523 * Obtains a lock, runs the block, and releases the lock when the block
2524 * completes. See the example under +Mutex+.
2527 VALUE
2528 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2530 rb_mutex_lock(mutex);
2531 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
2535 * Document-class: Barrier
2537 typedef struct rb_thread_list_struct rb_thread_list_t;
2539 struct rb_thread_list_struct {
2540 rb_thread_t *th;
2541 rb_thread_list_t *next;
2544 static void
2545 thlist_mark(void *ptr)
2547 rb_thread_list_t *q = ptr;
2549 for (; q; q = q->next) {
2550 rb_gc_mark(q->th->self);
2554 static void
2555 thlist_free(void *ptr)
2557 rb_thread_list_t *q = ptr, *next;
2559 for (; q; q = next) {
2560 next = q->next;
2561 ruby_xfree(q);
2565 static int
2566 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
/* Pop and wake up to `maxth` live threads from the wait list (maxth == 0
 * means all).  The first thread actually woken is reported through
 * *woken_thread; returns the number woken. */
2568 int woken = 0;
2569 rb_thread_list_t *q;
2571 while ((q = *list) != NULL) {
2572 rb_thread_t *th = q->th;
/* unlink and free the node before waking its thread */
2574 *list = q->next;
2575 ruby_xfree(q);
2576 if (th->status != THREAD_KILLED) {
2577 rb_thread_ready(th);
2578 if (!woken && woken_thread) *woken_thread = th;
2579 if (++woken >= maxth && maxth) break;
2582 return woken;
2585 typedef struct {
2586 rb_thread_t *owner;
2587 rb_thread_list_t *waiting, **tail;
2588 } rb_barrier_t;
2590 static void
2591 barrier_mark(void *ptr)
2593 rb_barrier_t *b = ptr;
2595 if (b->owner) rb_gc_mark(b->owner->self);
2596 thlist_mark(b->waiting);
2599 static void
2600 barrier_free(void *ptr)
2602 rb_barrier_t *b = ptr;
2604 b->owner = 0;
2605 thlist_free(b->waiting);
2606 b->waiting = 0;
2607 ruby_xfree(ptr);
2610 static VALUE
2611 barrier_alloc(VALUE klass)
/* Allocate a Barrier owned by the creating thread, with an empty waiter
 * list; `tail` points at the slot the next waiter will be linked into. */
2613 VALUE volatile obj;
2614 rb_barrier_t *barrier;
2616 obj = Data_Make_Struct(klass, rb_barrier_t,
2617 barrier_mark, barrier_free, barrier);
2618 barrier->owner = GET_THREAD();
2619 barrier->waiting = 0;
2620 barrier->tail = &barrier->waiting;
2621 return obj;
2624 VALUE
2625 rb_barrier_new(void)
2627 return barrier_alloc(rb_cBarrier);
2630 VALUE
2631 rb_barrier_wait(VALUE self)
/* Wait on the barrier.  If the owner is gone/dead, wake one waiter and
 * hand it ownership (Qtrue when this caller took over).  A re-entrant
 * wait by the owner returns Qfalse.  Otherwise enqueue and sleep until
 * released; Qtrue means this thread became the new owner. */
2633 rb_barrier_t *barrier;
2634 rb_thread_list_t *q;
2636 Data_Get_Struct(self, rb_barrier_t, barrier);
2637 if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
2638 barrier->owner = 0;
2639 if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
2640 return Qtrue;
2642 else if (barrier->owner == GET_THREAD()) {
2643 return Qfalse;
2645 else {
/* append this thread to the tail of the waiter list and sleep */
2646 *barrier->tail = q = ALLOC(rb_thread_list_t);
2647 q->th = GET_THREAD();
2648 q->next = 0;
2649 barrier->tail = &q->next;
2650 rb_thread_sleep_forever();
2651 return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
2655 VALUE
2656 rb_barrier_release(VALUE self)
/* Release the barrier: only the owner may do so.  Wakes all waiters,
 * passing ownership to the first; returns the count woken, or Qfalse
 * when nobody was waiting. */
2658 rb_barrier_t *barrier;
2659 unsigned int n;
2661 Data_Get_Struct(self, rb_barrier_t, barrier);
2662 if (barrier->owner != GET_THREAD()) {
2663 rb_raise(rb_eThreadError, "not owned");
2665 n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
2666 return n ? UINT2NUM(n) : Qfalse;
2669 /* variables for recursive traversals */
2670 static ID recursive_key;
2672 static VALUE
2673 recursive_check(VALUE hash, VALUE obj)
/* Recursion guard: Qtrue when `obj` is already registered for the
 * current method (keyed by rb_frame_this_func) in the per-thread
 * recursion hash. */
2675 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
2676 return Qfalse;
2678 else {
2679 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
2681 if (NIL_P(list) || TYPE(list) != T_HASH)
2682 return Qfalse;
2683 if (NIL_P(rb_hash_lookup(list, obj)))
2684 return Qfalse;
2685 return Qtrue;
2689 static VALUE
2690 recursive_push(VALUE hash, VALUE obj)
/* Register `obj` as in-progress for the current method, creating the
 * per-thread recursion hash and per-method sub-hash on demand.  Returns
 * the (possibly newly created) recursion hash. */
2692 VALUE list, sym;
2694 sym = ID2SYM(rb_frame_this_func());
2695 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
/* first use on this thread: create and stash the outer hash */
2696 hash = rb_hash_new();
2697 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
2698 list = Qnil;
2700 else {
2701 list = rb_hash_aref(hash, sym);
2703 if (NIL_P(list) || TYPE(list) != T_HASH) {
2704 list = rb_hash_new();
2705 rb_hash_aset(hash, sym, list);
2707 rb_hash_aset(list, obj, Qtrue);
2708 return hash;
2711 static void
2712 recursive_pop(VALUE hash, VALUE obj)
/* Unregister `obj` for the current method.  A missing or malformed
 * recursion table here means push/pop got out of sync -- raise with the
 * offending method and thread names. */
2714 VALUE list, sym;
2716 sym = ID2SYM(rb_frame_this_func());
2717 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
2718 VALUE symname;
2719 VALUE thrname;
2720 symname = rb_inspect(sym);
2721 thrname = rb_inspect(rb_thread_current());
2723 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
2724 StringValuePtr(symname), StringValuePtr(thrname));
2726 list = rb_hash_aref(hash, sym);
2727 if (NIL_P(list) || TYPE(list) != T_HASH) {
2728 VALUE symname = rb_inspect(sym);
2729 VALUE thrname = rb_inspect(rb_thread_current());
2730 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
2731 StringValuePtr(symname), StringValuePtr(thrname));
2733 rb_hash_delete(list, obj);
2736 VALUE
2737 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
/* Run func(obj, arg, recursive_p) with cycle detection keyed on the
 * object id.  On re-entry for the same obj the callback is invoked with
 * Qtrue so it can short-circuit.  The pop is performed even when func
 * raises (via PUSH_TAG/EXEC_TAG), and the jump is then re-thrown. */
2739 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
2740 VALUE objid = rb_obj_id(obj);
2742 if (recursive_check(hash, objid)) {
2743 return (*func) (obj, arg, Qtrue);
2745 else {
2746 VALUE result = Qundef;
2747 int state;
2749 hash = recursive_push(hash, objid);
2750 PUSH_TAG();
2751 if ((state = EXEC_TAG()) == 0) {
2752 result = (*func) (obj, arg, Qfalse);
2754 POP_TAG();
2755 recursive_pop(hash, objid);
2756 if (state)
2757 JUMP_TAG(state);
2758 return result;
2762 /* tracer */
2764 static rb_event_hook_t *
2765 alloc_event_fook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
2767 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
2768 hook->func = func;
2769 hook->flag = events;
2770 hook->data = data;
2771 return hook;
2774 static void
2775 thread_reset_event_flags(rb_thread_t *th)
2777 rb_event_hook_t *hook = th->event_hooks;
2778 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
2780 while (hook) {
2781 flag |= hook->flag;
2782 hook = hook->next;
2786 void
2787 rb_thread_add_event_hook(rb_thread_t *th,
2788 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
/* Prepend a per-thread event hook and refresh the thread's cached event
 * mask from its hook list. */
2790 rb_event_hook_t *hook = alloc_event_fook(func, events, data);
2791 hook->next = th->event_hooks;
2792 th->event_hooks = hook;
2793 thread_reset_event_flags(th);
2796 static int
2797 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
2799 VALUE thval = key;
2800 rb_thread_t *th;
2801 GetThreadPtr(thval, th);
2803 if (flag) {
2804 th->event_flags |= RUBY_EVENT_VM;
2806 else {
2807 th->event_flags &= (~RUBY_EVENT_VM);
2809 return ST_CONTINUE;
2812 static void
2813 set_threads_event_flags(int flag)
2815 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
2818 void
2819 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
/* Prepend a VM-global event hook and flip the VM-event bit on in every
 * living thread so they start dispatching events. */
2821 rb_event_hook_t *hook = alloc_event_fook(func, events, data);
2822 rb_vm_t *vm = GET_VM();
2824 hook->next = vm->event_hooks;
2825 vm->event_hooks = hook;
2827 set_threads_event_flags(1);
2830 static int
2831 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
/* Unlink and free every hook whose function matches `func` (func == 0
 * matches all).  NOTE(review): always returns -1, even when hooks were
 * removed -- callers only propagate this value. */
2833 rb_event_hook_t *prev = NULL, *hook = *root, *next;
2835 while (hook) {
2836 next = hook->next;
2837 if (func == 0 || hook->func == func) {
2838 if (prev) {
2839 prev->next = hook->next;
2841 else {
2842 *root = hook->next;
2844 xfree(hook);
2846 else {
2847 prev = hook;
2849 hook = next;
2851 return -1;
/* Remove matching per-thread hooks and refresh the thread's event mask. */
2855 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
2857 int ret = remove_event_hook(&th->event_hooks, func);
2858 thread_reset_event_flags(th);
2859 return ret;
/* Remove matching VM-global hooks; when the last global hook goes away,
 * clear the VM-event bit on all living threads. */
2863 rb_remove_event_hook(rb_event_hook_func_t func)
2865 rb_vm_t *vm = GET_VM();
2866 rb_event_hook_t *hook = vm->event_hooks;
2867 int ret = remove_event_hook(&vm->event_hooks, func);
2869 if (hook != NULL && vm->event_hooks == NULL) {
2870 set_threads_event_flags(0);
2873 return ret;
2876 static int
2877 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
2879 rb_thread_t *th;
2880 GetThreadPtr((VALUE)key, th);
2881 rb_thread_remove_event_hook(th, 0);
2882 return ST_CONTINUE;
2885 void
2886 rb_clear_trace_func(void)
2888 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
2889 rb_remove_event_hook(0);
2892 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
2895 * call-seq:
2896 * set_trace_func(proc) => proc
2897 * set_trace_func(nil) => nil
2899 * Establishes _proc_ as the handler for tracing, or disables
2900 * tracing if the parameter is +nil+. _proc_ takes up
2901 * to six parameters: an event name, a filename, a line number, an
2902 * object id, a binding, and the name of a class. _proc_ is
2903 * invoked whenever an event occurs. Events are: <code>c-call</code>
2904 * (call a C-language routine), <code>c-return</code> (return from a
2905 * C-language routine), <code>call</code> (call a Ruby method),
2906 * <code>class</code> (start a class or module definition),
2907 * <code>end</code> (finish a class or module definition),
2908 * <code>line</code> (execute code on a new line), <code>raise</code>
2909 * (raise an exception), and <code>return</code> (return from a Ruby
2910 * method). Tracing is disabled within the context of _proc_.
2912 * class Test
2913 * def test
2914 * a = 1
2915 * b = 2
2916 * end
2917 * end
2919 * set_trace_func proc { |event, file, line, id, binding, classname|
 *     printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
 *   }
2922 * t = Test.new
2923 * t.test
2925 * line prog.rb:11 false
2926 * c-call prog.rb:11 new Class
2927 * c-call prog.rb:11 initialize Object
2928 * c-return prog.rb:11 initialize Object
2929 * c-return prog.rb:11 new Class
2930 * line prog.rb:12 false
2931 * call prog.rb:2 test Test
2932 * line prog.rb:3 test Test
2933 * line prog.rb:4 test Test
2934 * return prog.rb:4 test Test
2937 static VALUE
2938 set_trace_func(VALUE obj, VALUE trace)
2940 rb_remove_event_hook(call_trace_func);
2942 if (NIL_P(trace)) {
2943 return Qnil;
2946 if (!rb_obj_is_proc(trace)) {
2947 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
2950 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
2951 return trace;
2954 static void
2955 thread_add_trace_func(rb_thread_t *th, VALUE trace)
2957 if (!rb_obj_is_proc(trace)) {
2958 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
2961 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
2964 static VALUE
2965 thread_add_trace_func_m(VALUE obj, VALUE trace)
2967 rb_thread_t *th;
2968 GetThreadPtr(obj, th);
2969 thread_add_trace_func(th, trace);
2970 return trace;
2973 static VALUE
2974 thread_set_trace_func_m(VALUE obj, VALUE trace)
2976 rb_thread_t *th;
2977 GetThreadPtr(obj, th);
2978 rb_thread_remove_event_hook(th, call_trace_func);
2980 if (!NIL_P(trace)) {
2981 return Qnil;
2983 thread_add_trace_func(th, trace);
2984 return trace;
2987 static char *
2988 get_event_name(rb_event_flag_t event)
2990 switch (event) {
2991 case RUBY_EVENT_LINE:
2992 return "line";
2993 case RUBY_EVENT_CLASS:
2994 return "class";
2995 case RUBY_EVENT_END:
2996 return "end";
2997 case RUBY_EVENT_CALL:
2998 return "call";
2999 case RUBY_EVENT_RETURN:
3000 return "return";
3001 case RUBY_EVENT_C_CALL:
3002 return "c-call";
3003 case RUBY_EVENT_C_RETURN:
3004 return "c-return";
3005 case RUBY_EVENT_RAISE:
3006 return "raise";
3007 default:
3008 return "unknown";
3012 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
3014 struct call_trace_func_args {
3015 rb_event_flag_t event;
3016 VALUE proc;
3017 VALUE self;
3018 ID id;
3019 VALUE klass;
3022 static VALUE
3023 call_trace_proc(VALUE args, int tracing)
3025 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3026 VALUE eventname = rb_str_new2(get_event_name(p->event));
3027 VALUE filename = rb_str_new2(rb_sourcefile());
3028 int line = rb_sourceline();
3029 ID id = 0;
3030 VALUE klass = 0;
3032 if (p->event == RUBY_EVENT_C_CALL ||
3033 p->event == RUBY_EVENT_C_RETURN) {
3034 id = p->id;
3035 klass = p->klass;
3037 else {
3038 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
3040 if (id == ID_ALLOCATOR)
3041 return Qnil;
3042 if (klass) {
3043 if (TYPE(klass) == T_ICLASS) {
3044 klass = RBASIC(klass)->klass;
3046 else if (FL_TEST(klass, FL_SINGLETON)) {
3047 klass = rb_iv_get(klass, "__attached__");
3050 return rb_proc_call(p->proc, rb_ary_new3(6,
3051 eventname, filename, INT2FIX(line),
3052 id ? ID2SYM(id) : Qnil,
3053 p->self ? rb_binding_new() : Qnil,
3054 klass ? klass : Qnil));
3057 static void
3058 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3060 struct call_trace_func_args args;
3062 args.event = event;
3063 args.proc = proc;
3064 args.self = self;
3065 args.id = id;
3066 args.klass = klass;
3067 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
3070 VALUE
3071 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3073 rb_thread_t *th = GET_THREAD();
3074 int state, raised, tracing;
3075 VALUE result = Qnil;
3077 if ((tracing = th->tracing) != 0 && !always) {
3078 return Qnil;
3080 else {
3081 th->tracing = 1;
3084 raised = rb_thread_reset_raised(th);
3086 PUSH_TAG();
3087 if ((state = EXEC_TAG()) == 0) {
3088 result = (*func)(arg, tracing);
3091 if (raised) {
3092 rb_thread_set_raised(th);
3094 POP_TAG();
3096 th->tracing = tracing;
3097 if (state) {
3098 JUMP_TAG(state);
3101 return result;
3105 * +Thread+ encapsulates the behavior of a thread of
3106 * execution, including the main thread of the Ruby script.
3108 * In the descriptions of the methods in this class, the parameter _sym_
3109 * refers to a symbol, which is either a quoted string or a
3110 * +Symbol+ (such as <code>:name</code>).
3113 void
3114 Init_Thread(void)
3116 VALUE cThGroup;
3118 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3119 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3120 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3121 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3122 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3123 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3124 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3125 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3126 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3127 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3128 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3129 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
3130 #if THREAD_DEBUG < 0
3131 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3132 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
3133 #endif
3135 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3136 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3137 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3138 rb_define_method(rb_cThread, "value", thread_value, 0);
3139 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3140 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3141 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3142 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3143 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3144 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3145 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3146 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3147 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3148 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3149 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3150 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3151 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3152 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3153 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3154 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3155 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3156 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3158 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
3160 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3161 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3162 rb_define_method(cThGroup, "list", thgroup_list, 0);
3163 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3164 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3165 rb_define_method(cThGroup, "add", thgroup_add, 1);
3168 rb_thread_t *th = GET_THREAD();
3169 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3170 rb_define_const(cThGroup, "Default", th->thgroup);
3173 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3174 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3175 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3176 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3177 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3178 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3179 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3180 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
3182 recursive_key = rb_intern("__recursive_key__");
3183 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3185 /* trace */
3186 rb_define_global_function("set_trace_func", set_trace_func, 1);
3187 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
3188 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
3190 /* init thread core */
3191 Init_native_thread();
3193 /* main thread setting */
3195 /* acquire global interpreter lock */
3196 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_interpreter_lock;
3197 native_mutex_initialize(lp);
3198 native_mutex_lock(lp);
3199 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
3203 rb_thread_create_timer_thread();
3207 ruby_native_thread_p(void)
3209 rb_thread_t *rb_thread_check_ptr(rb_thread_t *ptr);
3210 rb_thread_t *th = ruby_thread_from_native();
3212 return th ? Qtrue : Qfalse;