add test for Struct.new(0).
[ruby-svn.git] / thread.c
blobf0f2dce870190939933c56ccc1f095ce882bbf0e
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as traditional ruby thread.

  model 2: Native Thread with Giant VM lock
    Using pthread (or Windows thread) and Ruby threads run concurrent.

  model 3: Native Thread with fine grain lock
    Using pthread and Ruby threads run concurrent or parallel.

  ------------------------------------------------------------------------

  model 2:
    Only a thread which has the GVL (Global VM Lock) can run.  On thread
    scheduling, the running thread releases the GVL.  If the running thread
    starts a blocking operation, it must release the GVL so that another
    thread can continue.  After the blocking operation, the thread
    must check for interrupts (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

  ------------------------------------------------------------------------

  model 3:
    Every thread runs concurrently or in parallel, and exclusive access
    control is needed to touch shared objects.  For example, to access a
    String object or an Array object, a fine grain lock must be taken
    every time.
 */
45 /* for model 2 */
47 #include "eval_intern.h"
48 #include "vm.h"
49 #include "gc.h"
51 #ifndef THREAD_DEBUG
52 #define THREAD_DEBUG 0
53 #endif
55 VALUE rb_cMutex;
56 VALUE rb_cBarrier;
58 static void sleep_timeval(rb_thread_t *th, struct timeval time);
59 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
60 static void sleep_forever(rb_thread_t *th);
61 static double timeofday(void);
62 struct timeval rb_time_interval(VALUE);
63 static int rb_thread_dead(rb_thread_t *th);
65 void rb_signal_exec(rb_thread_t *th, int sig);
66 void rb_disable_interrupt(void);
68 static VALUE eKillSignal = INT2FIX(0);
69 static VALUE eTerminateSignal = INT2FIX(1);
70 static volatile int system_working = 1;
72 inline static void
73 st_delete_wrap(st_table * table, VALUE key)
75 st_delete(table, (st_data_t *) & key, 0);
78 /********************************************************************************/
80 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
82 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *ptr,
83 rb_unblock_function_t **oldfunc, void **oldptr);
85 #define GVL_UNLOCK_BEGIN() do { \
86 rb_thread_t *_th_stored = GET_THREAD(); \
87 rb_gc_save_machine_context(_th_stored); \
88 native_mutex_unlock(&_th_stored->vm->global_interpreter_lock)
90 #define GVL_UNLOCK_END() \
91 native_mutex_lock(&_th_stored->vm->global_interpreter_lock); \
92 rb_thread_set_current(_th_stored); \
93 } while(0)
95 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
96 rb_thread_t *__th = GET_THREAD(); \
97 int __prev_status = __th->status; \
98 rb_unblock_function_t *__oldubf; \
99 void *__oldubfarg; \
100 set_unblock_function(__th, ubf, ubfarg, &__oldubf, &__oldubfarg); \
101 __th->status = THREAD_STOPPED; \
102 thread_debug("enter blocking region (%p)\n", __th); \
103 GVL_UNLOCK_BEGIN(); {\
104 exec; \
106 GVL_UNLOCK_END(); \
107 thread_debug("leave blocking region (%p)\n", __th); \
108 remove_signal_thread_list(__th); \
109 set_unblock_function(__th, __oldubf, __oldubfarg, 0, 0); \
110 if (__th->status == THREAD_STOPPED) { \
111 __th->status = __prev_status; \
113 RUBY_VM_CHECK_INTS(); \
114 } while(0)
116 #if THREAD_DEBUG
117 void rb_thread_debug(const char *fmt, ...);
119 # if THREAD_DEBUG < 0
120 static int rb_thread_debug_enabled;
122 static VALUE
123 rb_thread_s_debug(void)
125 return INT2NUM(rb_thread_debug_enabled);
128 static VALUE
129 rb_thread_s_debug_set(VALUE self, VALUE val)
131 rb_thread_debug_enabled = RTEST(val);
132 return val;
134 # else
135 # define rb_thread_debug_enabled THREAD_DEBUG
136 # endif
137 #define thread_debug rb_thread_debug
138 #else
139 #define thread_debug if(0)printf
140 #endif
142 #ifndef __ia64
143 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
144 #endif
145 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
146 VALUE *register_stack_start));
148 #if defined(_WIN32)
149 #include "thread_win32.c"
151 #define DEBUG_OUT() \
152 WaitForSingleObject(&debug_mutex, INFINITE); \
153 printf("%p - %s", GetCurrentThreadId(), buf); \
154 fflush(stdout); \
155 ReleaseMutex(&debug_mutex);
157 #elif defined(HAVE_PTHREAD_H)
158 #include "thread_pthread.c"
160 #define DEBUG_OUT() \
161 pthread_mutex_lock(&debug_mutex); \
162 printf("%p - %s", pthread_self(), buf); \
163 fflush(stdout); \
164 pthread_mutex_unlock(&debug_mutex);
166 #else
167 #error "unsupported thread type"
168 #endif
170 #if THREAD_DEBUG
171 static int debug_mutex_initialized = 1;
172 static rb_thread_lock_t debug_mutex;
174 void
175 rb_thread_debug(const char *fmt, ...)
177 va_list args;
178 char buf[BUFSIZ];
180 if (!rb_thread_debug_enabled) return;
182 if (debug_mutex_initialized == 1) {
183 debug_mutex_initialized = 0;
184 native_mutex_initialize(&debug_mutex);
187 va_start(args, fmt);
188 vsnprintf(buf, BUFSIZ, fmt, args);
189 va_end(args);
191 DEBUG_OUT();
193 #endif
196 static void
197 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
198 rb_unblock_function_t **oldfunc, void **oldarg)
200 check_ints:
201 RUBY_VM_CHECK_INTS(); /* check signal or so */
202 native_mutex_lock(&th->interrupt_lock);
203 if (th->interrupt_flag) {
204 native_mutex_unlock(&th->interrupt_lock);
205 goto check_ints;
207 else {
208 if (oldfunc) *oldfunc = th->unblock_function;
209 if (oldarg) *oldarg = th->unblock_function_arg;
210 th->unblock_function = func;
211 th->unblock_function_arg = arg;
213 native_mutex_unlock(&th->interrupt_lock);
216 static void
217 rb_thread_interrupt(rb_thread_t *th)
219 native_mutex_lock(&th->interrupt_lock);
220 RUBY_VM_SET_INTERRUPT(th);
221 if (th->unblock_function) {
222 (th->unblock_function)(th->unblock_function_arg);
224 else {
225 /* none */
227 native_mutex_unlock(&th->interrupt_lock);
231 static int
232 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
234 VALUE thval = key;
235 rb_thread_t *th;
236 GetThreadPtr(thval, th);
238 if (th != main_thread) {
239 thread_debug("terminate_i: %p\n", th);
240 rb_thread_interrupt(th);
241 th->thrown_errinfo = eTerminateSignal;
242 th->status = THREAD_TO_KILL;
244 else {
245 thread_debug("terminate_i: main thread (%p)\n", th);
247 return ST_CONTINUE;
250 void
251 rb_thread_terminate_all(void)
253 rb_thread_t *th = GET_THREAD(); /* main thread */
254 rb_vm_t *vm = th->vm;
255 if (vm->main_thread != th) {
256 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
259 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
260 st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
262 while (!rb_thread_alone()) {
263 PUSH_TAG();
264 if (EXEC_TAG() == 0) {
265 rb_thread_schedule();
267 else {
268 /* ignore exception */
270 POP_TAG();
272 system_working = 0;
275 static void
276 thread_cleanup_func(void *th_ptr)
278 rb_thread_t *th = th_ptr;
279 th->status = THREAD_KILLED;
280 th->machine_stack_start = th->machine_stack_end = 0;
281 #ifdef __ia64
282 th->machine_register_stack_start = th->machine_register_stack_end = 0;
283 #endif
284 native_thread_destroy(th);
287 extern void ruby_error_print(void);
288 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
289 void rb_thread_recycle_stack_release(VALUE *);
291 static int
292 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
294 int state;
295 VALUE args = th->first_args;
296 rb_proc_t *proc;
297 rb_thread_t *join_th;
298 rb_thread_t *main_th;
299 VALUE errinfo = Qnil;
301 th->machine_stack_start = stack_start;
302 #ifdef __ia64
303 th->machine_register_stack_start = register_stack_start;
304 #endif
305 thread_debug("thread start: %p\n", th);
307 native_mutex_lock(&th->vm->global_interpreter_lock);
309 thread_debug("thread start (get lock): %p\n", th);
310 rb_thread_set_current(th);
312 TH_PUSH_TAG(th);
313 if ((state = EXEC_TAG()) == 0) {
314 SAVE_ROOT_JMPBUF(th, {
315 if (th->first_proc) {
316 GetProcPtr(th->first_proc, proc);
317 th->errinfo = Qnil;
318 th->local_lfp = proc->block.lfp;
319 th->local_svar = Qnil;
320 th->value = vm_invoke_proc(th, proc, proc->block.self,
321 RARRAY_LEN(args), RARRAY_PTR(args), 0);
323 else {
324 th->value = (*th->first_func)((void *)th->first_args);
328 else {
329 if (th->safe_level < 4 &&
330 (th->vm->thread_abort_on_exception ||
331 th->abort_on_exception || RTEST(ruby_debug))) {
332 errinfo = th->errinfo;
333 if (NIL_P(errinfo)) errinfo = rb_errinfo();
335 th->value = Qnil;
338 th->status = THREAD_KILLED;
339 thread_debug("thread end: %p\n", th);
341 main_th = th->vm->main_thread;
342 if (th != main_th) {
343 if (TYPE(errinfo) == T_OBJECT) {
344 /* treat with normal error object */
345 rb_thread_raise(1, &errinfo, main_th);
348 TH_POP_TAG();
350 st_delete_wrap(th->vm->living_threads, th->self);
352 /* wake up joinning threads */
353 join_th = th->join_list_head;
354 while (join_th) {
355 if (join_th == main_th) errinfo = Qnil;
356 rb_thread_interrupt(join_th);
357 join_th = join_th->join_list_next;
359 st_delete_wrap(th->vm->living_threads, th->self);
361 if (!th->root_fiber) {
362 rb_thread_recycle_stack_release(th->stack);
363 th->stack = 0;
366 thread_cleanup_func(th);
367 native_mutex_unlock(&th->vm->global_interpreter_lock);
369 return 0;
372 static VALUE
373 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
375 rb_thread_t *th;
377 if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
378 rb_raise(rb_eThreadError,
379 "can't start a new thread (frozen ThreadGroup)");
381 GetThreadPtr(thval, th);
383 /* setup thread environment */
384 th->first_args = args;
385 th->first_proc = fn ? Qfalse : rb_block_proc();
386 th->first_func = fn;
388 th->priority = GET_THREAD()->priority;
389 th->thgroup = GET_THREAD()->thgroup;
391 native_mutex_initialize(&th->interrupt_lock);
392 /* kick thread */
393 st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
394 native_thread_create(th);
395 return thval;
398 static VALUE
399 thread_s_new(int argc, VALUE *argv, VALUE klass)
401 rb_thread_t *th;
402 VALUE thread = rb_thread_alloc(klass);
403 rb_obj_call_init(thread, argc, argv);
404 GetThreadPtr(thread, th);
405 if (!th->first_args) {
406 rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
407 rb_class2name(klass));
409 return thread;
413 * call-seq:
414 * Thread.start([args]*) {|args| block } => thread
415 * Thread.fork([args]*) {|args| block } => thread
417 * Basically the same as <code>Thread::new</code>. However, if class
418 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
419 * subclass will not invoke the subclass's <code>initialize</code> method.
422 static VALUE
423 thread_start(VALUE klass, VALUE args)
425 return thread_create_core(rb_thread_alloc(klass), args, 0);
428 static VALUE
429 thread_initialize(VALUE thread, VALUE args)
431 rb_thread_t *th;
432 if (!rb_block_given_p()) {
433 rb_raise(rb_eThreadError, "must be called with a block");
435 GetThreadPtr(thread, th);
436 if (th->first_args) {
437 VALUE rb_proc_location(VALUE self);
438 VALUE proc = th->first_proc, line, loc;
439 const char *file;
440 if (!proc || !RTEST(loc = rb_proc_location(proc))) {
441 rb_raise(rb_eThreadError, "already initialized thread");
443 file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
444 if (NIL_P(line = RARRAY_PTR(loc)[1])) {
445 rb_raise(rb_eThreadError, "already initialized thread - %s",
446 file);
448 rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
449 file, NUM2INT(line));
451 return thread_create_core(thread, args, 0);
454 VALUE
455 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
457 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
461 /* +infty, for this purpose */
462 #define DELAY_INFTY 1E30
464 struct join_arg {
465 rb_thread_t *target, *waiting;
466 double limit;
467 int forever;
470 static VALUE
471 remove_from_join_list(VALUE arg)
473 struct join_arg *p = (struct join_arg *)arg;
474 rb_thread_t *target_th = p->target, *th = p->waiting;
476 if (target_th->status != THREAD_KILLED) {
477 rb_thread_t **pth = &target_th->join_list_head;
479 while (*pth) {
480 if (*pth == th) {
481 *pth = th->join_list_next;
482 break;
484 pth = &(*pth)->join_list_next;
488 return Qnil;
491 static VALUE
492 thread_join_sleep(VALUE arg)
494 struct join_arg *p = (struct join_arg *)arg;
495 rb_thread_t *target_th = p->target, *th = p->waiting;
496 double now, limit = p->limit;
498 while (target_th->status != THREAD_KILLED) {
499 if (p->forever) {
500 sleep_forever(th);
502 else {
503 now = timeofday();
504 if (now > limit) {
505 thread_debug("thread_join: timeout (thid: %p)\n",
506 (void *)target_th->thread_id);
507 return Qfalse;
509 sleep_wait_for_interrupt(th, limit - now);
511 thread_debug("thread_join: interrupted (thid: %p)\n",
512 (void *)target_th->thread_id);
514 return Qtrue;
517 static VALUE
518 thread_join(rb_thread_t *target_th, double delay)
520 rb_thread_t *th = GET_THREAD();
521 struct join_arg arg;
523 arg.target = target_th;
524 arg.waiting = th;
525 arg.limit = timeofday() + delay;
526 arg.forever = delay == DELAY_INFTY;
528 thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
530 if (target_th->status != THREAD_KILLED) {
531 th->join_list_next = target_th->join_list_head;
532 target_th->join_list_head = th;
533 if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
534 remove_from_join_list, (VALUE)&arg)) {
535 return Qnil;
539 thread_debug("thread_join: success (thid: %p)\n",
540 (void *)target_th->thread_id);
542 if (target_th->errinfo != Qnil) {
543 VALUE err = target_th->errinfo;
545 if (FIXNUM_P(err)) {
546 /* */
548 else if (TYPE(target_th->errinfo) == T_NODE) {
549 rb_exc_raise(vm_make_jump_tag_but_local_jump(
550 GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
552 else {
553 /* normal exception */
554 rb_exc_raise(err);
557 return target_th->self;
561 * call-seq:
562 * thr.join => thr
563 * thr.join(limit) => thr
565 * The calling thread will suspend execution and run <i>thr</i>. Does not
566 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
567 * the time limit expires, <code>nil</code> will be returned, otherwise
568 * <i>thr</i> is returned.
570 * Any threads not joined will be killed when the main program exits. If
571 * <i>thr</i> had previously raised an exception and the
572 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
573 * (so the exception has not yet been processed) it will be processed at this
574 * time.
576 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
577 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
578 * x.join # Let x thread finish, a will be killed on exit.
580 * <em>produces:</em>
582 * axyz
584 * The following example illustrates the <i>limit</i> parameter.
586 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
587 * puts "Waiting" until y.join(0.15)
589 * <em>produces:</em>
591 * tick...
592 * Waiting
593 * tick...
594 * Waitingtick...
597 * tick...
600 static VALUE
601 thread_join_m(int argc, VALUE *argv, VALUE self)
603 rb_thread_t *target_th;
604 double delay = DELAY_INFTY;
605 VALUE limit;
607 GetThreadPtr(self, target_th);
609 rb_scan_args(argc, argv, "01", &limit);
610 if (!NIL_P(limit)) {
611 delay = rb_num2dbl(limit);
614 return thread_join(target_th, delay);
618 * call-seq:
619 * thr.value => obj
621 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
622 * its value.
624 * a = Thread.new { 2 + 2 }
625 * a.value #=> 4
628 static VALUE
629 thread_value(VALUE self)
631 rb_thread_t *th;
632 GetThreadPtr(self, th);
633 thread_join(th, DELAY_INFTY);
634 return th->value;
/*
 * Thread Scheduling
 */

/* Convert a duration in seconds (double) to a struct timeval,
 * normalizing so that 0 <= tv_usec < 1e6 (tv_sec may go negative). */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
655 static void
656 sleep_forever(rb_thread_t *th)
658 native_sleep(th, 0);
661 static void
662 sleep_timeval(rb_thread_t *th, struct timeval tv)
664 native_sleep(th, &tv);
667 void
668 rb_thread_sleep_forever()
670 thread_debug("rb_thread_sleep_forever\n");
671 sleep_forever(GET_THREAD());
674 static double
675 timeofday(void)
677 struct timeval tv;
678 gettimeofday(&tv, NULL);
679 return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
682 static void
683 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
685 sleep_timeval(th, double2timeval(sleepsec));
688 static void
689 sleep_for_polling(rb_thread_t *th)
691 struct timeval time;
692 time.tv_sec = 0;
693 time.tv_usec = 100 * 1000; /* 0.1 sec */
694 sleep_timeval(th, time);
697 void
698 rb_thread_wait_for(struct timeval time)
700 rb_thread_t *th = GET_THREAD();
701 sleep_timeval(th, time);
704 void
705 rb_thread_polling(void)
707 RUBY_VM_CHECK_INTS();
708 if (!rb_thread_alone()) {
709 rb_thread_t *th = GET_THREAD();
710 sleep_for_polling(th);
714 struct timeval rb_time_timeval();
716 void
717 rb_thread_sleep(int sec)
719 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
722 void
723 rb_thread_schedule(void)
725 thread_debug("rb_thread_schedule\n");
726 if (!rb_thread_alone()) {
727 rb_thread_t *th = GET_THREAD();
729 thread_debug("rb_thread_schedule/switch start\n");
731 rb_gc_save_machine_context(th);
732 native_mutex_unlock(&th->vm->global_interpreter_lock);
734 native_thread_yield();
736 native_mutex_lock(&th->vm->global_interpreter_lock);
738 rb_thread_set_current(th);
739 thread_debug("rb_thread_schedule/switch done\n");
741 RUBY_VM_CHECK_INTS();
745 int rb_thread_critical; /* TODO: dummy variable */
747 VALUE
748 rb_thread_blocking_region(
749 rb_blocking_function_t *func, void *data1,
750 rb_unblock_function_t *ubf, void *data2)
752 VALUE val;
753 rb_thread_t *th = GET_THREAD();
755 if (ubf == RB_UBF_DFL) {
756 ubf = ubf_select;
757 data2 = th;
760 BLOCKING_REGION({
761 val = func(data1);
762 }, ubf, data2);
764 return val;
768 * call-seq:
769 * Thread.pass => nil
771 * Invokes the thread scheduler to pass execution to another thread.
773 * a = Thread.new { print "a"; Thread.pass;
774 * print "b"; Thread.pass;
775 * print "c" }
776 * b = Thread.new { print "x"; Thread.pass;
777 * print "y"; Thread.pass;
778 * print "z" }
779 * a.join
780 * b.join
782 * <em>produces:</em>
784 * axbycz
787 static VALUE
788 thread_s_pass(VALUE klass)
790 rb_thread_schedule();
791 return Qnil;
798 void
799 rb_thread_execute_interrupts(rb_thread_t *th)
801 while (th->interrupt_flag) {
802 int status = th->status;
803 th->status = THREAD_RUNNABLE;
804 th->interrupt_flag = 0;
806 /* signal handling */
807 if (th->exec_signal) {
808 int sig = th->exec_signal;
809 th->exec_signal = 0;
810 rb_signal_exec(th, sig);
813 /* exception from another thread */
814 if (th->thrown_errinfo) {
815 VALUE err = th->thrown_errinfo;
816 th->thrown_errinfo = 0;
817 thread_debug("rb_thread_execute_interrupts: %ld\n", err);
819 if (err == eKillSignal || err == eTerminateSignal) {
820 th->errinfo = INT2FIX(TAG_FATAL);
821 TH_JUMP_TAG(th, TAG_FATAL);
823 else {
824 rb_exc_raise(err);
827 th->status = status;
829 /* thread pass */
830 rb_thread_schedule();
832 EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
836 void
837 rb_gc_mark_threads(void)
839 /* TODO: remove */
842 /*****************************************************/
844 static void
845 rb_thread_ready(rb_thread_t *th)
847 rb_thread_interrupt(th);
850 static VALUE
851 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
853 VALUE exc;
855 again:
856 if (rb_thread_dead(th)) {
857 return Qnil;
860 if (th->thrown_errinfo != 0 || th->raised_flag) {
861 rb_thread_schedule();
862 goto again;
865 exc = rb_make_exception(argc, argv);
866 th->thrown_errinfo = exc;
867 rb_thread_ready(th);
868 return Qnil;
871 void
872 rb_thread_signal_raise(void *thptr, int sig)
874 VALUE argv[2];
875 rb_thread_t *th = thptr;
877 argv[0] = rb_eSignal;
878 argv[1] = INT2FIX(sig);
879 rb_thread_raise(2, argv, th->vm->main_thread);
882 void
883 rb_thread_signal_exit(void *thptr)
885 VALUE argv[2];
886 rb_thread_t *th = thptr;
888 argv[0] = rb_eSystemExit;
889 argv[1] = rb_str_new2("exit");
890 rb_thread_raise(2, argv, th->vm->main_thread);
894 rb_thread_set_raised(rb_thread_t *th)
896 if (th->raised_flag & RAISED_EXCEPTION) {
897 return 1;
899 th->raised_flag |= RAISED_EXCEPTION;
900 return 0;
904 rb_thread_reset_raised(rb_thread_t *th)
906 if (!(th->raised_flag & RAISED_EXCEPTION)) {
907 return 0;
909 th->raised_flag &= ~RAISED_EXCEPTION;
910 return 1;
913 void
914 rb_thread_fd_close(int fd)
916 /* TODO: fix me */
920 * call-seq:
921 * thr.raise(exception)
923 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
924 * caller does not have to be <i>thr</i>.
926 * Thread.abort_on_exception = true
927 * a = Thread.new { sleep(200) }
928 * a.raise("Gotcha")
930 * <em>produces:</em>
932 * prog.rb:3: Gotcha (RuntimeError)
933 * from prog.rb:2:in `initialize'
934 * from prog.rb:2:in `new'
935 * from prog.rb:2
938 static VALUE
939 thread_raise_m(int argc, VALUE *argv, VALUE self)
941 rb_thread_t *th;
942 GetThreadPtr(self, th);
943 rb_thread_raise(argc, argv, th);
944 return Qnil;
949 * call-seq:
950 * thr.exit => thr or nil
951 * thr.kill => thr or nil
952 * thr.terminate => thr or nil
954 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
955 * is already marked to be killed, <code>exit</code> returns the
956 * <code>Thread</code>. If this is the main thread, or the last thread, exits
957 * the process.
960 VALUE
961 rb_thread_kill(VALUE thread)
963 rb_thread_t *th;
965 GetThreadPtr(thread, th);
967 if (th != GET_THREAD() && th->safe_level < 4) {
968 rb_secure(4);
970 if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
971 return thread;
973 if (th == th->vm->main_thread) {
974 rb_exit(EXIT_SUCCESS);
977 thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);
979 rb_thread_interrupt(th);
980 th->thrown_errinfo = eKillSignal;
981 th->status = THREAD_TO_KILL;
983 return thread;
988 * call-seq:
989 * Thread.kill(thread) => thread
991 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
993 * count = 0
994 * a = Thread.new { loop { count += 1 } }
995 * sleep(0.1) #=> 0
996 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
997 * count #=> 93947
998 * a.alive? #=> false
1001 static VALUE
1002 rb_thread_s_kill(VALUE obj, VALUE th)
1004 return rb_thread_kill(th);
1009 * call-seq:
1010 * Thread.exit => thread
1012 * Terminates the currently running thread and schedules another thread to be
1013 * run. If this thread is already marked to be killed, <code>exit</code>
1014 * returns the <code>Thread</code>. If this is the main thread, or the last
1015 * thread, exit the process.
1018 static VALUE
1019 rb_thread_exit(void)
1021 return rb_thread_kill(GET_THREAD()->self);
1026 * call-seq:
1027 * thr.wakeup => thr
1029 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1030 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1032 * c = Thread.new { Thread.stop; puts "hey!" }
1033 * c.wakeup
1035 * <em>produces:</em>
1037 * hey!
1040 VALUE
1041 rb_thread_wakeup(VALUE thread)
1043 rb_thread_t *th;
1044 GetThreadPtr(thread, th);
1046 if (th->status == THREAD_KILLED) {
1047 rb_raise(rb_eThreadError, "killed thread");
1049 rb_thread_ready(th);
1050 return thread;
1055 * call-seq:
1056 * thr.run => thr
1058 * Wakes up <i>thr</i>, making it eligible for scheduling.
1060 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1061 * Thread.pass
1062 * puts "Got here"
1063 * a.run
1064 * a.join
1066 * <em>produces:</em>
1069 * Got here
1073 VALUE
1074 rb_thread_run(VALUE thread)
1076 rb_thread_wakeup(thread);
1077 rb_thread_schedule();
1078 return thread;
1083 * call-seq:
1084 * Thread.stop => nil
1086 * Stops execution of the current thread, putting it into a ``sleep'' state,
1087 * and schedules execution of another thread.
1089 * a = Thread.new { print "a"; Thread.stop; print "c" }
1090 * Thread.pass
1091 * print "b"
1092 * a.run
1093 * a.join
1095 * <em>produces:</em>
1097 * abc
1100 VALUE
1101 rb_thread_stop(void)
1103 if (rb_thread_alone()) {
1104 rb_raise(rb_eThreadError,
1105 "stopping only thread\n\tnote: use sleep to stop forever");
1107 rb_thread_sleep_forever();
1108 return Qnil;
1111 static int
1112 thread_list_i(st_data_t key, st_data_t val, void *data)
1114 VALUE ary = (VALUE)data;
1115 rb_thread_t *th;
1116 GetThreadPtr((VALUE)key, th);
1118 switch (th->status) {
1119 case THREAD_RUNNABLE:
1120 case THREAD_STOPPED:
1121 case THREAD_TO_KILL:
1122 rb_ary_push(ary, th->self);
1123 default:
1124 break;
1126 return ST_CONTINUE;
1129 /********************************************************************/
1132 * call-seq:
1133 * Thread.list => array
1135 * Returns an array of <code>Thread</code> objects for all threads that are
1136 * either runnable or stopped.
1138 * Thread.new { sleep(200) }
1139 * Thread.new { 1000000.times {|i| i*i } }
1140 * Thread.new { Thread.stop }
1141 * Thread.list.each {|t| p t}
1143 * <em>produces:</em>
1145 * #<Thread:0x401b3e84 sleep>
1146 * #<Thread:0x401b3f38 run>
1147 * #<Thread:0x401b3fb0 sleep>
1148 * #<Thread:0x401bdf4c run>
1151 VALUE
1152 rb_thread_list(void)
1154 VALUE ary = rb_ary_new();
1155 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1156 return ary;
1159 VALUE
1160 rb_thread_current(void)
1162 return GET_THREAD()->self;
1166 * call-seq:
1167 * Thread.current => thread
1169 * Returns the currently executing thread.
1171 * Thread.current #=> #<Thread:0x401bdf4c run>
1174 static VALUE
1175 thread_s_current(VALUE klass)
1177 return rb_thread_current();
1180 VALUE
1181 rb_thread_main(void)
1183 return GET_THREAD()->vm->main_thread->self;
1186 static VALUE
1187 rb_thread_s_main(VALUE klass)
1189 return rb_thread_main();
1194 * call-seq:
1195 * Thread.abort_on_exception => true or false
1197 * Returns the status of the global ``abort on exception'' condition. The
1198 * default is <code>false</code>. When set to <code>true</code>, or if the
1199 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1200 * command line option <code>-d</code> was specified) all threads will abort
1201 * (the process will <code>exit(0)</code>) if an exception is raised in any
1202 * thread. See also <code>Thread::abort_on_exception=</code>.
1205 static VALUE
1206 rb_thread_s_abort_exc(void)
1208 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1213 * call-seq:
1214 * Thread.abort_on_exception= boolean => true or false
1216 * When set to <code>true</code>, all threads will abort if an exception is
1217 * raised. Returns the new state.
1219 * Thread.abort_on_exception = true
1220 * t1 = Thread.new do
1221 * puts "In new thread"
1222 * raise "Exception from thread"
1223 * end
1224 * sleep(1)
1225 * puts "not reached"
1227 * <em>produces:</em>
1229 * In new thread
1230 * prog.rb:4: Exception from thread (RuntimeError)
1231 * from prog.rb:2:in `initialize'
1232 * from prog.rb:2:in `new'
1233 * from prog.rb:2
1236 static VALUE
1237 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1239 rb_secure(4);
1240 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1241 return val;
1246 * call-seq:
1247 * thr.abort_on_exception => true or false
1249 * Returns the status of the thread-local ``abort on exception'' condition for
1250 * <i>thr</i>. The default is <code>false</code>. See also
1251 * <code>Thread::abort_on_exception=</code>.
1254 static VALUE
1255 rb_thread_abort_exc(VALUE thread)
1257 rb_thread_t *th;
1258 GetThreadPtr(thread, th);
1259 return th->abort_on_exception ? Qtrue : Qfalse;
1264 * call-seq:
1265 * thr.abort_on_exception= boolean => true or false
1267 * When set to <code>true</code>, causes all threads (including the main
1268 * program) to abort if an exception is raised in <i>thr</i>. The process will
1269 * effectively <code>exit(0)</code>.
1272 static VALUE
1273 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1275 rb_thread_t *th;
1276 rb_secure(4);
1278 GetThreadPtr(thread, th);
1279 th->abort_on_exception = RTEST(val);
1280 return val;
1285 * call-seq:
1286 * thr.group => thgrp or nil
1288 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1289 * the thread is not a member of any group.
1291 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1294 VALUE
1295 rb_thread_group(VALUE thread)
1297 rb_thread_t *th;
1298 VALUE group;
1299 GetThreadPtr(thread, th);
1300 group = th->thgroup;
1302 if (!group) {
1303 group = Qnil;
1305 return group;
1308 static const char *
1309 thread_status_name(enum rb_thread_status status)
1311 switch (status) {
1312 case THREAD_RUNNABLE:
1313 return "run";
1314 case THREAD_STOPPED:
1315 return "sleep";
1316 case THREAD_TO_KILL:
1317 return "aborting";
1318 case THREAD_KILLED:
1319 return "dead";
1320 default:
1321 return "unknown";
1325 static int
1326 rb_thread_dead(rb_thread_t *th)
1328 return th->status == THREAD_KILLED;
1333 * call-seq:
1334 * thr.status => string, false or nil
1336 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1337 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1338 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1339 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1340 * terminated with an exception.
1342 * a = Thread.new { raise("die now") }
1343 * b = Thread.new { Thread.stop }
1344 * c = Thread.new { Thread.exit }
1345 * d = Thread.new { sleep }
1346 * d.kill #=> #<Thread:0x401b3678 aborting>
1347 * a.status #=> nil
1348 * b.status #=> "sleep"
1349 * c.status #=> false
1350 * d.status #=> "aborting"
1351 * Thread.current.status #=> "run"
1354 static VALUE
1355 rb_thread_status(VALUE thread)
1357 rb_thread_t *th;
1358 GetThreadPtr(thread, th);
1360 if (rb_thread_dead(th)) {
1361 if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1362 /* TODO */ ) {
1363 return Qnil;
1365 return Qfalse;
1367 return rb_str_new2(thread_status_name(th->status));
1372 * call-seq:
1373 * thr.alive? => true or false
1375 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1377 * thr = Thread.new { }
1378 * thr.join #=> #<Thread:0x401b3fb0 dead>
1379 * Thread.current.alive? #=> true
1380 * thr.alive? #=> false
1383 static VALUE
1384 rb_thread_alive_p(VALUE thread)
1386 rb_thread_t *th;
1387 GetThreadPtr(thread, th);
1389 if (rb_thread_dead(th))
1390 return Qfalse;
1391 return Qtrue;
1395 * call-seq:
1396 * thr.stop? => true or false
1398 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1400 * a = Thread.new { Thread.stop }
1401 * b = Thread.current
1402 * a.stop? #=> true
1403 * b.stop? #=> false
1406 static VALUE
1407 rb_thread_stop_p(VALUE thread)
1409 rb_thread_t *th;
1410 GetThreadPtr(thread, th);
1412 if (rb_thread_dead(th))
1413 return Qtrue;
1414 if (th->status == THREAD_STOPPED)
1415 return Qtrue;
1416 return Qfalse;
1420 * call-seq:
1421 * thr.safe_level => integer
1423 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1424 * levels can help when implementing sandboxes which run insecure code.
1426 * thr = Thread.new { $SAFE = 3; sleep }
1427 * Thread.current.safe_level #=> 0
1428 * thr.safe_level #=> 3
1431 static VALUE
1432 rb_thread_safe_level(VALUE thread)
1434 rb_thread_t *th;
1435 GetThreadPtr(thread, th);
1437 return INT2NUM(th->safe_level);
1441 * call-seq:
1442 * thr.inspect => string
1444 * Dump the name, id, and status of _thr_ to a string.
1447 static VALUE
1448 rb_thread_inspect(VALUE thread)
1450 char *cname = rb_obj_classname(thread);
1451 rb_thread_t *th;
1452 const char *status;
1453 VALUE str;
1455 GetThreadPtr(thread, th);
1456 status = thread_status_name(th->status);
1457 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1458 OBJ_INFECT(str, thread);
1460 return str;
1463 VALUE
1464 rb_thread_local_aref(VALUE thread, ID id)
1466 rb_thread_t *th;
1467 VALUE val;
1469 GetThreadPtr(thread, th);
1470 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1471 rb_raise(rb_eSecurityError, "Insecure: thread locals");
1473 if (!th->local_storage) {
1474 return Qnil;
1476 if (st_lookup(th->local_storage, id, &val)) {
1477 return val;
1479 return Qnil;
1483 * call-seq:
1484 * thr[sym] => obj or nil
1486 * Attribute Reference---Returns the value of a thread-local variable, using
1487 * either a symbol or a string name. If the specified variable does not exist,
1488 * returns <code>nil</code>.
1490 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1491 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1492 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1493 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1495 * <em>produces:</em>
1497 * #<Thread:0x401b3b3c sleep>: C
1498 * #<Thread:0x401b3bc8 sleep>: B
1499 * #<Thread:0x401b3c68 sleep>: A
1500 * #<Thread:0x401bdf4c run>:
1503 static VALUE
1504 rb_thread_aref(VALUE thread, VALUE id)
1506 return rb_thread_local_aref(thread, rb_to_id(id));
1509 VALUE
1510 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
1512 rb_thread_t *th;
1513 GetThreadPtr(thread, th);
1515 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1516 rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
1518 if (OBJ_FROZEN(thread)) {
1519 rb_error_frozen("thread locals");
1521 if (!th->local_storage) {
1522 th->local_storage = st_init_numtable();
1524 if (NIL_P(val)) {
1525 st_delete(th->local_storage, (st_data_t *) & id, 0);
1526 return Qnil;
1528 st_insert(th->local_storage, id, val);
1529 return val;
1533 * call-seq:
1534 * thr[sym] = obj => obj
1536 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1537 * using either a symbol or a string. See also <code>Thread#[]</code>.
1540 static VALUE
1541 rb_thread_aset(VALUE self, ID id, VALUE val)
1543 return rb_thread_local_aset(self, rb_to_id(id), val);
1547 * call-seq:
1548 * thr.key?(sym) => true or false
1550 * Returns <code>true</code> if the given string (or symbol) exists as a
1551 * thread-local variable.
1553 * me = Thread.current
1554 * me[:oliver] = "a"
1555 * me.key?(:oliver) #=> true
1556 * me.key?(:stanley) #=> false
1559 static VALUE
1560 rb_thread_key_p(VALUE self, ID id)
1562 rb_thread_t *th;
1563 GetThreadPtr(self, th);
1565 if (!th->local_storage) {
1566 return Qfalse;
1568 if (st_lookup(th->local_storage, rb_to_id(id), 0)) {
1569 return Qtrue;
1571 return Qfalse;
1574 static int
1575 thread_keys_i(ID key, VALUE value, VALUE ary)
1577 rb_ary_push(ary, ID2SYM(key));
1578 return ST_CONTINUE;
1582 rb_thread_alone()
1584 int num = 1;
1585 if (GET_THREAD()->vm->living_threads) {
1586 num = GET_THREAD()->vm->living_threads->num_entries;
1587 thread_debug("rb_thread_alone: %d\n", num);
1589 return num == 1;
1593 * call-seq:
1594 * thr.keys => array
1596 * Returns an an array of the names of the thread-local variables (as Symbols).
1598 * thr = Thread.new do
1599 * Thread.current[:cat] = 'meow'
1600 * Thread.current["dog"] = 'woof'
1601 * end
1602 * thr.join #=> #<Thread:0x401b3f10 dead>
1603 * thr.keys #=> [:dog, :cat]
1606 static VALUE
1607 rb_thread_keys(VALUE self)
1609 rb_thread_t *th;
1610 VALUE ary = rb_ary_new();
1611 GetThreadPtr(self, th);
1613 if (th->local_storage) {
1614 st_foreach(th->local_storage, thread_keys_i, ary);
1616 return ary;
1620 * call-seq:
1621 * thr.priority => integer
1623 * Returns the priority of <i>thr</i>. Default is inherited from the
1624 * current thread which creating the new thread, or zero for the
1625 * initial main thread; higher-priority threads will run before
1626 * lower-priority threads.
1628 * Thread.current.priority #=> 0
1631 static VALUE
1632 rb_thread_priority(VALUE thread)
1634 rb_thread_t *th;
1635 GetThreadPtr(thread, th);
1636 return INT2NUM(th->priority);
1641 * call-seq:
1642 * thr.priority= integer => thr
1644 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1645 * will run before lower-priority threads.
1647 * count1 = count2 = 0
1648 * a = Thread.new do
1649 * loop { count1 += 1 }
1650 * end
1651 * a.priority = -1
1653 * b = Thread.new do
1654 * loop { count2 += 1 }
1655 * end
1656 * b.priority = -2
1657 * sleep 1 #=> 1
1658 * count1 #=> 622504
1659 * count2 #=> 5832
1662 static VALUE
1663 rb_thread_priority_set(VALUE thread, VALUE prio)
1665 rb_thread_t *th;
1666 GetThreadPtr(thread, th);
1668 rb_secure(4);
1670 th->priority = NUM2INT(prio);
1671 native_thread_apply_priority(th);
1672 return prio;
1675 /* for IO */
1677 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1678 void
1679 rb_fd_init(volatile rb_fdset_t *fds)
1681 fds->maxfd = 0;
1682 fds->fdset = ALLOC(fd_set);
1683 FD_ZERO(fds->fdset);
1686 void
1687 rb_fd_term(rb_fdset_t *fds)
1689 if (fds->fdset) free(fds->fdset);
1690 fds->maxfd = 0;
1691 fds->fdset = 0;
1694 void
1695 rb_fd_zero(rb_fdset_t *fds)
1697 if (fds->fdset) {
1698 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
1699 FD_ZERO(fds->fdset);
1703 static void
1704 rb_fd_resize(int n, rb_fdset_t *fds)
1706 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
1707 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
1709 if (m < sizeof(fd_set)) m = sizeof(fd_set);
1710 if (o < sizeof(fd_set)) o = sizeof(fd_set);
1712 if (m > o) {
1713 fds->fdset = realloc(fds->fdset, m);
1714 memset((char *)fds->fdset + o, 0, m - o);
1716 if (n >= fds->maxfd) fds->maxfd = n + 1;
1719 void
1720 rb_fd_set(int n, rb_fdset_t *fds)
1722 rb_fd_resize(n, fds);
1723 FD_SET(n, fds->fdset);
1726 void
1727 rb_fd_clr(int n, rb_fdset_t *fds)
1729 if (n >= fds->maxfd) return;
1730 FD_CLR(n, fds->fdset);
1734 rb_fd_isset(int n, const rb_fdset_t *fds)
1736 if (n >= fds->maxfd) return 0;
1737 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1740 void
1741 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
1743 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
1745 if (size < sizeof(fd_set)) size = sizeof(fd_set);
1746 dst->maxfd = max;
1747 dst->fdset = realloc(dst->fdset, size);
1748 memcpy(dst->fdset, src, size);
1752 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
1754 fd_set *r = NULL, *w = NULL, *e = NULL;
1755 if (readfds) {
1756 rb_fd_resize(n - 1, readfds);
1757 r = rb_fd_ptr(readfds);
1759 if (writefds) {
1760 rb_fd_resize(n - 1, writefds);
1761 w = rb_fd_ptr(writefds);
1763 if (exceptfds) {
1764 rb_fd_resize(n - 1, exceptfds);
1765 e = rb_fd_ptr(exceptfds);
1767 return select(n, r, w, e, timeout);
1770 #undef FD_ZERO
1771 #undef FD_SET
1772 #undef FD_CLR
1773 #undef FD_ISSET
1775 #define FD_ZERO(f) rb_fd_zero(f)
1776 #define FD_SET(i, f) rb_fd_set(i, f)
1777 #define FD_CLR(i, f) rb_fd_clr(i, f)
1778 #define FD_ISSET(i, f) rb_fd_isset(i, f)
1780 #endif
1782 #if defined(__CYGWIN__) || defined(_WIN32)
1783 static long
1784 cmp_tv(const struct timeval *a, const struct timeval *b)
1786 long d = (a->tv_sec - b->tv_sec);
1787 return (d != 0) ? d : (a->tv_usec - b->tv_usec);
1790 static int
1791 subst(struct timeval *rest, const struct timeval *wait)
1793 while (rest->tv_usec < wait->tv_usec) {
1794 if (rest->tv_sec <= wait->tv_sec) {
1795 return 0;
1797 rest->tv_sec -= 1;
1798 rest->tv_usec += 1000 * 1000;
1800 rest->tv_sec -= wait->tv_sec;
1801 rest->tv_usec -= wait->tv_usec;
1802 return 1;
1804 #endif
1806 static int
1807 do_select(int n, fd_set *read, fd_set *write, fd_set *except,
1808 struct timeval *timeout)
1810 int result, lerrno;
1811 fd_set orig_read, orig_write, orig_except;
1813 #ifndef linux
1814 double limit = 0;
1815 struct timeval wait_rest;
1817 if (timeout) {
1818 limit = timeofday() +
1819 (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
1820 wait_rest = *timeout;
1821 timeout = &wait_rest;
1823 #endif
1825 if (read) orig_read = *read;
1826 if (write) orig_write = *write;
1827 if (except) orig_except = *except;
1829 retry:
1830 lerrno = 0;
1832 #if defined(__CYGWIN__) || defined(_WIN32)
1834 /* polling duration: 100ms */
1835 struct timeval wait_100ms, *wait;
1836 wait_100ms.tv_sec = 0;
1837 wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
1839 do {
1840 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
1841 BLOCKING_REGION({
1842 do {
1843 result = select(n, read, write, except, wait);
1844 if (result < 0) lerrno = errno;
1845 if (result != 0) break;
1847 if (read) *read = orig_read;
1848 if (write) *write = orig_write;
1849 if (except) *except = orig_except;
1850 wait = &wait_100ms;
1851 } while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
1852 }, 0, 0);
1853 } while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
1855 #else
1856 BLOCKING_REGION({
1857 result = select(n, read, write, except, timeout);
1858 if (result < 0) lerrno = errno;
1859 }, ubf_select, GET_THREAD());
1860 #endif
1862 errno = lerrno;
1864 if (result < 0) {
1865 switch (errno) {
1866 case EINTR:
1867 #ifdef ERESTART
1868 case ERESTART:
1869 #endif
1870 if (read) *read = orig_read;
1871 if (write) *write = orig_write;
1872 if (except) *except = orig_except;
1873 #ifndef linux
1874 if (timeout) {
1875 double d = limit - timeofday();
1877 wait_rest.tv_sec = (unsigned int)d;
1878 wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
1879 if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
1880 if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
1882 #endif
1883 goto retry;
1884 default:
1885 break;
1888 return result;
1891 static void
1892 rb_thread_wait_fd_rw(int fd, int read)
1894 int result = 0;
1895 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
1897 while (result <= 0) {
1898 rb_fdset_t set;
1899 rb_fd_init(&set);
1900 FD_SET(fd, &set);
1902 if (read) {
1903 result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
1905 else {
1906 result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
1909 rb_fd_term(&set);
1911 if (result < 0) {
1912 rb_sys_fail(0);
1916 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
1919 void
1920 rb_thread_wait_fd(int fd)
1922 rb_thread_wait_fd_rw(fd, 1);
1926 rb_thread_fd_writable(int fd)
1928 rb_thread_wait_fd_rw(fd, 0);
1929 return Qtrue;
1933 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
1934 struct timeval *timeout)
1936 if (!read && !write && !except) {
1937 if (!timeout) {
1938 rb_thread_sleep_forever();
1939 return 0;
1941 rb_thread_wait_for(*timeout);
1942 return 0;
1944 else {
1945 return do_select(max, read, write, except, timeout);
1951 * for GC
1954 #ifdef USE_CONSERVATIVE_STACK_END
1955 void
1956 rb_gc_set_stack_end(VALUE **stack_end_p)
1958 VALUE stack_end;
1959 *stack_end_p = &stack_end;
1961 #endif
1963 void
1964 rb_gc_save_machine_context(rb_thread_t *th)
1966 SET_MACHINE_STACK_END(&th->machine_stack_end);
1967 #ifdef __ia64
1968 th->machine_register_stack_end = rb_ia64_bsp();
1969 #endif
1970 setjmp(th->machine_regs);
1977 int rb_get_next_signal(rb_vm_t *vm);
1979 static void
1980 timer_thread_function(void)
1982 rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
1984 /* for time slice */
1985 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
1987 /* check signal */
1988 if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
1989 vm->main_thread->exec_signal = rb_get_next_signal(vm);
1990 thread_debug("buffered_signal_size: %ld, sig: %d\n",
1991 (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
1992 rb_thread_interrupt(vm->main_thread);
1995 #if 0
1996 /* prove profiler */
1997 if (vm->prove_profile.enable) {
1998 rb_thread_t *th = vm->running_thread;
2000 if (vm->during_gc) {
2001 /* GC prove profiling */
2004 #endif
2007 void
2008 rb_thread_stop_timer_thread(void)
2010 if (timer_thread_id) {
2011 system_working = 0;
2012 native_thread_join(timer_thread_id);
2013 timer_thread_id = 0;
2017 void
2018 rb_thread_reset_timer_thread(void)
2020 timer_thread_id = 0;
2023 void
2024 rb_thread_start_timer_thread(void)
2026 rb_thread_create_timer_thread();
2029 static int
2030 terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2032 VALUE thval = key;
2033 rb_thread_t *th;
2034 GetThreadPtr(thval, th);
2036 if (th != current_th) {
2037 thread_cleanup_func(th);
2039 return ST_CONTINUE;
2042 void
2043 rb_thread_atfork(void)
2045 rb_thread_t *th = GET_THREAD();
2046 rb_vm_t *vm = th->vm;
2047 VALUE thval = th->self;
2048 vm->main_thread = th;
2050 st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
2051 st_clear(vm->living_threads);
2052 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2055 struct thgroup {
2056 int enclosed;
2057 VALUE group;
2061 * Document-class: ThreadGroup
2063 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2064 * threads as a group. A <code>Thread</code> can belong to only one
2065 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2066 * remove it from any previous group.
2068 * Newly created threads belong to the same group as the thread from which they
2069 * were created.
2072 static VALUE thgroup_s_alloc(VALUE);
2073 static VALUE
2074 thgroup_s_alloc(VALUE klass)
2076 VALUE group;
2077 struct thgroup *data;
2079 group = Data_Make_Struct(klass, struct thgroup, 0, free, data);
2080 data->enclosed = 0;
2081 data->group = group;
2083 return group;
2086 struct thgroup_list_params {
2087 VALUE ary;
2088 VALUE group;
2091 static int
2092 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2094 VALUE thread = (VALUE)key;
2095 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2096 VALUE group = ((struct thgroup_list_params *)data)->group;
2097 rb_thread_t *th;
2098 GetThreadPtr(thread, th);
2100 if (th->thgroup == group) {
2101 rb_ary_push(ary, thread);
2103 return ST_CONTINUE;
2107 * call-seq:
2108 * thgrp.list => array
2110 * Returns an array of all existing <code>Thread</code> objects that belong to
2111 * this group.
2113 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2116 static VALUE
2117 thgroup_list(VALUE group)
2119 VALUE ary = rb_ary_new();
2120 struct thgroup_list_params param;
2122 param.ary = ary;
2123 param.group = group;
2124 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2125 return ary;
2130 * call-seq:
2131 * thgrp.enclose => thgrp
2133 * Prevents threads from being added to or removed from the receiving
2134 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2135 * <code>ThreadGroup</code>.
2137 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2138 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2139 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2140 * tg.add thr
2142 * <em>produces:</em>
2144 * ThreadError: can't move from the enclosed thread group
2147 VALUE
2148 thgroup_enclose(VALUE group)
2150 struct thgroup *data;
2152 Data_Get_Struct(group, struct thgroup, data);
2153 data->enclosed = 1;
2155 return group;
2160 * call-seq:
2161 * thgrp.enclosed? => true or false
2163 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2164 * ThreadGroup#enclose.
2167 static VALUE
2168 thgroup_enclosed_p(VALUE group)
2170 struct thgroup *data;
2172 Data_Get_Struct(group, struct thgroup, data);
2173 if (data->enclosed)
2174 return Qtrue;
2175 return Qfalse;
2180 * call-seq:
2181 * thgrp.add(thread) => thgrp
2183 * Adds the given <em>thread</em> to this group, removing it from any other
2184 * group to which it may have previously belonged.
2186 * puts "Initial group is #{ThreadGroup::Default.list}"
2187 * tg = ThreadGroup.new
2188 * t1 = Thread.new { sleep }
2189 * t2 = Thread.new { sleep }
2190 * puts "t1 is #{t1}"
2191 * puts "t2 is #{t2}"
2192 * tg.add(t1)
2193 * puts "Initial group now #{ThreadGroup::Default.list}"
2194 * puts "tg group now #{tg.list}"
2196 * <em>produces:</em>
2198 * Initial group is #<Thread:0x401bdf4c>
2199 * t1 is #<Thread:0x401b3c90>
2200 * t2 is #<Thread:0x401b3c18>
2201 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2202 * tg group now #<Thread:0x401b3c90>
2205 static VALUE
2206 thgroup_add(VALUE group, VALUE thread)
2208 rb_thread_t *th;
2209 struct thgroup *data;
2211 rb_secure(4);
2212 GetThreadPtr(thread, th);
2214 if (OBJ_FROZEN(group)) {
2215 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
2217 Data_Get_Struct(group, struct thgroup, data);
2218 if (data->enclosed) {
2219 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
2222 if (!th->thgroup) {
2223 return Qnil;
2226 if (OBJ_FROZEN(th->thgroup)) {
2227 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
2229 Data_Get_Struct(th->thgroup, struct thgroup, data);
2230 if (data->enclosed) {
2231 rb_raise(rb_eThreadError,
2232 "can't move from the enclosed thread group");
2235 th->thgroup = group;
2236 return group;
2241 * Document-class: Mutex
2243 * Mutex implements a simple semaphore that can be used to coordinate access to
2244 * shared data from multiple concurrent threads.
2246 * Example:
2248 * require 'thread'
2249 * semaphore = Mutex.new
2251 * a = Thread.new {
2252 * semaphore.synchronize {
2253 * # access shared resource
2257 * b = Thread.new {
2258 * semaphore.synchronize {
2259 * # access shared resource
2265 typedef struct mutex_struct {
2266 rb_thread_lock_t lock;
2267 rb_thread_cond_t cond;
2268 rb_thread_t volatile *th;
2269 volatile int cond_waiting;
2270 } mutex_t;
2272 #define GetMutexPtr(obj, tobj) \
2273 Data_Get_Struct(obj, mutex_t, tobj)
2275 static void
2276 mutex_mark(void *ptr)
2278 if (ptr) {
2279 mutex_t *mutex = ptr;
2280 if (mutex->th) {
2281 rb_gc_mark(mutex->th->self);
2286 static void
2287 mutex_free(void *ptr)
2289 if (ptr) {
2290 mutex_t *mutex = ptr;
2291 native_mutex_destroy(&mutex->lock);
2292 native_cond_destroy(&mutex->cond);
2294 ruby_xfree(ptr);
2297 static VALUE
2298 mutex_alloc(VALUE klass)
2300 VALUE volatile obj;
2301 mutex_t *mutex;
2303 obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
2304 native_mutex_initialize(&mutex->lock);
2305 native_cond_initialize(&mutex->cond);
2306 return obj;
2310 * call-seq:
2311 * Mutex.new => mutex
2313 * Creates a new Mutex
2315 static VALUE
2316 mutex_initialize(VALUE self)
2318 return self;
2321 VALUE
2322 rb_mutex_new(void)
2324 return mutex_alloc(rb_cMutex);
2328 * call-seq:
2329 * mutex.locked? => true or false
2331 * Returns +true+ if this lock is currently held by some thread.
2333 VALUE
2334 rb_mutex_locked_p(VALUE self)
2336 mutex_t *mutex;
2337 GetMutexPtr(self, mutex);
2338 return mutex->th ? Qtrue : Qfalse;
2342 * call-seq:
2343 * mutex.try_lock => true or false
2345 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2346 * lock was granted.
2348 VALUE
2349 rb_mutex_trylock(VALUE self)
2351 mutex_t *mutex;
2352 VALUE locked = Qfalse;
2353 GetMutexPtr(self, mutex);
2355 if (mutex->th == GET_THREAD()) {
2356 rb_raise(rb_eThreadError, "deadlock; recursive locking");
2359 native_mutex_lock(&mutex->lock);
2360 if (mutex->th == 0) {
2361 mutex->th = GET_THREAD();
2362 locked = Qtrue;
2364 native_mutex_unlock(&mutex->lock);
2366 return locked;
2369 static int
2370 lock_func(rb_thread_t *th, mutex_t *mutex)
2372 int interrupted = Qfalse;
2374 native_mutex_lock(&mutex->lock);
2375 while (mutex->th || (mutex->th = th, 0)) {
2376 mutex->cond_waiting++;
2377 native_cond_wait(&mutex->cond, &mutex->lock);
2379 if (th->interrupt_flag) {
2380 interrupted = Qtrue;
2381 break;
2384 native_mutex_unlock(&mutex->lock);
2385 return interrupted;
2388 static void
2389 lock_interrupt(void *ptr)
2391 mutex_t *mutex = (mutex_t *)ptr;
2392 native_mutex_lock(&mutex->lock);
2393 if (mutex->cond_waiting > 0) {
2394 native_cond_broadcast(&mutex->cond);
2395 mutex->cond_waiting = 0;
2397 native_mutex_unlock(&mutex->lock);
2401 * call-seq:
2402 * mutex.lock => true or false
2404 * Attempts to grab the lock and waits if it isn't available.
2405 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2407 VALUE
2408 rb_mutex_lock(VALUE self)
2410 if (rb_mutex_trylock(self) == Qfalse) {
2411 mutex_t *mutex;
2412 rb_thread_t *th = GET_THREAD();
2413 GetMutexPtr(self, mutex);
2415 while (mutex->th != th) {
2416 int interrupted;
2418 BLOCKING_REGION({
2419 interrupted = lock_func(th, mutex);
2420 }, lock_interrupt, mutex);
2422 if (interrupted) {
2423 RUBY_VM_CHECK_INTS();
2427 return self;
2431 * call-seq:
2432 * mutex.unlock => self
2434 * Releases the lock.
2435 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2437 VALUE
2438 rb_mutex_unlock(VALUE self)
2440 mutex_t *mutex;
2441 char *err = NULL;
2442 GetMutexPtr(self, mutex);
2444 native_mutex_lock(&mutex->lock);
2446 if (mutex->th == 0) {
2447 err = "Attempt to unlock a mutex which is not locked";
2449 else if (mutex->th != GET_THREAD()) {
2450 err = "Attempt to unlock a mutex which is locked by another thread";
2452 else {
2453 mutex->th = 0;
2454 if (mutex->cond_waiting > 0) {
2455 /* waiting thread */
2456 native_cond_signal(&mutex->cond);
2457 mutex->cond_waiting--;
2461 native_mutex_unlock(&mutex->lock);
2463 if (err) rb_raise(rb_eThreadError, err);
2465 return self;
2468 static VALUE
2469 rb_mutex_sleep_forever(VALUE time)
2471 rb_thread_sleep_forever();
2472 return Qnil;
2475 static VALUE
2476 rb_mutex_wait_for(VALUE time)
2478 const struct timeval *t = (struct timeval *)time;
2479 rb_thread_wait_for(*t);
2480 return Qnil;
2483 VALUE
2484 rb_mutex_sleep(VALUE self, VALUE timeout)
2486 time_t beg, end;
2487 struct timeval t;
2489 if (!NIL_P(timeout)) {
2490 t = rb_time_interval(timeout);
2492 rb_mutex_unlock(self);
2493 beg = time(0);
2494 if (NIL_P(timeout)) {
2495 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2497 else {
2498 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2500 end = time(0) - beg;
2501 return INT2FIX(end);
2505 * call-seq:
2506 * mutex.sleep(timeout = nil) => number
2508 * Releases the lock and sleeps +timeout+ seconds if it is given and
2509 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2510 * the current thread.
2512 static VALUE
2513 mutex_sleep(int argc, VALUE *argv, VALUE self)
2515 VALUE timeout;
2517 rb_scan_args(argc, argv, "01", &timeout);
2518 return rb_mutex_sleep(self, timeout);
2522 * call-seq:
2523 * mutex.synchronize { ... } => result of the block
2525 * Obtains a lock, runs the block, and releases the lock when the block
2526 * completes. See the example under +Mutex+.
2529 VALUE
2530 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2532 rb_mutex_lock(mutex);
2533 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
2537 * Document-class: Barrier
2539 typedef struct rb_thread_list_struct rb_thread_list_t;
2541 struct rb_thread_list_struct {
2542 rb_thread_t *th;
2543 rb_thread_list_t *next;
2546 static void
2547 thlist_mark(void *ptr)
2549 rb_thread_list_t *q = ptr;
2551 for (; q; q = q->next) {
2552 rb_gc_mark(q->th->self);
2556 static void
2557 thlist_free(void *ptr)
2559 rb_thread_list_t *q = ptr, *next;
2561 for (; q; q = next) {
2562 next = q->next;
2563 ruby_xfree(q);
2567 static int
2568 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
2570 int woken = 0;
2571 rb_thread_list_t *q;
2573 while ((q = *list) != NULL) {
2574 rb_thread_t *th = q->th;
2576 *list = q->next;
2577 ruby_xfree(q);
2578 if (th->status != THREAD_KILLED) {
2579 rb_thread_ready(th);
2580 if (!woken && woken_thread) *woken_thread = th;
2581 if (++woken >= maxth && maxth) break;
2584 return woken;
2587 typedef struct {
2588 rb_thread_t *owner;
2589 rb_thread_list_t *waiting, **tail;
2590 } rb_barrier_t;
2592 static void
2593 barrier_mark(void *ptr)
2595 rb_barrier_t *b = ptr;
2597 if (b->owner) rb_gc_mark(b->owner->self);
2598 thlist_mark(b->waiting);
2601 static void
2602 barrier_free(void *ptr)
2604 rb_barrier_t *b = ptr;
2606 b->owner = 0;
2607 thlist_free(b->waiting);
2608 b->waiting = 0;
2609 ruby_xfree(ptr);
2612 static VALUE
2613 barrier_alloc(VALUE klass)
2615 VALUE volatile obj;
2616 rb_barrier_t *barrier;
2618 obj = Data_Make_Struct(klass, rb_barrier_t,
2619 barrier_mark, barrier_free, barrier);
2620 barrier->owner = GET_THREAD();
2621 barrier->waiting = 0;
2622 barrier->tail = &barrier->waiting;
2623 return obj;
2626 VALUE
2627 rb_barrier_new(void)
2629 return barrier_alloc(rb_cBarrier);
2632 VALUE
2633 rb_barrier_wait(VALUE self)
2635 rb_barrier_t *barrier;
2636 rb_thread_list_t *q;
2638 Data_Get_Struct(self, rb_barrier_t, barrier);
2639 if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
2640 barrier->owner = 0;
2641 if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
2642 return Qtrue;
2644 else if (barrier->owner == GET_THREAD()) {
2645 return Qfalse;
2647 else {
2648 *barrier->tail = q = ALLOC(rb_thread_list_t);
2649 q->th = GET_THREAD();
2650 q->next = 0;
2651 barrier->tail = &q->next;
2652 rb_thread_sleep_forever();
2653 return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
2657 VALUE
2658 rb_barrier_release(VALUE self)
2660 rb_barrier_t *barrier;
2661 unsigned int n;
2663 Data_Get_Struct(self, rb_barrier_t, barrier);
2664 if (barrier->owner != GET_THREAD()) {
2665 rb_raise(rb_eThreadError, "not owned");
2667 n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
2668 return n ? UINT2NUM(n) : Qfalse;
2671 /* variables for recursive traversals */
2672 static ID recursive_key;
2674 static VALUE
2675 recursive_check(VALUE hash, VALUE obj)
2677 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
2678 return Qfalse;
2680 else {
2681 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
2683 if (NIL_P(list) || TYPE(list) != T_HASH)
2684 return Qfalse;
2685 if (NIL_P(rb_hash_lookup(list, obj)))
2686 return Qfalse;
2687 return Qtrue;
2691 static VALUE
2692 recursive_push(VALUE hash, VALUE obj)
2694 VALUE list, sym;
2696 sym = ID2SYM(rb_frame_this_func());
2697 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
2698 hash = rb_hash_new();
2699 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
2700 list = Qnil;
2702 else {
2703 list = rb_hash_aref(hash, sym);
2705 if (NIL_P(list) || TYPE(list) != T_HASH) {
2706 list = rb_hash_new();
2707 rb_hash_aset(hash, sym, list);
2709 rb_hash_aset(list, obj, Qtrue);
2710 return hash;
2713 static void
2714 recursive_pop(VALUE hash, VALUE obj)
2716 VALUE list, sym;
2718 sym = ID2SYM(rb_frame_this_func());
2719 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
2720 VALUE symname;
2721 VALUE thrname;
2722 symname = rb_inspect(sym);
2723 thrname = rb_inspect(rb_thread_current());
2725 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
2726 StringValuePtr(symname), StringValuePtr(thrname));
2728 list = rb_hash_aref(hash, sym);
2729 if (NIL_P(list) || TYPE(list) != T_HASH) {
2730 VALUE symname = rb_inspect(sym);
2731 VALUE thrname = rb_inspect(rb_thread_current());
2732 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
2733 StringValuePtr(symname), StringValuePtr(thrname));
2735 rb_hash_delete(list, obj);
2738 VALUE
2739 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
2741 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
2742 VALUE objid = rb_obj_id(obj);
2744 if (recursive_check(hash, objid)) {
2745 return (*func) (obj, arg, Qtrue);
2747 else {
2748 VALUE result = Qundef;
2749 int state;
2751 hash = recursive_push(hash, objid);
2752 PUSH_TAG();
2753 if ((state = EXEC_TAG()) == 0) {
2754 result = (*func) (obj, arg, Qfalse);
2756 POP_TAG();
2757 recursive_pop(hash, objid);
2758 if (state)
2759 JUMP_TAG(state);
2760 return result;
2764 /* tracer */
2766 static rb_event_hook_t *
2767 alloc_event_fook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
2769 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
2770 hook->func = func;
2771 hook->flag = events;
2772 hook->data = data;
2773 return hook;
2776 static void
2777 thread_reset_event_flags(rb_thread_t *th)
2779 rb_event_hook_t *hook = th->event_hooks;
2780 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
2782 while (hook) {
2783 flag |= hook->flag;
2784 hook = hook->next;
2788 void
2789 rb_thread_add_event_hook(rb_thread_t *th,
2790 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
2792 rb_event_hook_t *hook = alloc_event_fook(func, events, data);
2793 hook->next = th->event_hooks;
2794 th->event_hooks = hook;
2795 thread_reset_event_flags(th);
2798 static int
2799 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
2801 VALUE thval = key;
2802 rb_thread_t *th;
2803 GetThreadPtr(thval, th);
2805 if (flag) {
2806 th->event_flags |= RUBY_EVENT_VM;
2808 else {
2809 th->event_flags &= (~RUBY_EVENT_VM);
2811 return ST_CONTINUE;
2814 static void
2815 set_threads_event_flags(int flag)
2817 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
2820 void
2821 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
2823 rb_event_hook_t *hook = alloc_event_fook(func, events, data);
2824 rb_vm_t *vm = GET_VM();
2826 hook->next = vm->event_hooks;
2827 vm->event_hooks = hook;
2829 set_threads_event_flags(1);
2832 static int
2833 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
2835 rb_event_hook_t *prev = NULL, *hook = *root, *next;
2837 while (hook) {
2838 next = hook->next;
2839 if (func == 0 || hook->func == func) {
2840 if (prev) {
2841 prev->next = hook->next;
2843 else {
2844 *root = hook->next;
2846 xfree(hook);
2848 else {
2849 prev = hook;
2851 hook = next;
2853 return -1;
2857 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
2859 int ret = remove_event_hook(&th->event_hooks, func);
2860 thread_reset_event_flags(th);
2861 return ret;
2865 rb_remove_event_hook(rb_event_hook_func_t func)
2867 rb_vm_t *vm = GET_VM();
2868 rb_event_hook_t *hook = vm->event_hooks;
2869 int ret = remove_event_hook(&vm->event_hooks, func);
2871 if (hook != NULL && vm->event_hooks == NULL) {
2872 set_threads_event_flags(0);
2875 return ret;
2878 static int
2879 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
2881 rb_thread_t *th;
2882 GetThreadPtr((VALUE)key, th);
2883 rb_thread_remove_event_hook(th, 0);
2884 return ST_CONTINUE;
2887 void
2888 rb_clear_trace_func(void)
2890 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
2891 rb_remove_event_hook(0);
2894 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
2897 * call-seq:
2898 * set_trace_func(proc) => proc
2899 * set_trace_func(nil) => nil
2901 * Establishes _proc_ as the handler for tracing, or disables
2902 * tracing if the parameter is +nil+. _proc_ takes up
2903 * to six parameters: an event name, a filename, a line number, an
2904 * object id, a binding, and the name of a class. _proc_ is
2905 * invoked whenever an event occurs. Events are: <code>c-call</code>
2906 * (call a C-language routine), <code>c-return</code> (return from a
2907 * C-language routine), <code>call</code> (call a Ruby method),
2908 * <code>class</code> (start a class or module definition),
2909 * <code>end</code> (finish a class or module definition),
2910 * <code>line</code> (execute code on a new line), <code>raise</code>
2911 * (raise an exception), and <code>return</code> (return from a Ruby
2912 * method). Tracing is disabled within the context of _proc_.
2914 * class Test
2915 * def test
2916 * a = 1
2917 * b = 2
2918 * end
2919 * end
2921 * set_trace_func proc { |event, file, line, id, binding, classname|
2922 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
2924 * t = Test.new
2925 * t.test
2927 * line prog.rb:11 false
2928 * c-call prog.rb:11 new Class
2929 * c-call prog.rb:11 initialize Object
2930 * c-return prog.rb:11 initialize Object
2931 * c-return prog.rb:11 new Class
2932 * line prog.rb:12 false
2933 * call prog.rb:2 test Test
2934 * line prog.rb:3 test Test
2935 * line prog.rb:4 test Test
2936 * return prog.rb:4 test Test
2939 static VALUE
2940 set_trace_func(VALUE obj, VALUE trace)
2942 rb_remove_event_hook(call_trace_func);
2944 if (NIL_P(trace)) {
2945 return Qnil;
2948 if (!rb_obj_is_proc(trace)) {
2949 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
2952 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
2953 return trace;
2956 static void
2957 thread_add_trace_func(rb_thread_t *th, VALUE trace)
2959 if (!rb_obj_is_proc(trace)) {
2960 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
2963 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
2966 static VALUE
2967 thread_add_trace_func_m(VALUE obj, VALUE trace)
2969 rb_thread_t *th;
2970 GetThreadPtr(obj, th);
2971 thread_add_trace_func(th, trace);
2972 return trace;
2975 static VALUE
2976 thread_set_trace_func_m(VALUE obj, VALUE trace)
2978 rb_thread_t *th;
2979 GetThreadPtr(obj, th);
2980 rb_thread_remove_event_hook(th, call_trace_func);
2982 if (!NIL_P(trace)) {
2983 return Qnil;
2985 thread_add_trace_func(th, trace);
2986 return trace;
2989 static char *
2990 get_event_name(rb_event_flag_t event)
2992 switch (event) {
2993 case RUBY_EVENT_LINE:
2994 return "line";
2995 case RUBY_EVENT_CLASS:
2996 return "class";
2997 case RUBY_EVENT_END:
2998 return "end";
2999 case RUBY_EVENT_CALL:
3000 return "call";
3001 case RUBY_EVENT_RETURN:
3002 return "return";
3003 case RUBY_EVENT_C_CALL:
3004 return "c-call";
3005 case RUBY_EVENT_C_RETURN:
3006 return "c-return";
3007 case RUBY_EVENT_RAISE:
3008 return "raise";
3009 default:
3010 return "unknown";
3014 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
3016 struct call_trace_func_args {
3017 rb_event_flag_t event;
3018 VALUE proc;
3019 VALUE self;
3020 ID id;
3021 VALUE klass;
3024 static VALUE
3025 call_trace_proc(VALUE args, int tracing)
3027 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3028 VALUE eventname = rb_str_new2(get_event_name(p->event));
3029 VALUE filename = rb_str_new2(rb_sourcefile());
3030 int line = rb_sourceline();
3031 ID id = 0;
3032 VALUE klass = 0;
3034 if (p->event == RUBY_EVENT_C_CALL ||
3035 p->event == RUBY_EVENT_C_RETURN) {
3036 id = p->id;
3037 klass = p->klass;
3039 else {
3040 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
3042 if (id == ID_ALLOCATOR)
3043 return Qnil;
3044 if (klass) {
3045 if (TYPE(klass) == T_ICLASS) {
3046 klass = RBASIC(klass)->klass;
3048 else if (FL_TEST(klass, FL_SINGLETON)) {
3049 klass = rb_iv_get(klass, "__attached__");
3052 return rb_proc_call(p->proc, rb_ary_new3(6,
3053 eventname, filename, INT2FIX(line),
3054 id ? ID2SYM(id) : Qnil,
3055 p->self ? rb_binding_new() : Qnil,
3056 klass ? klass : Qnil));
3059 static void
3060 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3062 struct call_trace_func_args args;
3064 args.event = event;
3065 args.proc = proc;
3066 args.self = self;
3067 args.id = id;
3068 args.klass = klass;
3069 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
/* Run func(arg, previous_tracing_flag) with the current thread's
 * tracing flag forced on, so that event hooks fired inside `func` do
 * not recurse into tracing again.  If tracing is already in progress
 * and `always` is zero, `func` is not called and Qnil is returned.
 * The thread's raised flag and any non-local exit from `func`
 * (captured via PUSH_TAG/EXEC_TAG) are restored / re-thrown after the
 * tracing flag has been put back. */
3072 VALUE
3073 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3075 rb_thread_t *th = GET_THREAD();
3076 int state, raised, tracing;
3077 VALUE result = Qnil;
/* Already tracing and not forced: skip the callback entirely. */
3079 if ((tracing = th->tracing) != 0 && !always) {
3080 return Qnil;
3082 else {
/* Mark tracing in progress; restored from `tracing` below. */
3083 th->tracing = 1;
/* Save-and-clear the thread's raised flag around the callback. */
3086 raised = rb_thread_reset_raised(th);
/* Catch any exception/throw from the callback so cleanup always runs. */
3088 PUSH_TAG();
3089 if ((state = EXEC_TAG()) == 0) {
3090 result = (*func)(arg, tracing);
3093 if (raised) {
3094 rb_thread_set_raised(th);
3096 POP_TAG();
/* Restore the previous tracing state before propagating anything. */
3098 th->tracing = tracing;
/* Re-raise the non-local exit captured by EXEC_TAG, if any. */
3099 if (state) {
3100 JUMP_TAG(state);
3103 return result;
3107 * +Thread+ encapsulates the behavior of a thread of
3108 * execution, including the main thread of the Ruby script.
3110 * In the descriptions of the methods in this class, the parameter _sym_
3111 * refers to a symbol, which is either a quoted string or a
3112 * +Symbol+ (such as <code>:name</code>).
/* Register the Thread, ThreadGroup, Mutex and ThreadError Ruby classes
 * and their methods, install the trace-func API, and bootstrap the
 * native-thread layer for the main thread (GVL, interrupt lock, timer
 * thread). Called once during VM startup. */
3115 void
3116 Init_Thread(void)
3118 VALUE cThGroup;
/* Thread singleton (class-level) methods. */
3120 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3121 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3122 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3123 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3124 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3125 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3126 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3127 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3128 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3129 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3130 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3131 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
/* Thread.DEBUG accessors exist only in debug builds (THREAD_DEBUG < 0). */
3132 #if THREAD_DEBUG < 0
3133 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3134 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
3135 #endif
/* Thread instance methods. */
3137 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3138 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3139 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3140 rb_define_method(rb_cThread, "value", thread_value, 0);
3141 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3142 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3143 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3144 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3145 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3146 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3147 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3148 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3149 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3150 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3151 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3152 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3153 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3154 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3155 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3156 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3157 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3158 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3160 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
/* ThreadGroup class and its default group. */
3162 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3163 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3164 rb_define_method(cThGroup, "list", thgroup_list, 0);
3165 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3166 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3167 rb_define_method(cThGroup, "add", thgroup_add, 1);
/* Put the main thread into the default group and expose it as
 * ThreadGroup::Default. */
3170 rb_thread_t *th = GET_THREAD();
3171 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3172 rb_define_const(cThGroup, "Default", th->thgroup);
/* Mutex class. */
3175 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3176 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3177 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3178 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3179 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3180 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3181 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3182 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
/* Thread-local key used by the recursion guard machinery. */
3184 recursive_key = rb_intern("__recursive_key__");
3185 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3187 /* trace */
3188 rb_define_global_function("set_trace_func", set_trace_func, 1);
3189 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
3190 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
3192 /* init thread core */
3193 Init_native_thread();
3195 /* main thread setting */
3197 /* acquire global interpreter lock */
3198 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_interpreter_lock;
3199 native_mutex_initialize(lp);
3200 native_mutex_lock(lp);
3201 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
/* Start the timer thread that drives preemption/interrupt checks. */
3205 rb_thread_create_timer_thread();
3209 ruby_native_thread_p(void)
3211 rb_thread_t *rb_thread_check_ptr(rb_thread_t *ptr);
3212 rb_thread_t *th = ruby_thread_from_native();
3214 return th ? Qtrue : Qfalse;