* encoding.c (rb_filesystem_encoding): use locale encoding on Unix.
[ruby-svn.git] / thread.c
blob88f5b5225e5f58d5822fd55af4a81576147852d2
1 /**********************************************************************
3 thread.c -
5 $Author$
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
12 YARV Thread Desgin
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
25 model 2:
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
38 model 3:
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
45 /* for model 2 */
47 #include "eval_intern.h"
48 #include "vm.h"
49 #include "gc.h"
51 #ifndef THREAD_DEBUG
52 #define THREAD_DEBUG 0
53 #endif
55 VALUE rb_cMutex;
56 VALUE rb_cBarrier;
58 static void sleep_timeval(rb_thread_t *th, struct timeval time);
59 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
60 static void sleep_forever(rb_thread_t *th, int nodeadlock);
61 static double timeofday(void);
62 struct timeval rb_time_interval(VALUE);
63 static int rb_thread_dead(rb_thread_t *th);
65 static void rb_mutex_unlock_all(VALUE);
66 static void rb_check_deadlock(rb_vm_t *vm);
68 void rb_signal_exec(rb_thread_t *th, int sig);
69 void rb_disable_interrupt(void);
71 static const VALUE eKillSignal = INT2FIX(0);
72 static const VALUE eTerminateSignal = INT2FIX(1);
73 static volatile int system_working = 1;
75 inline static void
76 st_delete_wrap(st_table *table, st_data_t key)
78 st_delete(table, &key, 0);
81 /********************************************************************************/
83 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
85 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
86 struct rb_unblock_callback *old);
87 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
89 #define GVL_UNLOCK_BEGIN() do { \
90 rb_thread_t *_th_stored = GET_THREAD(); \
91 rb_gc_save_machine_context(_th_stored); \
92 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
94 #define GVL_UNLOCK_END() \
95 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
96 rb_thread_set_current(_th_stored); \
97 } while(0)
99 #define BLOCKING_REGION_CORE(exec) do { \
100 GVL_UNLOCK_BEGIN(); {\
101 exec; \
103 GVL_UNLOCK_END(); \
104 } while(0);
106 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
107 rb_thread_t *__th = GET_THREAD(); \
108 enum rb_thread_status __prev_status = __th->status; \
109 struct rb_unblock_callback __oldubf; \
110 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
111 __th->status = THREAD_STOPPED; \
112 thread_debug("enter blocking region (%p)\n", __th); \
113 BLOCKING_REGION_CORE(exec); \
114 thread_debug("leave blocking region (%p)\n", __th); \
115 remove_signal_thread_list(__th); \
116 reset_unblock_function(__th, &__oldubf); \
117 if (__th->status == THREAD_STOPPED) { \
118 __th->status = __prev_status; \
120 RUBY_VM_CHECK_INTS(); \
121 } while(0)
123 #if THREAD_DEBUG
124 #ifdef HAVE_VA_ARGS_MACRO
125 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
126 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
127 #define POSITION_FORMAT "%s:%d:"
128 #define POSITION_ARGS ,file, line
129 #else
130 void rb_thread_debug(const char *fmt, ...);
131 #define thread_debug rb_thread_debug
132 #define POSITION_FORMAT
133 #define POSITION_ARGS
134 #endif
136 # if THREAD_DEBUG < 0
137 static int rb_thread_debug_enabled;
139 static VALUE
140 rb_thread_s_debug(void)
142 return INT2NUM(rb_thread_debug_enabled);
145 static VALUE
146 rb_thread_s_debug_set(VALUE self, VALUE val)
148 rb_thread_debug_enabled = RTEST(val);
149 return val;
151 # else
152 # define rb_thread_debug_enabled THREAD_DEBUG
153 # endif
154 #else
155 #define thread_debug if(0)printf
156 #endif
158 #ifndef __ia64
159 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
160 #endif
161 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
162 VALUE *register_stack_start));
163 static void timer_thread_function(void *);
165 #if defined(_WIN32)
166 #include "thread_win32.c"
168 #define DEBUG_OUT() \
169 WaitForSingleObject(&debug_mutex, INFINITE); \
170 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
171 fflush(stdout); \
172 ReleaseMutex(&debug_mutex);
174 #elif defined(HAVE_PTHREAD_H)
175 #include "thread_pthread.c"
177 #define DEBUG_OUT() \
178 pthread_mutex_lock(&debug_mutex); \
179 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
180 fflush(stdout); \
181 pthread_mutex_unlock(&debug_mutex);
183 #else
184 #error "unsupported thread type"
185 #endif
187 #if THREAD_DEBUG
188 static int debug_mutex_initialized = 1;
189 static rb_thread_lock_t debug_mutex;
/*
 * Debug-trace printer used by the thread_debug() macro (compiled only
 * when THREAD_DEBUG is enabled).  Formats FMT into a local buffer and
 * emits it via the platform DEBUG_OUT() macro, which serializes output
 * with debug_mutex.  The mutex is lazily initialized on first call.
 */
void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    /* Lazy one-shot init of the debug mutex (flag starts at 1).
     * NOTE(review): this check-then-set is itself unsynchronized;
     * presumably the first call happens before threads race here. */
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
214 #endif
/*
 * Install FUNC/ARG as the thread's unblocking callback, saving the
 * previous callback into *OLD (if OLD is non-NULL) so it can be
 * restored by reset_unblock_function().
 *
 * If an interrupt is already pending we must not enter a blocking
 * region that would miss it: drop the lock, service interrupts, and
 * retry until the flag is observed clear while holding interrupt_lock.
 */
static void
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
		     struct rb_unblock_callback *old)
{
  check_ints:
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        /* interrupt arrived between the check and the lock: retry */
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);
}
/*
 * Restore the unblocking callback previously saved by
 * set_unblock_function(), under interrupt_lock so it cannot race
 * with rb_thread_interrupt().
 */
static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}
244 static void
245 rb_thread_interrupt(rb_thread_t *th)
247 native_mutex_lock(&th->interrupt_lock);
248 RUBY_VM_SET_INTERRUPT(th);
249 if (th->unblock.func) {
250 (th->unblock.func)(th->unblock.arg);
252 else {
253 /* none */
255 native_mutex_unlock(&th->interrupt_lock);
259 static int
260 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
262 VALUE thval = key;
263 rb_thread_t *th;
264 GetThreadPtr(thval, th);
266 if (th != main_thread) {
267 thread_debug("terminate_i: %p\n", th);
268 rb_thread_interrupt(th);
269 th->thrown_errinfo = eTerminateSignal;
270 th->status = THREAD_TO_KILL;
272 else {
273 thread_debug("terminate_i: main thread (%p)\n", th);
275 return ST_CONTINUE;
/*
 * Called from the main thread at interpreter shutdown: request
 * termination of every other living thread and wait (via the
 * scheduler) until this thread is the only one left.  Exceptions
 * raised while waiting are swallowed.  Finally clears system_working
 * so the timer thread stops.
 */
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
    }

    /* unlock all locking mutexes */
    if (th->keeping_mutexes) {
        rb_mutex_unlock_all(th->keeping_mutexes);
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
    system_working = 0;
}
308 static void
309 thread_cleanup_func_before_exec(void *th_ptr)
311 rb_thread_t *th = th_ptr;
312 th->status = THREAD_KILLED;
313 th->machine_stack_start = th->machine_stack_end = 0;
314 #ifdef __ia64
315 th->machine_register_stack_start = th->machine_register_stack_end = 0;
316 #endif
319 static void
320 thread_cleanup_func(void *th_ptr)
322 rb_thread_t *th = th_ptr;
323 thread_cleanup_func_before_exec(th_ptr);
324 native_thread_destroy(th);
327 extern void ruby_error_print(void);
328 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
329 void rb_thread_recycle_stack_release(VALUE *);
/* Record TH's machine stack bounds; delegated to the platform layer
 * (thread_pthread.c / thread_win32.c). */
void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}
/*
 * Entry point for a freshly created Ruby thread, called from the
 * platform thread trampoline with the machine stack base(s).
 *
 * Acquires the GVL, runs the thread's proc (or C function), and on
 * the way out: propagates a fatal error to the main thread if
 * abort_on_exception/$DEBUG demands it, verifies no mutex is still
 * being waited on, releases held mutexes, removes itself from
 * living_threads, wakes joiners, and releases the GVL.
 * Always returns 0.
 */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;

    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", th);

    native_mutex_lock(&th->vm->global_vm_lock);
    {
        thread_debug("thread start (get lock): %p\n", th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (th->first_proc) {
                    /* block-based thread: invoke the proc with args */
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = vm_invoke_proc(th, proc, proc->block.self,
                                               RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    /* C-function thread (rb_thread_create) */
                    th->value = (*th->first_func)((void *)th->first_args);
                }
            });
        }
        else {
            /* thread died with an exception / non-local exit */
            if (th->safe_level < 4 &&
                (th->vm->thread_abort_on_exception ||
                 th->abort_on_exception || RTEST(ruby_debug))) {
                errinfo = th->errinfo;
                if (NIL_P(errinfo)) errinfo = rb_errinfo();
            }
            th->value = Qnil;
        }
        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", th);

        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* treat with normal error object */
                rb_thread_raise(1, &errinfo, main_th);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   th, th->locking_mutex);
        }

        /* unlock all locking mutexes */
        if (th->keeping_mutexes) {
            rb_mutex_unlock_all(th->keeping_mutexes);
            th->keeping_mutexes = Qfalse;
        }

        /* delete self from living_threads */
        st_delete_wrap(th->vm->living_threads, th->self);

        /* wake up joining threads; a joiner that is the main thread
         * will handle the error itself, so clear errinfo for it */
        join_th = th->join_list_head;
        while (join_th) {
            if (join_th == main_th) errinfo = Qnil;
            rb_thread_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }
        if (th != main_th) rb_check_deadlock(th->vm);

        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    thread_cleanup_func(th);
    native_mutex_unlock(&th->vm->global_vm_lock);

    return 0;
}
/*
 * Shared back end of Thread.new/start/fork and rb_thread_create():
 * populate the rb_thread_t behind THVAL, register it in
 * living_threads, and start the native thread.  FN is a C function
 * to run, or NULL to run the caller's block.  Returns THVAL.
 * Raises ThreadError if the current ThreadGroup is frozen.
 */
static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: shouldn't put before above line */

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
    native_thread_create(th);
    return thval;
}
/*
 * Thread.new: allocate a thread object and run #initialize (which is
 * expected to call thread_create_core and set first_args).  Raises
 * ThreadError if a subclass's initialize forgot to call super.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);
    rb_obj_call_init(thread, argc, argv);
    GetThreadPtr(thread, th);
    if (!th->first_args) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
                 rb_class2name(klass));
    }
    return thread;
}
477 * call-seq:
478 * Thread.start([args]*) {|args| block } => thread
479 * Thread.fork([args]*) {|args| block } => thread
481 * Basically the same as <code>Thread::new</code>. However, if class
482 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
483 * subclass will not invoke the subclass's <code>initialize</code> method.
486 static VALUE
487 thread_start(VALUE klass, VALUE args)
489 return thread_create_core(rb_thread_alloc(klass), args, 0);
/*
 * Thread#initialize: requires a block; refuses re-initialization of
 * an already-started thread, reporting the file:line of the original
 * block when the proc's source location is known.
 */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        /* no proc or no location info: generic message */
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}
518 VALUE
519 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
521 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
525 /* +infty, for this purpose */
526 #define DELAY_INFTY 1E30
528 struct join_arg {
529 rb_thread_t *target, *waiting;
530 double limit;
531 int forever;
534 static VALUE
535 remove_from_join_list(VALUE arg)
537 struct join_arg *p = (struct join_arg *)arg;
538 rb_thread_t *target_th = p->target, *th = p->waiting;
540 if (target_th->status != THREAD_KILLED) {
541 rb_thread_t **pth = &target_th->join_list_head;
543 while (*pth) {
544 if (*pth == th) {
545 *pth = th->join_list_next;
546 break;
548 pth = &(*pth)->join_list_next;
552 return Qnil;
/*
 * Body of thread_join executed under rb_ensure: sleep until the
 * target dies.  Returns Qtrue on success, Qfalse on timeout.
 * With p->forever the sleep is deadlock-detectable; otherwise we
 * sleep in slices bounded by the absolute deadline p->limit.
 */
static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            sleep_forever(th, 1);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now);
        }
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}
/*
 * Core of Thread#join/#value: wait up to DELAY seconds (DELAY_INFTY
 * for forever) for TARGET_TH to finish.  Returns the target's self,
 * or Qnil on timeout.  Re-raises the target's pending exception in
 * the caller; FIXNUM errinfo (fatal tags) is deliberately ignored,
 * T_NODE errinfo is converted back into a jump-tag exception.
 */
static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        /* enlist ourselves on the target's join list; the ensure
         * clause removes us again whatever happens */
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* fatal tag: nothing to re-raise here */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            rb_exc_raise(vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}
625 * call-seq:
626 * thr.join => thr
627 * thr.join(limit) => thr
629 * The calling thread will suspend execution and run <i>thr</i>. Does not
630 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
631 * the time limit expires, <code>nil</code> will be returned, otherwise
632 * <i>thr</i> is returned.
634 * Any threads not joined will be killed when the main program exits. If
635 * <i>thr</i> had previously raised an exception and the
636 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
637 * (so the exception has not yet been processed) it will be processed at this
638 * time.
640 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
641 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
642 * x.join # Let x thread finish, a will be killed on exit.
644 * <em>produces:</em>
646 * axyz
648 * The following example illustrates the <i>limit</i> parameter.
650 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
651 * puts "Waiting" until y.join(0.15)
653 * <em>produces:</em>
655 * tick...
656 * Waiting
657 * tick...
658 * Waitingtick...
661 * tick...
664 static VALUE
665 thread_join_m(int argc, VALUE *argv, VALUE self)
667 rb_thread_t *target_th;
668 double delay = DELAY_INFTY;
669 VALUE limit;
671 GetThreadPtr(self, target_th);
673 rb_scan_args(argc, argv, "01", &limit);
674 if (!NIL_P(limit)) {
675 delay = rb_num2dbl(limit);
678 return thread_join(target_th, delay);
682 * call-seq:
683 * thr.value => obj
685 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
686 * its value.
688 * a = Thread.new { 2 + 2 }
689 * a.value #=> 4
692 static VALUE
693 thread_value(VALUE self)
695 rb_thread_t *th;
696 GetThreadPtr(self, th);
697 thread_join(th, DELAY_INFTY);
698 return th->value;
702 * Thread Scheduling
/*
 * Convert a duration D in (possibly fractional, possibly negative)
 * seconds into a struct timeval with tv_usec normalized to
 * [0, 1000000).
 *
 * Fix: the seconds were previously truncated through (int), which
 * silently overflows for durations beyond INT_MAX seconds on
 * platforms where time_t is wider than int; use time_t/long casts
 * instead.  Behavior for the normal range is unchanged.
 */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (time_t)d;                /* truncates toward zero */
    time.tv_usec = (long)((d - (double)time.tv_sec) * 1e6);
    if (time.tv_usec < 0) {
        /* negative fraction: borrow one second so tv_usec >= 0 */
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
/*
 * Sleep TH with no timeout until an interrupt changes its status.
 * When DEADLOCKABLE, the thread counts toward vm->sleeper and is
 * checked against whole-VM deadlock before each nap; it sleeps as
 * THREAD_STOPPED_FOREVER so the loop re-sleeps after a spurious
 * wakeup that did not change the status.
 */
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;

    th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == THREAD_STOPPED_FOREVER);
    th->status = prev_status;
}
/*
 * Fill *TP with the current time, preferring the monotonic clock
 * (immune to wall-clock adjustments) when available and falling back
 * to gettimeofday() otherwise.
 */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    } else
#endif
    {
        gettimeofday(tp, NULL);
    }
}
/*
 * Sleep TH for the duration TV.  The deadline is computed once as an
 * absolute time so that interrupts which leave status == STOPPED
 * cause a re-sleep for only the remaining interval, not the full
 * duration again.
 */
static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    /* absolute deadline = now + tv, with carry into tv_sec */
    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, to.tv_usec,
                     (long)tvn.tv_sec, tvn.tv_usec);
        /* recompute the remaining interval, borrowing on underflow */
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}
/*
 * Put the current thread to sleep with no timeout and without
 * participating in deadlock detection (e.g. plain Kernel#sleep).
 *
 * Fix: declare the empty parameter list as (void) — an old-style
 * unprototyped () declarator disables argument checking in C89/C99.
 */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/*
 * Put the current thread to sleep with no timeout, counting it as a
 * sleeper for VM deadlock detection (e.g. Thread.stop).
 *
 * Fix: declare the empty parameter list as (void) — an old-style
 * unprototyped () declarator disables argument checking in C89/C99.
 */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
/*
 * Current time as a double in seconds, using the monotonic clock
 * when available and falling back to gettimeofday().
 */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        return (double)ts.tv_sec + (double)ts.tv_nsec * 1e-9;
    } else
#endif
    {
        struct timeval now;

        gettimeofday(&now, NULL);
        return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
    }
}
818 static void
819 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
821 sleep_timeval(th, double2timeval(sleepsec));
824 static void
825 sleep_for_polling(rb_thread_t *th)
827 struct timeval time;
828 time.tv_sec = 0;
829 time.tv_usec = 100 * 1000; /* 0.1 sec */
830 sleep_timeval(th, time);
833 void
834 rb_thread_wait_for(struct timeval time)
836 rb_thread_t *th = GET_THREAD();
837 sleep_timeval(th, time);
/*
 * Service pending interrupts, then — only when other threads exist —
 * nap briefly so they get a chance to run.
 */
void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (rb_thread_alone()) {
        return;
    }
    sleep_for_polling(GET_THREAD());
}
850 struct timeval rb_time_timeval();
852 void
853 rb_thread_sleep(int sec)
855 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
/*
 * Yield the processor to another Ruby thread (model 2 / GVL build):
 * save this thread's machine context for the GC, drop the GVL, let
 * the OS scheduler run someone else, then reacquire the GVL and
 * service any interrupts that arrived meanwhile.  No-op when this is
 * the only thread.
 */
void
rb_thread_schedule(void)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        rb_gc_save_machine_context(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
        {
            native_thread_yield();
        }
        native_mutex_lock(&th->vm->global_vm_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        RUBY_VM_CHECK_INTS();
    }
}
881 int rb_thread_critical; /* TODO: dummy variable */
/*
 * C API: run FUNC(DATA1) with the GVL released so other Ruby threads
 * can proceed.  UBF is called (with DATA2) to interrupt FUNC when
 * this thread must wake; RB_UBF_DFL selects the default select-based
 * unblocking function with the thread itself as its argument.
 * Returns FUNC's result.
 */
VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();

    if (ubf == RB_UBF_DFL) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
    }, ubf, data2);

    return val;
}
904 * call-seq:
905 * Thread.pass => nil
907 * Invokes the thread scheduler to pass execution to another thread.
909 * a = Thread.new { print "a"; Thread.pass;
910 * print "b"; Thread.pass;
911 * print "c" }
912 * b = Thread.new { print "x"; Thread.pass;
913 * print "y"; Thread.pass;
914 * print "z" }
915 * a.join
916 * b.join
918 * <em>produces:</em>
920 * axbycz
/* Thread.pass — invoke the scheduler; always returns nil. */
static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}
/*
 * Drain TH's pending interrupts: deliver a queued signal, re-raise an
 * exception posted by another thread (kill/terminate become a fatal
 * jump-tag), and yield once per iteration so interrupt storms cannot
 * starve other threads.  Skipped entirely while an exception is
 * already in flight (raised_flag).
 */
void
rb_thread_execute_interrupts(rb_thread_t *th)
{
    if (th->raised_flag) return;
    while (th->interrupt_flag) {
        enum rb_thread_status status = th->status;
        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            th->exec_signal = 0;
            rb_signal_exec(th, sig);
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        /* thread pass */
        rb_thread_schedule();
    }
    /* NOTE(review): hook placement reconstructed as after the drain
     * loop — confirm against the original revision */
    EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
}
/* Obsolete GC hook kept only for link compatibility; threads are
 * marked elsewhere now. */
void
rb_gc_mark_threads(void)
{
    /* TODO: remove */
}
979 /*****************************************************/
/* Make TH eligible to run by interrupting whatever it is blocked on. */
static void
rb_thread_ready(rb_thread_t *th)
{
    rb_thread_interrupt(th);
}
987 static VALUE
988 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
990 VALUE exc;
992 again:
993 if (rb_thread_dead(th)) {
994 return Qnil;
997 if (th->thrown_errinfo != 0 || th->raised_flag) {
998 rb_thread_schedule();
999 goto again;
1002 exc = rb_make_exception(argc, argv);
1003 th->thrown_errinfo = exc;
1004 rb_thread_ready(th);
1005 return Qnil;
1008 void
1009 rb_thread_signal_raise(void *thptr, int sig)
1011 VALUE argv[2];
1012 rb_thread_t *th = thptr;
1014 argv[0] = rb_eSignal;
1015 argv[1] = INT2FIX(sig);
1016 rb_thread_raise(2, argv, th->vm->main_thread);
1019 void
1020 rb_thread_signal_exit(void *thptr)
1022 VALUE argv[2];
1023 rb_thread_t *th = thptr;
1025 argv[0] = rb_eSystemExit;
1026 argv[1] = rb_str_new2("exit");
1027 rb_thread_raise(2, argv, th->vm->main_thread);
1031 rb_thread_set_raised(rb_thread_t *th)
1033 if (th->raised_flag & RAISED_EXCEPTION) {
1034 return 1;
1036 th->raised_flag |= RAISED_EXCEPTION;
1037 return 0;
1041 rb_thread_reset_raised(rb_thread_t *th)
1043 if (!(th->raised_flag & RAISED_EXCEPTION)) {
1044 return 0;
1046 th->raised_flag &= ~RAISED_EXCEPTION;
1047 return 1;
/* Notify threads blocked on FD that it is being closed.
 * Not yet implemented at this revision. */
void
rb_thread_fd_close(int fd)
{
    /* TODO: fix me */
}
1057 * call-seq:
1058 * thr.raise(exception)
1060 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1061 * caller does not have to be <i>thr</i>.
1063 * Thread.abort_on_exception = true
1064 * a = Thread.new { sleep(200) }
1065 * a.raise("Gotcha")
1067 * <em>produces:</em>
1069 * prog.rb:3: Gotcha (RuntimeError)
1070 * from prog.rb:2:in `initialize'
1071 * from prog.rb:2:in `new'
1072 * from prog.rb:2
1075 static VALUE
1076 thread_raise_m(int argc, VALUE *argv, VALUE self)
1078 rb_thread_t *th;
1079 GetThreadPtr(self, th);
1080 rb_thread_raise(argc, argv, th);
1081 return Qnil;
1086 * call-seq:
1087 * thr.exit => thr or nil
1088 * thr.kill => thr or nil
1089 * thr.terminate => thr or nil
1091 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1092 * is already marked to be killed, <code>exit</code> returns the
1093 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1094 * the process.
/*
 * Thread#kill/#exit/#terminate — request termination of THREAD.
 * Killing another thread requires $SAFE < 4; a thread already dead
 * or dying is returned unchanged; killing the main thread exits the
 * whole process.  Otherwise posts eKillSignal and marks the thread
 * THREAD_TO_KILL.
 */
VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);

    rb_thread_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}
1125 * call-seq:
1126 * Thread.kill(thread) => thread
1128 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1130 * count = 0
1131 * a = Thread.new { loop { count += 1 } }
1132 * sleep(0.1) #=> 0
1133 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1134 * count #=> 93947
1135 * a.alive? #=> false
/* Thread.kill(thread) — class-method wrapper over Thread#kill. */
static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}
1146 * call-seq:
1147 * Thread.exit => thread
1149 * Terminates the currently running thread and schedules another thread to be
1150 * run. If this thread is already marked to be killed, <code>exit</code>
1151 * returns the <code>Thread</code>. If this is the main thread, or the last
1152 * thread, exit the process.
/* Thread.exit — kill the currently running thread. */
static VALUE
rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}
1163 * call-seq:
1164 * thr.wakeup => thr
1166 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1167 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1169 * c = Thread.new { Thread.stop; puts "hey!" }
1170 * c.wakeup
1172 * <em>produces:</em>
1174 * hey!
1177 VALUE
1178 rb_thread_wakeup(VALUE thread)
1180 rb_thread_t *th;
1181 GetThreadPtr(thread, th);
1183 if (th->status == THREAD_KILLED) {
1184 rb_raise(rb_eThreadError, "killed thread");
1186 rb_thread_ready(th);
1187 if (th->status != THREAD_TO_KILL) {
1188 th->status = THREAD_RUNNABLE;
1190 return thread;
1195 * call-seq:
1196 * thr.run => thr
1198 * Wakes up <i>thr</i>, making it eligible for scheduling.
1200 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1201 * Thread.pass
1202 * puts "Got here"
1203 * a.run
1204 * a.join
1206 * <em>produces:</em>
1209 * Got here
/* Thread#run — wake THREAD and immediately invoke the scheduler. */
VALUE
rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}
1223 * call-seq:
1224 * Thread.stop => nil
1226 * Stops execution of the current thread, putting it into a ``sleep'' state,
1227 * and schedules execution of another thread.
1229 * a = Thread.new { print "a"; Thread.stop; print "c" }
1230 * Thread.pass
1231 * print "b"
1232 * a.run
1233 * a.join
1235 * <em>produces:</em>
1237 * abc
/*
 * Thread.stop — sleep the current thread until woken.  Refuses to
 * stop the only thread (nothing could ever wake it); the deadly
 * sleep participates in VM deadlock detection.
 */
VALUE
rb_thread_stop(void)
{
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}
1251 static int
1252 thread_list_i(st_data_t key, st_data_t val, void *data)
1254 VALUE ary = (VALUE)data;
1255 rb_thread_t *th;
1256 GetThreadPtr((VALUE)key, th);
1258 switch (th->status) {
1259 case THREAD_RUNNABLE:
1260 case THREAD_STOPPED:
1261 case THREAD_STOPPED_FOREVER:
1262 case THREAD_TO_KILL:
1263 rb_ary_push(ary, th->self);
1264 default:
1265 break;
1267 return ST_CONTINUE;
1270 /********************************************************************/
1273 * call-seq:
1274 * Thread.list => array
1276 * Returns an array of <code>Thread</code> objects for all threads that are
1277 * either runnable or stopped.
1279 * Thread.new { sleep(200) }
1280 * Thread.new { 1000000.times {|i| i*i } }
1281 * Thread.new { Thread.stop }
1282 * Thread.list.each {|t| p t}
1284 * <em>produces:</em>
1286 * #<Thread:0x401b3e84 sleep>
1287 * #<Thread:0x401b3f38 run>
1288 * #<Thread:0x401b3fb0 sleep>
1289 * #<Thread:0x401bdf4c run>
1292 VALUE
1293 rb_thread_list(void)
1295 VALUE ary = rb_ary_new();
1296 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1297 return ary;
/* C API: the Thread object for the currently executing thread. */
VALUE
rb_thread_current(void)
{
    return GET_THREAD()->self;
}
1307 * call-seq:
1308 * Thread.current => thread
1310 * Returns the currently executing thread.
1312 * Thread.current #=> #<Thread:0x401bdf4c run>
/* Thread.current — Ruby-level wrapper over rb_thread_current(). */
static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}
/* C API: the Thread object for the VM's main thread. */
VALUE
rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}
/* Thread.main — Ruby-level wrapper over rb_thread_main(). */
static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}
1335 * call-seq:
1336 * Thread.abort_on_exception => true or false
1338 * Returns the status of the global ``abort on exception'' condition. The
1339 * default is <code>false</code>. When set to <code>true</code>, or if the
1340 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1341 * command line option <code>-d</code> was specified) all threads will abort
1342 * (the process will <code>exit(0)</code>) if an exception is raised in any
1343 * thread. See also <code>Thread::abort_on_exception=</code>.
1346 static VALUE
1347 rb_thread_s_abort_exc(void)
1349 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1354 * call-seq:
1355 * Thread.abort_on_exception= boolean => true or false
1357 * When set to <code>true</code>, all threads will abort if an exception is
1358 * raised. Returns the new state.
1360 * Thread.abort_on_exception = true
1361 * t1 = Thread.new do
1362 * puts "In new thread"
1363 * raise "Exception from thread"
1364 * end
1365 * sleep(1)
1366 * puts "not reached"
1368 * <em>produces:</em>
1370 * In new thread
1371 * prog.rb:4: Exception from thread (RuntimeError)
1372 * from prog.rb:2:in `initialize'
1373 * from prog.rb:2:in `new'
1374 * from prog.rb:2
/* Thread.abort_on_exception= — set the VM-global flag ($SAFE < 4
 * required); returns the assigned value. */
static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}
1387 * call-seq:
1388 * thr.abort_on_exception => true or false
1390 * Returns the status of the thread-local ``abort on exception'' condition for
1391 * <i>thr</i>. The default is <code>false</code>. See also
1392 * <code>Thread::abort_on_exception=</code>.
1395 static VALUE
1396 rb_thread_abort_exc(VALUE thread)
1398 rb_thread_t *th;
1399 GetThreadPtr(thread, th);
1400 return th->abort_on_exception ? Qtrue : Qfalse;
1405 * call-seq:
1406 * thr.abort_on_exception= boolean => true or false
1408 * When set to <code>true</code>, causes all threads (including the main
1409 * program) to abort if an exception is raised in <i>thr</i>. The process will
1410 * effectively <code>exit(0)</code>.
1413 static VALUE
1414 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1416 rb_thread_t *th;
1417 rb_secure(4);
1419 GetThreadPtr(thread, th);
1420 th->abort_on_exception = RTEST(val);
1421 return val;
1426 * call-seq:
1427 * thr.group => thgrp or nil
1429 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1430 * the thread is not a member of any group.
1432 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1435 VALUE
1436 rb_thread_group(VALUE thread)
1438 rb_thread_t *th;
1439 VALUE group;
1440 GetThreadPtr(thread, th);
1441 group = th->thgroup;
1443 if (!group) {
1444 group = Qnil;
1446 return group;
1449 static const char *
1450 thread_status_name(enum rb_thread_status status)
1452 switch (status) {
1453 case THREAD_RUNNABLE:
1454 return "run";
1455 case THREAD_STOPPED:
1456 case THREAD_STOPPED_FOREVER:
1457 return "sleep";
1458 case THREAD_TO_KILL:
1459 return "aborting";
1460 case THREAD_KILLED:
1461 return "dead";
1462 default:
1463 return "unknown";
1467 static int
1468 rb_thread_dead(rb_thread_t *th)
1470 return th->status == THREAD_KILLED;
1475 * call-seq:
1476 * thr.status => string, false or nil
1478 * Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
1479 * sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
1480 * ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
1481 * <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
1482 * terminated with an exception.
1484 * a = Thread.new { raise("die now") }
1485 * b = Thread.new { Thread.stop }
1486 * c = Thread.new { Thread.exit }
1487 * d = Thread.new { sleep }
1488 * d.kill #=> #<Thread:0x401b3678 aborting>
1489 * a.status #=> nil
1490 * b.status #=> "sleep"
1491 * c.status #=> false
1492 * d.status #=> "aborting"
1493 * Thread.current.status #=> "run"
1496 static VALUE
1497 rb_thread_status(VALUE thread)
1499 rb_thread_t *th;
1500 GetThreadPtr(thread, th);
1502 if (rb_thread_dead(th)) {
1503 if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
1504 /* TODO */ ) {
1505 return Qnil;
1507 return Qfalse;
1509 return rb_str_new2(thread_status_name(th->status));
1514 * call-seq:
1515 * thr.alive? => true or false
1517 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1519 * thr = Thread.new { }
1520 * thr.join #=> #<Thread:0x401b3fb0 dead>
1521 * Thread.current.alive? #=> true
1522 * thr.alive? #=> false
1525 static VALUE
1526 rb_thread_alive_p(VALUE thread)
1528 rb_thread_t *th;
1529 GetThreadPtr(thread, th);
1531 if (rb_thread_dead(th))
1532 return Qfalse;
1533 return Qtrue;
1537 * call-seq:
1538 * thr.stop? => true or false
1540 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1542 * a = Thread.new { Thread.stop }
1543 * b = Thread.current
1544 * a.stop? #=> true
1545 * b.stop? #=> false
1548 static VALUE
1549 rb_thread_stop_p(VALUE thread)
1551 rb_thread_t *th;
1552 GetThreadPtr(thread, th);
1554 if (rb_thread_dead(th))
1555 return Qtrue;
1556 if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
1557 return Qtrue;
1558 return Qfalse;
1562 * call-seq:
1563 * thr.safe_level => integer
1565 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1566 * levels can help when implementing sandboxes which run insecure code.
1568 * thr = Thread.new { $SAFE = 3; sleep }
1569 * Thread.current.safe_level #=> 0
1570 * thr.safe_level #=> 3
1573 static VALUE
1574 rb_thread_safe_level(VALUE thread)
1576 rb_thread_t *th;
1577 GetThreadPtr(thread, th);
1579 return INT2NUM(th->safe_level);
1583 * call-seq:
1584 * thr.inspect => string
1586 * Dump the name, id, and status of _thr_ to a string.
1589 static VALUE
1590 rb_thread_inspect(VALUE thread)
1592 const char *cname = rb_obj_classname(thread);
1593 rb_thread_t *th;
1594 const char *status;
1595 VALUE str;
1597 GetThreadPtr(thread, th);
1598 status = thread_status_name(th->status);
1599 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1600 OBJ_INFECT(str, thread);
1602 return str;
1605 VALUE
1606 rb_thread_local_aref(VALUE thread, ID id)
1608 rb_thread_t *th;
1609 VALUE val;
1611 GetThreadPtr(thread, th);
1612 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1613 rb_raise(rb_eSecurityError, "Insecure: thread locals");
1615 if (!th->local_storage) {
1616 return Qnil;
1618 if (st_lookup(th->local_storage, id, &val)) {
1619 return val;
1621 return Qnil;
1625 * call-seq:
1626 * thr[sym] => obj or nil
1628 * Attribute Reference---Returns the value of a thread-local variable, using
1629 * either a symbol or a string name. If the specified variable does not exist,
1630 * returns <code>nil</code>.
1632 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1633 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1634 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1635 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1637 * <em>produces:</em>
1639 * #<Thread:0x401b3b3c sleep>: C
1640 * #<Thread:0x401b3bc8 sleep>: B
1641 * #<Thread:0x401b3c68 sleep>: A
1642 * #<Thread:0x401bdf4c run>:
1645 static VALUE
1646 rb_thread_aref(VALUE thread, VALUE id)
1648 return rb_thread_local_aref(thread, rb_to_id(id));
1651 VALUE
1652 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
1654 rb_thread_t *th;
1655 GetThreadPtr(thread, th);
1657 if (rb_safe_level() >= 4 && th != GET_THREAD()) {
1658 rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
1660 if (OBJ_FROZEN(thread)) {
1661 rb_error_frozen("thread locals");
1663 if (!th->local_storage) {
1664 th->local_storage = st_init_numtable();
1666 if (NIL_P(val)) {
1667 st_delete_wrap(th->local_storage, id);
1668 return Qnil;
1670 st_insert(th->local_storage, id, val);
1671 return val;
1675 * call-seq:
1676 * thr[sym] = obj => obj
1678 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1679 * using either a symbol or a string. See also <code>Thread#[]</code>.
1682 static VALUE
1683 rb_thread_aset(VALUE self, ID id, VALUE val)
1685 return rb_thread_local_aset(self, rb_to_id(id), val);
1689 * call-seq:
1690 * thr.key?(sym) => true or false
1692 * Returns <code>true</code> if the given string (or symbol) exists as a
1693 * thread-local variable.
1695 * me = Thread.current
1696 * me[:oliver] = "a"
1697 * me.key?(:oliver) #=> true
1698 * me.key?(:stanley) #=> false
1701 static VALUE
1702 rb_thread_key_p(VALUE self, VALUE key)
1704 rb_thread_t *th;
1705 ID id = rb_to_id(key);
1707 GetThreadPtr(self, th);
1709 if (!th->local_storage) {
1710 return Qfalse;
1712 if (st_lookup(th->local_storage, id, 0)) {
1713 return Qtrue;
1715 return Qfalse;
1718 static int
1719 thread_keys_i(ID key, VALUE value, VALUE ary)
1721 rb_ary_push(ary, ID2SYM(key));
1722 return ST_CONTINUE;
1725 static int
1726 vm_living_thread_num(rb_vm_t *vm)
1728 return vm->living_threads->num_entries;
1732 rb_thread_alone()
1734 int num = 1;
1735 if (GET_THREAD()->vm->living_threads) {
1736 num = vm_living_thread_num(GET_THREAD()->vm);
1737 thread_debug("rb_thread_alone: %d\n", num);
1739 return num == 1;
1743 * call-seq:
1744 * thr.keys => array
1746 * Returns an an array of the names of the thread-local variables (as Symbols).
1748 * thr = Thread.new do
1749 * Thread.current[:cat] = 'meow'
1750 * Thread.current["dog"] = 'woof'
1751 * end
1752 * thr.join #=> #<Thread:0x401b3f10 dead>
1753 * thr.keys #=> [:dog, :cat]
1756 static VALUE
1757 rb_thread_keys(VALUE self)
1759 rb_thread_t *th;
1760 VALUE ary = rb_ary_new();
1761 GetThreadPtr(self, th);
1763 if (th->local_storage) {
1764 st_foreach(th->local_storage, thread_keys_i, ary);
1766 return ary;
1770 * call-seq:
1771 * thr.priority => integer
1773 * Returns the priority of <i>thr</i>. Default is inherited from the
1774 * current thread which creating the new thread, or zero for the
1775 * initial main thread; higher-priority threads will run before
1776 * lower-priority threads.
1778 * Thread.current.priority #=> 0
1781 static VALUE
1782 rb_thread_priority(VALUE thread)
1784 rb_thread_t *th;
1785 GetThreadPtr(thread, th);
1786 return INT2NUM(th->priority);
1791 * call-seq:
1792 * thr.priority= integer => thr
1794 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1795 * will run before lower-priority threads.
1797 * count1 = count2 = 0
1798 * a = Thread.new do
1799 * loop { count1 += 1 }
1800 * end
1801 * a.priority = -1
1803 * b = Thread.new do
1804 * loop { count2 += 1 }
1805 * end
1806 * b.priority = -2
1807 * sleep 1 #=> 1
1808 * count1 #=> 622504
1809 * count2 #=> 5832
1812 static VALUE
1813 rb_thread_priority_set(VALUE thread, VALUE prio)
1815 rb_thread_t *th;
1816 GetThreadPtr(thread, th);
1818 rb_secure(4);
1820 th->priority = NUM2INT(prio);
1821 native_thread_apply_priority(th);
1822 return prio;
1825 /* for IO */
1827 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1828 void
1829 rb_fd_init(volatile rb_fdset_t *fds)
1831 fds->maxfd = 0;
1832 fds->fdset = ALLOC(fd_set);
1833 FD_ZERO(fds->fdset);
1836 void
1837 rb_fd_term(rb_fdset_t *fds)
1839 if (fds->fdset) xfree(fds->fdset);
1840 fds->maxfd = 0;
1841 fds->fdset = 0;
1844 void
1845 rb_fd_zero(rb_fdset_t *fds)
1847 if (fds->fdset) {
1848 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
1849 FD_ZERO(fds->fdset);
1853 static void
1854 rb_fd_resize(int n, rb_fdset_t *fds)
1856 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
1857 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
1859 if (m < sizeof(fd_set)) m = sizeof(fd_set);
1860 if (o < sizeof(fd_set)) o = sizeof(fd_set);
1862 if (m > o) {
1863 fds->fdset = realloc(fds->fdset, m);
1864 memset((char *)fds->fdset + o, 0, m - o);
1866 if (n >= fds->maxfd) fds->maxfd = n + 1;
1869 void
1870 rb_fd_set(int n, rb_fdset_t *fds)
1872 rb_fd_resize(n, fds);
1873 FD_SET(n, fds->fdset);
1876 void
1877 rb_fd_clr(int n, rb_fdset_t *fds)
1879 if (n >= fds->maxfd) return;
1880 FD_CLR(n, fds->fdset);
1884 rb_fd_isset(int n, const rb_fdset_t *fds)
1886 if (n >= fds->maxfd) return 0;
1887 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1890 void
1891 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
1893 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
1895 if (size < sizeof(fd_set)) size = sizeof(fd_set);
1896 dst->maxfd = max;
1897 dst->fdset = realloc(dst->fdset, size);
1898 memcpy(dst->fdset, src, size);
1902 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
1904 fd_set *r = NULL, *w = NULL, *e = NULL;
1905 if (readfds) {
1906 rb_fd_resize(n - 1, readfds);
1907 r = rb_fd_ptr(readfds);
1909 if (writefds) {
1910 rb_fd_resize(n - 1, writefds);
1911 w = rb_fd_ptr(writefds);
1913 if (exceptfds) {
1914 rb_fd_resize(n - 1, exceptfds);
1915 e = rb_fd_ptr(exceptfds);
1917 return select(n, r, w, e, timeout);
1920 #undef FD_ZERO
1921 #undef FD_SET
1922 #undef FD_CLR
1923 #undef FD_ISSET
1925 #define FD_ZERO(f) rb_fd_zero(f)
1926 #define FD_SET(i, f) rb_fd_set(i, f)
1927 #define FD_CLR(i, f) rb_fd_clr(i, f)
1928 #define FD_ISSET(i, f) rb_fd_isset(i, f)
1930 #endif
1932 #if defined(__CYGWIN__) || defined(_WIN32)
/* Three-way comparison of two timevals: negative, zero or positive
 * like a qsort comparator. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;
    if (sec_diff != 0) {
	return sec_diff;
    }
    return a->tv_usec - b->tv_usec;
}

/* *rest -= *wait.  Returns 1 on success; returns 0 (without completing
 * the subtraction) when the result would be negative. */
static int
subst(struct timeval *rest, const struct timeval *wait)
{
    /* borrow from the seconds field until the usec subtraction fits */
    while (rest->tv_usec < wait->tv_usec) {
	if (rest->tv_sec <= wait->tv_sec) {
	    return 0;
	}
	rest->tv_sec -= 1;
	rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
1954 #endif
/*
 * Cancellable select(2) wrapper used for all fd waiting in this file.
 * The actual select() runs outside the GVL (BLOCKING_REGION) and is
 * retried after EINTR/ERESTART with the caller's fd_sets restored from
 * the saved copies.  On non-Linux systems the remaining timeout is
 * recomputed from an absolute deadline, since the kernel there does not
 * update the timeval argument.  On Cygwin/Win32 it polls in 100ms
 * slices so the thread stays interruptible.  Returns select()'s result
 * with errno taken from the failing call.
 */
1956 static int
1957 do_select(int n, fd_set *read, fd_set *write, fd_set *except,
1958 struct timeval *timeout)
1960 int result, lerrno;
/* snapshots used to restore the in/out fd_sets before each retry */
1961 fd_set orig_read, orig_write, orig_except;
1963 #ifndef linux
/* absolute deadline for recomputing the remaining timeout */
1964 double limit = 0;
1965 struct timeval wait_rest;
1967 if (timeout) {
1968 limit = timeofday() +
1969 (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
/* operate on a private copy so the caller's timeval stays intact */
1970 wait_rest = *timeout;
1971 timeout = &wait_rest;
1973 #endif
1975 if (read) orig_read = *read;
1976 if (write) orig_write = *write;
1977 if (except) orig_except = *except;
1979 retry:
1980 lerrno = 0;
1982 #if defined(__CYGWIN__) || defined(_WIN32)
1984 /* polling duration: 100ms */
1985 struct timeval wait_100ms, *wait;
1986 wait_100ms.tv_sec = 0;
1987 wait_100ms.tv_usec = 100 * 1000; /* 100 ms */
1989 do {
/* wait for the shorter of the remaining timeout and one 100ms slice */
1990 wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
1991 BLOCKING_REGION({
1992 do {
1993 result = select(n, read, write, except, wait);
1994 if (result < 0) lerrno = errno;
1995 if (result != 0) break;
/* slice expired with no activity: restore the sets and poll again */
1997 if (read) *read = orig_read;
1998 if (write) *write = orig_write;
1999 if (except) *except = orig_except;
2000 wait = &wait_100ms;
2001 } while (__th->interrupt_flag == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
2002 }, 0, 0);
2003 } while (result == 0 && (timeout == 0 || subst(timeout, &wait_100ms)));
2005 #else
2006 BLOCKING_REGION({
2007 result = select(n, read, write, except, timeout);
2008 if (result < 0) lerrno = errno;
2009 }, ubf_select, GET_THREAD());
2010 #endif
/* expose the errno of the select() call itself */
2012 errno = lerrno;
2014 if (result < 0) {
2015 switch (errno) {
2016 case EINTR:
2017 #ifdef ERESTART
2018 case ERESTART:
2019 #endif
/* interrupted: restore fd_sets, shrink the timeout, and retry */
2020 if (read) *read = orig_read;
2021 if (write) *write = orig_write;
2022 if (except) *except = orig_except;
2023 #ifndef linux
2024 if (timeout) {
2025 double d = limit - timeofday();
2027 wait_rest.tv_sec = (unsigned int)d;
2028 wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
2029 if (wait_rest.tv_sec < 0) wait_rest.tv_sec = 0;
2030 if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
2032 #endif
2033 goto retry;
2034 default:
2035 break;
2038 return result;
2041 static void
2042 rb_thread_wait_fd_rw(int fd, int read)
2044 int result = 0;
2045 thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
2047 if (fd < 0) {
2048 rb_raise(rb_eIOError, "closed stream");
2050 while (result <= 0) {
2051 rb_fdset_t set;
2052 rb_fd_init(&set);
2053 FD_SET(fd, &set);
2055 if (read) {
2056 result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
2058 else {
2059 result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
2062 rb_fd_term(&set);
2064 if (result < 0) {
2065 rb_sys_fail(0);
2069 thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
2072 void
2073 rb_thread_wait_fd(int fd)
2075 rb_thread_wait_fd_rw(fd, 1);
2079 rb_thread_fd_writable(int fd)
2081 rb_thread_wait_fd_rw(fd, 0);
2082 return Qtrue;
2086 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2087 struct timeval *timeout)
2089 if (!read && !write && !except) {
2090 if (!timeout) {
2091 rb_thread_sleep_forever();
2092 return 0;
2094 rb_thread_wait_for(*timeout);
2095 return 0;
2097 else {
2098 return do_select(max, read, write, except, timeout);
2104 * for GC
2107 #ifdef USE_CONSERVATIVE_STACK_END
/* Publish an address inside the current C stack frame so the
 * conservative GC knows how far the live stack extends. */
2108 void
2109 rb_gc_set_stack_end(VALUE **stack_end_p)
2111 VALUE stack_end;
2112 *stack_end_p = &stack_end;
2114 #endif
/* Record th's machine context so the GC can scan values that currently
 * live only in registers or on the C stack. */
2116 void
2117 rb_gc_save_machine_context(rb_thread_t *th)
2119 SET_MACHINE_STACK_END(&th->machine_stack_end);
/* force callee-saved registers / register windows into memory */
2120 FLUSH_REGISTER_WINDOWS;
2121 #ifdef __ia64
2122 th->machine_register_stack_end = rb_ia64_bsp();
2123 #endif
/* setjmp spills the register file into th->machine_regs for scanning */
2124 setjmp(th->machine_regs);
2131 int rb_get_next_signal(rb_vm_t *vm);
/*
 * Called periodically from the timer thread (arg is the VM).  Requests
 * a timer interrupt on the running thread for time slicing, and
 * forwards one buffered signal to the main thread, temporarily marking
 * it runnable so the interrupt is honored.
 */
2133 static void
2134 timer_thread_function(void *arg)
2136 rb_vm_t *vm = arg; /* TODO: fix me for Multi-VM */
2138 /* for time slice */
2139 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
2141 /* check signal */
2142 if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
2143 rb_thread_t *mth = vm->main_thread;
2144 enum rb_thread_status prev_status = mth->status;
/* pop one pending signal and hand it to the main thread */
2145 mth->exec_signal = rb_get_next_signal(vm);
2146 thread_debug("main_thread: %s\n", thread_status_name(prev_status));
2147 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2148 (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
/* wake the main thread for delivery, then restore its old status */
2149 if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
2150 rb_thread_interrupt(mth);
2151 mth->status = prev_status;
2154 #if 0
2155 /* prove profiler */
2156 if (vm->prove_profile.enable) {
2157 rb_thread_t *th = vm->running_thread;
2159 if (vm->during_gc) {
2160 /* GC prove profiling */
2163 #endif
2166 void
2167 rb_thread_stop_timer_thread(void)
2169 if (timer_thread_id) {
2170 system_working = 0;
2171 native_thread_join(timer_thread_id);
2172 timer_thread_id = 0;
2176 void
2177 rb_thread_reset_timer_thread(void)
2179 timer_thread_id = 0;
2182 void
2183 rb_thread_start_timer_thread(void)
2185 rb_thread_create_timer_thread();
2188 static int
2189 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
2191 int i;
2192 VALUE lines = (VALUE)val;
2194 for (i = 0; i < RARRAY_LEN(lines); i++) {
2195 if (RARRAY_PTR(lines)[i] != Qnil) {
2196 RARRAY_PTR(lines)[i] = INT2FIX(0);
2199 return ST_CONTINUE;
2202 static void
2203 clear_coverage(void)
2205 extern VALUE rb_get_coverages(void);
2206 VALUE coverages = rb_get_coverages();
2207 if (RTEST(coverages)) {
2208 st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
2212 static int
2213 terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2215 VALUE thval = key;
2216 rb_thread_t *th;
2217 GetThreadPtr(thval, th);
2219 if (th != current_th) {
2220 thread_cleanup_func(th);
2222 return ST_CONTINUE;
2225 void
2226 rb_thread_atfork(void)
2228 rb_thread_t *th = GET_THREAD();
2229 rb_vm_t *vm = th->vm;
2230 VALUE thval = th->self;
2231 vm->main_thread = th;
2233 st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
2234 st_clear(vm->living_threads);
2235 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2236 vm->sleeper = 0;
2237 clear_coverage();
2238 rb_reset_random_seed();
2241 static int
2242 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2244 VALUE thval = key;
2245 rb_thread_t *th;
2246 GetThreadPtr(thval, th);
2248 if (th != current_th) {
2249 thread_cleanup_func_before_exec(th);
2251 return ST_CONTINUE;
2254 void
2255 rb_thread_atfork_before_exec(void)
2257 rb_thread_t *th = GET_THREAD();
2258 rb_vm_t *vm = th->vm;
2259 VALUE thval = th->self;
2260 vm->main_thread = th;
2262 st_foreach(vm->living_threads, terminate_atfork_before_exec_i, (st_data_t)th);
2263 st_clear(vm->living_threads);
2264 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2265 vm->sleeper = 0;
2266 clear_coverage();
/* Internal state of a ThreadGroup object. */
2269 struct thgroup {
/* nonzero once ThreadGroup#enclose has been called */
2270 int enclosed;
/* the ThreadGroup VALUE wrapping this struct */
2271 VALUE group;
2275 * Document-class: ThreadGroup
2277 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2278 * threads as a group. A <code>Thread</code> can belong to only one
2279 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2280 * remove it from any previous group.
2282 * Newly created threads belong to the same group as the thread from which they
2283 * were created.
2286 static VALUE thgroup_s_alloc(VALUE);
2287 static VALUE
2288 thgroup_s_alloc(VALUE klass)
2290 VALUE group;
2291 struct thgroup *data;
2293 group = Data_Make_Struct(klass, struct thgroup, 0, -1, data);
2294 data->enclosed = 0;
2295 data->group = group;
2297 return group;
2300 struct thgroup_list_params {
2301 VALUE ary;
2302 VALUE group;
2305 static int
2306 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2308 VALUE thread = (VALUE)key;
2309 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2310 VALUE group = ((struct thgroup_list_params *)data)->group;
2311 rb_thread_t *th;
2312 GetThreadPtr(thread, th);
2314 if (th->thgroup == group) {
2315 rb_ary_push(ary, thread);
2317 return ST_CONTINUE;
2321 * call-seq:
2322 * thgrp.list => array
2324 * Returns an array of all existing <code>Thread</code> objects that belong to
2325 * this group.
2327 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2330 static VALUE
2331 thgroup_list(VALUE group)
2333 VALUE ary = rb_ary_new();
2334 struct thgroup_list_params param;
2336 param.ary = ary;
2337 param.group = group;
2338 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2339 return ary;
2344 * call-seq:
2345 * thgrp.enclose => thgrp
2347 * Prevents threads from being added to or removed from the receiving
2348 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2349 * <code>ThreadGroup</code>.
2351 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2352 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2353 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2354 * tg.add thr
2356 * <em>produces:</em>
2358 * ThreadError: can't move from the enclosed thread group
2361 VALUE
2362 thgroup_enclose(VALUE group)
2364 struct thgroup *data;
2366 Data_Get_Struct(group, struct thgroup, data);
2367 data->enclosed = 1;
2369 return group;
2374 * call-seq:
2375 * thgrp.enclosed? => true or false
2377 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2378 * ThreadGroup#enclose.
2381 static VALUE
2382 thgroup_enclosed_p(VALUE group)
2384 struct thgroup *data;
2386 Data_Get_Struct(group, struct thgroup, data);
2387 if (data->enclosed)
2388 return Qtrue;
2389 return Qfalse;
2394 * call-seq:
2395 * thgrp.add(thread) => thgrp
2397 * Adds the given <em>thread</em> to this group, removing it from any other
2398 * group to which it may have previously belonged.
2400 * puts "Initial group is #{ThreadGroup::Default.list}"
2401 * tg = ThreadGroup.new
2402 * t1 = Thread.new { sleep }
2403 * t2 = Thread.new { sleep }
2404 * puts "t1 is #{t1}"
2405 * puts "t2 is #{t2}"
2406 * tg.add(t1)
2407 * puts "Initial group now #{ThreadGroup::Default.list}"
2408 * puts "tg group now #{tg.list}"
2410 * <em>produces:</em>
2412 * Initial group is #<Thread:0x401bdf4c>
2413 * t1 is #<Thread:0x401b3c90>
2414 * t2 is #<Thread:0x401b3c18>
2415 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
2416 * tg group now #<Thread:0x401b3c90>
2419 static VALUE
2420 thgroup_add(VALUE group, VALUE thread)
2422 rb_thread_t *th;
2423 struct thgroup *data;
2425 rb_secure(4);
2426 GetThreadPtr(thread, th);
2428 if (OBJ_FROZEN(group)) {
2429 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
2431 Data_Get_Struct(group, struct thgroup, data);
2432 if (data->enclosed) {
2433 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
2436 if (!th->thgroup) {
2437 return Qnil;
2440 if (OBJ_FROZEN(th->thgroup)) {
2441 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
2443 Data_Get_Struct(th->thgroup, struct thgroup, data);
2444 if (data->enclosed) {
2445 rb_raise(rb_eThreadError,
2446 "can't move from the enclosed thread group");
2449 th->thgroup = group;
2450 return group;
2455 * Document-class: Mutex
2457 * Mutex implements a simple semaphore that can be used to coordinate access to
2458 * shared data from multiple concurrent threads.
2460 * Example:
2462 * require 'thread'
2463 * semaphore = Mutex.new
2465 * a = Thread.new {
2466 * semaphore.synchronize {
2467 * # access shared resource
2471 * b = Thread.new {
2472 * semaphore.synchronize {
2473 * # access shared resource
/* Internal representation of a Mutex object. */
2479 typedef struct mutex_struct {
/* native lock guarding the fields below */
2480 rb_thread_lock_t lock;
/* waiters block on this condition variable */
2481 rb_thread_cond_t cond;
/* owning thread, or 0 while unlocked */
2482 rb_thread_t volatile *th;
/* threads currently waiting on `cond` / already signalled from it */
2483 volatile int cond_waiting, cond_notified;
/* next entry in the owner's keeping_mutexes list (Qfalse terminates) */
2484 VALUE next_mutex;
2485 } mutex_t;
2487 #define GetMutexPtr(obj, tobj) \
2488 Data_Get_Struct(obj, mutex_t, tobj)
2490 static const char *mutex_unlock(mutex_t *mutex);
2492 static void
2493 mutex_mark(void *ptr)
2495 if (ptr) {
2496 mutex_t *mutex = ptr;
2497 if (mutex->th) {
2498 rb_gc_mark(mutex->th->self);
2503 static void
2504 mutex_free(void *ptr)
2506 if (ptr) {
2507 mutex_t *mutex = ptr;
2508 if (mutex->th) {
2509 /* rb_warn("free locked mutex"); */
2510 mutex_unlock(mutex);
2512 native_mutex_destroy(&mutex->lock);
2513 native_cond_destroy(&mutex->cond);
2515 ruby_xfree(ptr);
2518 static VALUE
2519 mutex_alloc(VALUE klass)
2521 VALUE volatile obj;
2522 mutex_t *mutex;
2524 obj = Data_Make_Struct(klass, mutex_t, mutex_mark, mutex_free, mutex);
2525 native_mutex_initialize(&mutex->lock);
2526 native_cond_initialize(&mutex->cond);
2527 return obj;
2531 * call-seq:
2532 * Mutex.new => mutex
2534 * Creates a new Mutex
2536 static VALUE
2537 mutex_initialize(VALUE self)
2539 return self;
2542 VALUE
2543 rb_mutex_new(void)
2545 return mutex_alloc(rb_cMutex);
2549 * call-seq:
2550 * mutex.locked? => true or false
2552 * Returns +true+ if this lock is currently held by some thread.
2554 VALUE
2555 rb_mutex_locked_p(VALUE self)
2557 mutex_t *mutex;
2558 GetMutexPtr(self, mutex);
2559 return mutex->th ? Qtrue : Qfalse;
2562 static void
2563 mutex_locked(rb_thread_t *th, VALUE self)
2565 if (th->keeping_mutexes) {
2566 mutex_t *mutex;
2567 GetMutexPtr(self, mutex);
2568 mutex->next_mutex = th->keeping_mutexes;
2570 th->keeping_mutexes = self;
2574 * call-seq:
2575 * mutex.try_lock => true or false
2577 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2578 * lock was granted.
2580 VALUE
2581 rb_mutex_trylock(VALUE self)
2583 mutex_t *mutex;
2584 VALUE locked = Qfalse;
2585 GetMutexPtr(self, mutex);
2587 if (mutex->th == GET_THREAD()) {
2588 rb_raise(rb_eThreadError, "deadlock; recursive locking");
2591 native_mutex_lock(&mutex->lock);
2592 if (mutex->th == 0) {
2593 mutex->th = GET_THREAD();
2594 locked = Qtrue;
2596 mutex_locked(GET_THREAD(), self);
2598 native_mutex_unlock(&mutex->lock);
2600 return locked;
/*
 * Body of Mutex#lock's blocking region: wait on the mutex's condvar
 * until the mutex can be claimed.  Returns 0 on normal acquisition,
 * 1 when the wait was interrupted (RUBY_VM_INTERRUPTED), and 2 when
 * last_thread is set, i.e. every other thread is asleep and blocking
 * here would deadlock the VM.
 */
2603 static int
2604 lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
2606 int interrupted = 0;
2607 #if 0 /* for debug */
2608 native_thread_yield();
2609 #endif
2611 native_mutex_lock(&mutex->lock);
2612 th->transition_for_lock = 0;
/* the comma expression claims the mutex for th as soon as it frees up */
2613 while (mutex->th || (mutex->th = th, 0)) {
2614 if (last_thread) {
2615 interrupted = 2;
2616 break;
2619 mutex->cond_waiting++;
2620 native_cond_wait(&mutex->cond, &mutex->lock);
/* consume the notification mutex_unlock() accounted for us */
2621 mutex->cond_notified--;
2623 if (RUBY_VM_INTERRUPTED(th)) {
2624 interrupted = 1;
2625 break;
2628 th->transition_for_lock = 1;
2629 native_mutex_unlock(&mutex->lock);
2631 if (interrupted == 2) native_thread_yield();
2632 #if 0 /* for debug */
2633 native_thread_yield();
2634 #endif
2636 return interrupted;
2639 static void
2640 lock_interrupt(void *ptr)
2642 mutex_t *mutex = (mutex_t *)ptr;
2643 native_mutex_lock(&mutex->lock);
2644 if (mutex->cond_waiting > 0) {
2645 native_cond_broadcast(&mutex->cond);
2646 mutex->cond_notified = mutex->cond_waiting;
2647 mutex->cond_waiting = 0;
2649 native_mutex_unlock(&mutex->lock);
2653 * call-seq:
2654 * mutex.lock => true or false
2656 * Attempts to grab the lock and waits if it isn't available.
2657 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
2659 VALUE
2660 rb_mutex_lock(VALUE self)
/* fast path: trylock succeeds when the mutex is currently free */
2662 if (rb_mutex_trylock(self) == Qfalse) {
2663 mutex_t *mutex;
2664 rb_thread_t *th = GET_THREAD();
2665 GetMutexPtr(self, mutex);
/* slow path: park on the mutex's condvar until we own it */
2667 while (mutex->th != th) {
2668 int interrupted;
2669 enum rb_thread_status prev_status = th->status;
2670 int last_thread = 0;
2671 struct rb_unblock_callback oldubf;
/* let Thread#kill / signals break us out of the condvar wait */
2673 set_unblock_function(th, lock_interrupt, mutex, &oldubf);
2674 th->status = THREAD_STOPPED_FOREVER;
2675 th->vm->sleeper++;
2676 th->locking_mutex = self;
/* if every living thread would now be asleep, waiting means deadlock */
2677 if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
2678 last_thread = 1;
2681 th->transition_for_lock = 1;
2682 BLOCKING_REGION_CORE({
2683 interrupted = lock_func(th, mutex, last_thread);
2685 th->transition_for_lock = 0;
2686 remove_signal_thread_list(th);
2687 reset_unblock_function(th, &oldubf);
2689 th->locking_mutex = Qfalse;
/* interrupted == 2: lock_func bailed out to avoid VM-wide deadlock */
2690 if (mutex->th && interrupted == 2) {
2691 rb_check_deadlock(th->vm);
2693 if (th->status == THREAD_STOPPED_FOREVER) {
2694 th->status = prev_status;
2696 th->vm->sleeper--;
2698 if (mutex->th == th) mutex_locked(th, self);
/* service pending interrupts (Thread#raise, signals, ...) */
2700 if (interrupted) {
2701 RUBY_VM_CHECK_INTS();
2705 return self;
/*
 * Release *mutex*, which must be held by the calling thread, waking one
 * waiter if any, and unlink it from the owner's keeping_mutexes list.
 * Returns NULL on success or a static error message when the mutex is
 * not locked / locked by another thread.
 */
2708 static const char *
2709 mutex_unlock(mutex_t *mutex)
2711 const char *err = NULL;
2712 rb_thread_t *th = GET_THREAD();
2713 mutex_t *th_mutex;
2715 native_mutex_lock(&mutex->lock);
2717 if (mutex->th == 0) {
2718 err = "Attempt to unlock a mutex which is not locked";
2720 else if (mutex->th != GET_THREAD()) {
2721 err = "Attempt to unlock a mutex which is locked by another thread";
2723 else {
2724 mutex->th = 0;
2725 if (mutex->cond_waiting > 0) {
2726 /* waiting thread */
2727 native_cond_signal(&mutex->cond);
2728 mutex->cond_waiting--;
2729 mutex->cond_notified++;
2733 native_mutex_unlock(&mutex->lock);
2735 if (!err) {
/* unlink from th->keeping_mutexes (singly linked through next_mutex) */
2736 GetMutexPtr(th->keeping_mutexes, th_mutex);
2737 if (th_mutex == mutex) {
2738 th->keeping_mutexes = mutex->next_mutex;
2740 else {
2741 while (1) {
2742 mutex_t *tmp_mutex;
2743 GetMutexPtr(th_mutex->next_mutex, tmp_mutex);
2744 if (tmp_mutex == mutex) {
2745 th_mutex->next_mutex = tmp_mutex->next_mutex;
2746 break;
2748 th_mutex = tmp_mutex;
2751 mutex->next_mutex = Qfalse;
2754 return err;
2758 * call-seq:
2759 * mutex.unlock => self
2761 * Releases the lock.
2762 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2764 VALUE
2765 rb_mutex_unlock(VALUE self)
2767 const char *err;
2768 mutex_t *mutex;
2769 GetMutexPtr(self, mutex);
2771 err = mutex_unlock(mutex);
2772 if (err) rb_raise(rb_eThreadError, err);
2774 return self;
2777 static void
2778 rb_mutex_unlock_all(VALUE mutexes)
2780 const char *err;
2781 mutex_t *mutex;
2783 while (mutexes) {
2784 GetMutexPtr(mutexes, mutex);
2785 /* rb_warn("mutex #<%s:%p> remains to be locked by terminated thread",
2786 rb_obj_classname(mutexes), (void*)mutexes); */
2787 mutexes = mutex->next_mutex;
2788 err = mutex_unlock(mutex);
2789 if (err) rb_bug("invalid keeping_mutexes: %s", err);
2793 static VALUE
2794 rb_mutex_sleep_forever(VALUE time)
2796 rb_thread_sleep_deadly();
2797 return Qnil;
2800 static VALUE
2801 rb_mutex_wait_for(VALUE time)
2803 const struct timeval *t = (struct timeval *)time;
2804 rb_thread_wait_for(*t);
2805 return Qnil;
2808 VALUE
2809 rb_mutex_sleep(VALUE self, VALUE timeout)
2811 time_t beg, end;
2812 struct timeval t;
2814 if (!NIL_P(timeout)) {
2815 t = rb_time_interval(timeout);
2817 rb_mutex_unlock(self);
2818 beg = time(0);
2819 if (NIL_P(timeout)) {
2820 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2822 else {
2823 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2825 end = time(0) - beg;
2826 return INT2FIX(end);
2830 * call-seq:
2831 * mutex.sleep(timeout = nil) => number
2833 * Releases the lock and sleeps +timeout+ seconds if it is given and
2834 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2835 * the current thread.
2837 static VALUE
2838 mutex_sleep(int argc, VALUE *argv, VALUE self)
2840 VALUE timeout;
2842 rb_scan_args(argc, argv, "01", &timeout);
2843 return rb_mutex_sleep(self, timeout);
2847 * call-seq:
2848 * mutex.synchronize { ... } => result of the block
2850 * Obtains a lock, runs the block, and releases the lock when the block
2851 * completes. See the example under +Mutex+.
2854 VALUE
2855 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2857 rb_mutex_lock(mutex);
2858 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
2862 * Document-class: Barrier
/* Singly-linked list node used to queue threads waiting on a Barrier. */
2864 typedef struct rb_thread_list_struct rb_thread_list_t;
2866 struct rb_thread_list_struct {
/* the parked thread */
2867 rb_thread_t *th;
2868 rb_thread_list_t *next;
2871 static void
2872 thlist_mark(void *ptr)
2874 rb_thread_list_t *q = ptr;
2876 for (; q; q = q->next) {
2877 rb_gc_mark(q->th->self);
2881 static void
2882 thlist_free(void *ptr)
2884 rb_thread_list_t *q = ptr, *next;
2886 for (; q; q = next) {
2887 next = q->next;
2888 ruby_xfree(q);
2892 static int
2893 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
2895 int woken = 0;
2896 rb_thread_list_t *q;
2898 while ((q = *list) != NULL) {
2899 rb_thread_t *th = q->th;
2901 *list = q->next;
2902 ruby_xfree(q);
2903 if (th->status != THREAD_KILLED) {
2904 rb_thread_ready(th);
2905 if (!woken && woken_thread) *woken_thread = th;
2906 if (++woken >= maxth && maxth) break;
2909 return woken;
/* Barrier state: owning thread plus a FIFO of waiters.  `tail` points at
 * the terminating NULL link of `waiting` so appends are O(1). */
2912 typedef struct {
2913 rb_thread_t *owner; /* current owner; NULL once released */
2914 rb_thread_list_t *waiting, **tail;
2915 } rb_barrier_t;
2917 static void
2918 barrier_mark(void *ptr)
2920 rb_barrier_t *b = ptr;
2922 if (b->owner) rb_gc_mark(b->owner->self);
2923 thlist_mark(b->waiting);
2926 static void
2927 barrier_free(void *ptr)
2929 rb_barrier_t *b = ptr;
2931 b->owner = 0;
2932 thlist_free(b->waiting);
2933 b->waiting = 0;
2934 ruby_xfree(ptr);
2937 static VALUE
2938 barrier_alloc(VALUE klass)
2940 VALUE volatile obj;
2941 rb_barrier_t *barrier;
2943 obj = Data_Make_Struct(klass, rb_barrier_t, barrier_mark, barrier_free, barrier);
2944 barrier->owner = GET_THREAD();
2945 barrier->waiting = 0;
2946 barrier->tail = &barrier->waiting;
2947 return obj;
2950 VALUE
2951 rb_barrier_new(void)
2953 return barrier_alloc(rb_cBarrier);
2956 VALUE
2957 rb_barrier_wait(VALUE self)
2959 rb_barrier_t *barrier;
2960 rb_thread_list_t *q;
2962 Data_Get_Struct(self, rb_barrier_t, barrier);
2963 if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
2964 barrier->owner = 0;
2965 if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
2966 return Qtrue;
2968 else if (barrier->owner == GET_THREAD()) {
2969 return Qfalse;
2971 else {
2972 *barrier->tail = q = ALLOC(rb_thread_list_t);
2973 q->th = GET_THREAD();
2974 q->next = 0;
2975 barrier->tail = &q->next;
2976 rb_thread_sleep_forever();
2977 return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
2981 VALUE
2982 rb_barrier_release(VALUE self)
2984 rb_barrier_t *barrier;
2985 unsigned int n;
2987 Data_Get_Struct(self, rb_barrier_t, barrier);
2988 if (barrier->owner != GET_THREAD()) {
2989 rb_raise(rb_eThreadError, "not owned");
2991 n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
2992 return n ? UINT2NUM(n) : Qfalse;
2995 /* variables for recursive traversals */
2996 static ID recursive_key;
2998 static VALUE
2999 recursive_check(VALUE hash, VALUE obj)
3001 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3002 return Qfalse;
3004 else {
3005 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
3007 if (NIL_P(list) || TYPE(list) != T_HASH)
3008 return Qfalse;
3009 if (NIL_P(rb_hash_lookup(list, obj)))
3010 return Qfalse;
3011 return Qtrue;
3015 static VALUE
3016 recursive_push(VALUE hash, VALUE obj)
3018 VALUE list, sym;
3020 sym = ID2SYM(rb_frame_this_func());
3021 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3022 hash = rb_hash_new();
3023 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3024 list = Qnil;
3026 else {
3027 list = rb_hash_aref(hash, sym);
3029 if (NIL_P(list) || TYPE(list) != T_HASH) {
3030 list = rb_hash_new();
3031 rb_hash_aset(hash, sym, list);
3033 rb_hash_aset(list, obj, Qtrue);
3034 return hash;
3037 static void
3038 recursive_pop(VALUE hash, VALUE obj)
3040 VALUE list, sym;
3042 sym = ID2SYM(rb_frame_this_func());
3043 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3044 VALUE symname;
3045 VALUE thrname;
3046 symname = rb_inspect(sym);
3047 thrname = rb_inspect(rb_thread_current());
3049 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
3050 StringValuePtr(symname), StringValuePtr(thrname));
3052 list = rb_hash_aref(hash, sym);
3053 if (NIL_P(list) || TYPE(list) != T_HASH) {
3054 VALUE symname = rb_inspect(sym);
3055 VALUE thrname = rb_inspect(rb_thread_current());
3056 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
3057 StringValuePtr(symname), StringValuePtr(thrname));
3059 rb_hash_delete(list, obj);
3062 VALUE
3063 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
3065 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3066 VALUE objid = rb_obj_id(obj);
3068 if (recursive_check(hash, objid)) {
3069 return (*func) (obj, arg, Qtrue);
3071 else {
3072 VALUE result = Qundef;
3073 int state;
3075 hash = recursive_push(hash, objid);
3076 PUSH_TAG();
3077 if ((state = EXEC_TAG()) == 0) {
3078 result = (*func) (obj, arg, Qfalse);
3080 POP_TAG();
3081 recursive_pop(hash, objid);
3082 if (state)
3083 JUMP_TAG(state);
3084 return result;
3088 /* tracer */
3090 static rb_event_hook_t *
3091 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3093 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
3094 hook->func = func;
3095 hook->flag = events;
3096 hook->data = data;
3097 return hook;
3100 static void
3101 thread_reset_event_flags(rb_thread_t *th)
3103 rb_event_hook_t *hook = th->event_hooks;
3104 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
3106 while (hook) {
3107 flag |= hook->flag;
3108 hook = hook->next;
3112 void
3113 rb_thread_add_event_hook(rb_thread_t *th,
3114 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3116 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3117 hook->next = th->event_hooks;
3118 th->event_hooks = hook;
3119 thread_reset_event_flags(th);
3122 static int
3123 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
3125 VALUE thval = key;
3126 rb_thread_t *th;
3127 GetThreadPtr(thval, th);
3129 if (flag) {
3130 th->event_flags |= RUBY_EVENT_VM;
3132 else {
3133 th->event_flags &= (~RUBY_EVENT_VM);
3135 return ST_CONTINUE;
3138 static void
3139 set_threads_event_flags(int flag)
3141 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
3144 void
3145 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3147 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3148 rb_vm_t *vm = GET_VM();
3150 hook->next = vm->event_hooks;
3151 vm->event_hooks = hook;
3153 set_threads_event_flags(1);
3156 static int
3157 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
3159 rb_event_hook_t *prev = NULL, *hook = *root, *next;
3161 while (hook) {
3162 next = hook->next;
3163 if (func == 0 || hook->func == func) {
3164 if (prev) {
3165 prev->next = hook->next;
3167 else {
3168 *root = hook->next;
3170 xfree(hook);
3172 else {
3173 prev = hook;
3175 hook = next;
3177 return -1;
3181 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
3183 int ret = remove_event_hook(&th->event_hooks, func);
3184 thread_reset_event_flags(th);
3185 return ret;
3189 rb_remove_event_hook(rb_event_hook_func_t func)
3191 rb_vm_t *vm = GET_VM();
3192 rb_event_hook_t *hook = vm->event_hooks;
3193 int ret = remove_event_hook(&vm->event_hooks, func);
3195 if (hook != NULL && vm->event_hooks == NULL) {
3196 set_threads_event_flags(0);
3199 return ret;
3202 static int
3203 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
3205 rb_thread_t *th;
3206 GetThreadPtr((VALUE)key, th);
3207 rb_thread_remove_event_hook(th, 0);
3208 return ST_CONTINUE;
3211 void
3212 rb_clear_trace_func(void)
3214 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
3215 rb_remove_event_hook(0);
3218 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
3221 * call-seq:
3222 * set_trace_func(proc) => proc
3223 * set_trace_func(nil) => nil
3225 * Establishes _proc_ as the handler for tracing, or disables
3226 * tracing if the parameter is +nil+. _proc_ takes up
3227 * to six parameters: an event name, a filename, a line number, an
3228 * object id, a binding, and the name of a class. _proc_ is
3229 * invoked whenever an event occurs. Events are: <code>c-call</code>
3230 * (call a C-language routine), <code>c-return</code> (return from a
3231 * C-language routine), <code>call</code> (call a Ruby method),
3232 * <code>class</code> (start a class or module definition),
3233 * <code>end</code> (finish a class or module definition),
3234 * <code>line</code> (execute code on a new line), <code>raise</code>
3235 * (raise an exception), and <code>return</code> (return from a Ruby
3236 * method). Tracing is disabled within the context of _proc_.
3238 * class Test
3239 * def test
3240 * a = 1
3241 * b = 2
3242 * end
3243 * end
3245 * set_trace_func proc { |event, file, line, id, binding, classname|
3246 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3248 * t = Test.new
3249 * t.test
3251 * line prog.rb:11 false
3252 * c-call prog.rb:11 new Class
3253 * c-call prog.rb:11 initialize Object
3254 * c-return prog.rb:11 initialize Object
3255 * c-return prog.rb:11 new Class
3256 * line prog.rb:12 false
3257 * call prog.rb:2 test Test
3258 * line prog.rb:3 test Test
3259 * line prog.rb:4 test Test
3260 * return prog.rb:4 test Test
3263 static VALUE
3264 set_trace_func(VALUE obj, VALUE trace)
3266 rb_remove_event_hook(call_trace_func);
3268 if (NIL_P(trace)) {
3269 return Qnil;
3272 if (!rb_obj_is_proc(trace)) {
3273 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3276 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
3277 return trace;
3280 static void
3281 thread_add_trace_func(rb_thread_t *th, VALUE trace)
3283 if (!rb_obj_is_proc(trace)) {
3284 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3287 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
3290 static VALUE
3291 thread_add_trace_func_m(VALUE obj, VALUE trace)
3293 rb_thread_t *th;
3294 GetThreadPtr(obj, th);
3295 thread_add_trace_func(th, trace);
3296 return trace;
3299 static VALUE
3300 thread_set_trace_func_m(VALUE obj, VALUE trace)
3302 rb_thread_t *th;
3303 GetThreadPtr(obj, th);
3304 rb_thread_remove_event_hook(th, call_trace_func);
3306 if (NIL_P(trace)) {
3307 return Qnil;
3309 thread_add_trace_func(th, trace);
3310 return trace;
3313 static const char *
3314 get_event_name(rb_event_flag_t event)
3316 switch (event) {
3317 case RUBY_EVENT_LINE:
3318 return "line";
3319 case RUBY_EVENT_CLASS:
3320 return "class";
3321 case RUBY_EVENT_END:
3322 return "end";
3323 case RUBY_EVENT_CALL:
3324 return "call";
3325 case RUBY_EVENT_RETURN:
3326 return "return";
3327 case RUBY_EVENT_C_CALL:
3328 return "c-call";
3329 case RUBY_EVENT_C_RETURN:
3330 return "c-return";
3331 case RUBY_EVENT_RAISE:
3332 return "raise";
3333 default:
3334 return "unknown";
3338 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
/* Bundle of hook arguments passed (as a VALUE-cast pointer) from
 * call_trace_func() through ruby_suppress_tracing() to call_trace_proc(). */
3340 struct call_trace_func_args {
3341 rb_event_flag_t event; /* which RUBY_EVENT_* fired */
3342 VALUE proc; /* the user-supplied trace Proc */
3343 VALUE self; /* receiver at the event site */
3344 ID id; /* method id; used directly only for c-call/c-return */
3345 VALUE klass; /* method owner; used directly only for c-call/c-return */
3348 static VALUE
3349 call_trace_proc(VALUE args, int tracing)
3351 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3352 VALUE eventname = rb_str_new2(get_event_name(p->event));
3353 VALUE filename = rb_str_new2(rb_sourcefile());
3354 VALUE argv[6];
3355 int line = rb_sourceline();
3356 ID id = 0;
3357 VALUE klass = 0;
3359 if (p->event == RUBY_EVENT_C_CALL ||
3360 p->event == RUBY_EVENT_C_RETURN) {
3361 id = p->id;
3362 klass = p->klass;
3364 else {
3365 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
3367 if (id == ID_ALLOCATOR)
3368 return Qnil;
3369 if (klass) {
3370 if (TYPE(klass) == T_ICLASS) {
3371 klass = RBASIC(klass)->klass;
3373 else if (FL_TEST(klass, FL_SINGLETON)) {
3374 klass = rb_iv_get(klass, "__attached__");
3378 argv[0] = eventname;
3379 argv[1] = filename;
3380 argv[2] = INT2FIX(line);
3381 argv[3] = id ? ID2SYM(id) : Qnil;
3382 argv[4] = p->self ? rb_binding_new() : Qnil;
3383 argv[5] = klass ? klass : Qnil;
3385 return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
3388 static void
3389 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3391 struct call_trace_func_args args;
3393 args.event = event;
3394 args.proc = proc;
3395 args.self = self;
3396 args.id = id;
3397 args.klass = klass;
3398 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
/*
 * Runs func(arg, tracing) with the current thread's `tracing` flag set so
 * nested trace events are suppressed.  When `always` is nonzero the call is
 * made even if tracing is already in progress.  The thread's raised flag is
 * saved/restored around the call, and a non-local exit from func is
 * re-thrown only after the thread state has been restored.
 */
3401 VALUE
3402 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3404 rb_thread_t *th = GET_THREAD();
3405 int state, raised, tracing;
3406 VALUE result = Qnil;
/* already inside a trace handler and not forced: do nothing */
3408 if ((tracing = th->tracing) != 0 && !always) {
3409 return Qnil;
3411 else {
3412 th->tracing = 1;
/* clear the raised flag for the duration of the callback */
3415 raised = rb_thread_reset_raised(th);
3417 PUSH_TAG();
3418 if ((state = EXEC_TAG()) == 0) {
3419 result = (*func)(arg, tracing);
3422 if (raised) {
/* restore the raised flag cleared above */
3423 rb_thread_set_raised(th);
3425 POP_TAG();
/* restore the previous tracing state */
3427 th->tracing = tracing;
3428 if (state) {
/* re-throw the non-local exit captured by EXEC_TAG */
3429 JUMP_TAG(state);
3432 return result;
3436 * +Thread+ encapsulates the behavior of a thread of
3437 * execution, including the main thread of the Ruby script.
3439 * In the descriptions of the methods in this class, the parameter _sym_
3440 * refers to a symbol, which is either a quoted string or a
3441 * +Symbol+ (such as <code>:name</code>).
/*
 * Boot-time registration: defines Thread, ThreadGroup, Mutex, ThreadError
 * and the tracing entry points, then initializes the native-thread layer,
 * acquires the GVL on behalf of the main thread, and starts the timer
 * thread.
 */
3444 void
3445 Init_Thread(void)
3447 #undef rb_intern
3449 VALUE cThGroup;
/* Thread class-level (singleton) methods */
3451 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3452 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3453 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3454 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3455 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3456 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3457 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3458 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3459 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3460 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3461 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3462 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
/* Thread.DEBUG accessors only exist in debug builds */
3463 #if THREAD_DEBUG < 0
3464 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3465 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
3466 #endif
/* Thread instance methods */
3468 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3469 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3470 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3471 rb_define_method(rb_cThread, "value", thread_value, 0);
3472 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3473 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3474 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3475 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3476 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3477 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3478 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3479 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3480 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3481 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3482 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3483 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3484 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3485 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3486 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3487 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3488 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3489 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3491 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
/* ThreadGroup class and its Default group (holds the main thread) */
3493 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3494 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3495 rb_define_method(cThGroup, "list", thgroup_list, 0);
3496 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3497 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3498 rb_define_method(cThGroup, "add", thgroup_add, 1);
3501 rb_thread_t *th = GET_THREAD();
3502 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3503 rb_define_const(cThGroup, "Default", th->thgroup);
/* Mutex class */
3506 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3507 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3508 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3509 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3510 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3511 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3512 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3513 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
/* key for the per-thread recursion table used by rb_exec_recursive() */
3515 recursive_key = rb_intern("__recursive_key__");
3516 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3518 /* trace */
3519 rb_define_global_function("set_trace_func", set_trace_func, 1);
3520 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
3521 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
3523 /* init thread core */
3524 Init_native_thread();
3526 /* main thread setting */
3528 /* acquire global interpreter lock */
3529 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
3530 native_mutex_initialize(lp);
3531 native_mutex_lock(lp);
3532 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
3536 rb_thread_create_timer_thread();
/* reference otherwise-unused symbols to silence warnings on some configs */
3538 (void)native_mutex_trylock;
3539 (void)ruby_thread_set_native;
3543 ruby_native_thread_p(void)
3545 rb_thread_t *th = ruby_thread_from_native();
3547 return th ? Qtrue : Qfalse;
3550 static int
3551 check_deadlock_i(st_data_t key, st_data_t val, int *found)
3553 VALUE thval = key;
3554 rb_thread_t *th;
3555 GetThreadPtr(thval, th);
3557 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
3558 *found = 1;
3560 else if (th->locking_mutex) {
3561 mutex_t *mutex;
3562 GetMutexPtr(th->locking_mutex, mutex);
3564 native_mutex_lock(&mutex->lock);
3565 if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
3566 *found = 1;
3568 native_mutex_unlock(&mutex->lock);
3571 return (*found) ? ST_STOP : ST_CONTINUE;
3574 #if 0 /* for debug */
/* Disabled debugging helper: dumps each thread's status, interrupt flag,
 * lock-transition flag and (if blocked) its mutex's owner/notified state. */
3575 static int
3576 debug_i(st_data_t key, st_data_t val, int *found)
3578 VALUE thval = key;
3579 rb_thread_t *th;
3580 GetThreadPtr(thval, th);
3582 printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
3583 if (th->locking_mutex) {
3584 mutex_t *mutex;
3585 GetMutexPtr(th->locking_mutex, mutex);
3587 native_mutex_lock(&mutex->lock);
3588 printf(" %p %d\n", mutex->th, mutex->cond_notified);
3589 native_mutex_unlock(&mutex->lock);
3591 else puts("");
3593 return ST_CONTINUE;
3595 #endif
/*
 * Raises a fatal "deadlock detected" error on the main thread when every
 * living thread is asleep with no way to be woken.  Only runs once the
 * sleeper count has reached the number of living threads.
 */
3597 static void
3598 rb_check_deadlock(rb_vm_t *vm)
3600 int found = 0;
/* not all threads are sleeping yet: nothing to do */
3602 if (vm_living_thread_num(vm) > vm->sleeper) return;
3603 if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
/* scan all threads; `found` becomes 1 if any can still make progress */
3605 st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
3607 if (!found) {
3608 VALUE argv[2];
3609 argv[0] = rb_eFatal;
3610 argv[1] = rb_str_new2("deadlock detected");
3611 #if 0 /* for debug */
3612 printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
3613 st_foreach(vm->living_threads, debug_i, (st_data_t)0);
3614 #endif
/* deliver the fatal error to the main thread so the process exits */
3615 rb_thread_raise(2, argv, vm->main_thread);
3619 static void
3620 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3622 VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
3623 if (coverage && RBASIC(coverage)->klass == 0) {
3624 long line = rb_sourceline() - 1;
3625 long count;
3626 if (RARRAY_PTR(coverage)[line] == Qnil) {
3627 rb_bug("bug");
3629 count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
3630 if (POSFIXABLE(count)) {
3631 RARRAY_PTR(coverage)[line] = LONG2FIX(count);
3636 VALUE
3637 rb_get_coverages(void)
3639 return GET_VM()->coverages;
3642 void
3643 rb_set_coverages(VALUE coverages)
3645 GET_VM()->coverages = coverages;
3646 rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
3649 void
3650 rb_reset_coverages(void)
3652 GET_VM()->coverages = Qfalse;
3653 rb_remove_event_hook(update_coverage);