/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional ruby thread.

  model 2: Native Thread with Giant VM lock
    Using pthreads (or Windows threads); Ruby threads run concurrently.

  model 3: Native Thread with fine grain lock
    Using pthreads; Ruby threads run concurrently or in parallel.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock) can run.
    At a thread-scheduling point, the running thread releases the GVL.
    Before a blocking operation, the running thread must release the GVL
    so that another thread can continue; after the blocking operation, it
    must check for interrupts (RUBY_VM_CHECK_INTS).

    Every VM can run in parallel.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    All threads run concurrently or in parallel, so exclusive access
    control is needed for shared objects.  For example, a fine grain lock
    must be taken on every access to a String or Array object.
 */

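/*
  An illustrative sketch (not part of this file's API) of the model 2
  blocking-operation protocol described above, using the BLOCKING_REGION
  macro and the ubf_select unblocking function referenced later in this
  file; fd, buf, len, and n are hypothetical:

      BLOCKING_REGION({
          n = read(fd, buf, len);   // GVL is released around this call
      }, ubf_select, GET_THREAD());
      // here the GVL has been re-acquired and RUBY_VM_CHECK_INTS() has run
 */
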
/* for model 2 */

#include "eval_intern.h"
#include "vm.h"
#include "gc.h"

#ifndef THREAD_DEBUG
#define THREAD_DEBUG 0
#endif

VALUE rb_cMutex;
VALUE rb_cBarrier;

static void sleep_timeval(rb_thread_t *th, struct timeval time);
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
static void sleep_forever(rb_thread_t *th, int nodeadlock);
static double timeofday(void);
struct timeval rb_time_interval(VALUE);
static int rb_thread_dead(rb_thread_t *th);

static void rb_check_deadlock(rb_vm_t *vm);

void rb_signal_exec(rb_thread_t *th, int sig);
void rb_disable_interrupt(void);

static const VALUE eKillSignal = INT2FIX(0);
static const VALUE eTerminateSignal = INT2FIX(1);
static volatile int system_working = 1;

inline static void
st_delete_wrap(st_table *table, st_data_t key)
{
    st_delete(table, &key, 0);
}

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                                 struct rb_unblock_callback *old);
static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);

#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  rb_gc_save_machine_context(_th_stored); \
  native_mutex_unlock(&_th_stored->vm->global_vm_lock)

#define GVL_UNLOCK_END() \
  native_mutex_lock(&_th_stored->vm->global_vm_lock); \
  rb_thread_set_current(_th_stored); \
} while(0)

#define BLOCKING_REGION_CORE(exec) do { \
    GVL_UNLOCK_BEGIN(); {\
            exec; \
    } \
    GVL_UNLOCK_END(); \
} while(0);

#define BLOCKING_REGION(exec, ubf, ubfarg) do { \
    rb_thread_t *__th = GET_THREAD(); \
    enum rb_thread_status __prev_status = __th->status; \
    struct rb_unblock_callback __oldubf; \
    set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
    __th->status = THREAD_STOPPED; \
    thread_debug("enter blocking region (%p)\n", __th); \
    BLOCKING_REGION_CORE(exec); \
    thread_debug("leave blocking region (%p)\n", __th); \
    remove_signal_thread_list(__th); \
    reset_unblock_function(__th, &__oldubf); \
    if (__th->status == THREAD_STOPPED) { \
        __th->status = __prev_status; \
    } \
    RUBY_VM_CHECK_INTS(); \
} while(0)

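/*
  Summary of the macros above: GVL_UNLOCK_BEGIN/END bracket a region with
  the GVL released (saving the machine context first so the GC can still
  scan this thread's stack), and BLOCKING_REGION additionally installs an
  unblocking function `ubf` so that rb_thread_interrupt() can knock the
  thread out of a blocking system call, then re-checks interrupts on exit.
 */
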
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val);
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

#ifndef __ia64
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
static void timer_thread_function(void *);

#if   defined(_WIN32)
#include "thread_win32.c"

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
#include "thread_pthread.c"

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif

#if THREAD_DEBUG
static int debug_mutex_initialized = 1;
static rb_thread_lock_t debug_mutex;

void
rb_thread_debug(
#ifdef HAVE_VA_ARGS_MACRO
    const char *file, int line,
#endif
    const char *fmt, ...)
{
    va_list args;
    char buf[BUFSIZ];

    if (!rb_thread_debug_enabled) return;

    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
        native_mutex_initialize(&debug_mutex);
    }

    va_start(args, fmt);
    vsnprintf(buf, BUFSIZ, fmt, args);
    va_end(args);

    DEBUG_OUT();
}
#endif

static void
set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
                     struct rb_unblock_callback *old)
{
  check_ints:
    RUBY_VM_CHECK_INTS(); /* check signal or so */
    native_mutex_lock(&th->interrupt_lock);
    if (th->interrupt_flag) {
        native_mutex_unlock(&th->interrupt_lock);
        goto check_ints;
    }
    else {
        if (old) *old = th->unblock;
        th->unblock.func = func;
        th->unblock.arg = arg;
    }
    native_mutex_unlock(&th->interrupt_lock);
}

static void
reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
{
    native_mutex_lock(&th->interrupt_lock);
    th->unblock = *old;
    native_mutex_unlock(&th->interrupt_lock);
}

static void
rb_thread_interrupt(rb_thread_t *th)
{
    native_mutex_lock(&th->interrupt_lock);
    RUBY_VM_SET_INTERRUPT(th);
    if (th->unblock.func) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
    native_mutex_unlock(&th->interrupt_lock);
}

static int
terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    if (th != main_thread) {
        thread_debug("terminate_i: %p\n", th);
        rb_thread_interrupt(th);
        th->thrown_errinfo = eTerminateSignal;
        th->status = THREAD_TO_KILL;
    }
    else {
        thread_debug("terminate_i: main thread (%p)\n", th);
    }
    return ST_CONTINUE;
}

typedef struct rb_mutex_struct
{
    rb_thread_lock_t lock;
    rb_thread_cond_t cond;
    struct rb_thread_struct volatile *th;
    volatile int cond_waiting, cond_notified;
    struct rb_mutex_struct *next_mutex;
} mutex_t;

static void rb_mutex_unlock_all(mutex_t *mutex);

void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;
    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
    }

    /* unlock all locking mutexes */
    if (th->keeping_mutexes) {
        rb_mutex_unlock_all(th->keeping_mutexes);
    }

    thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
    st_foreach(vm->living_threads, terminate_i, (st_data_t)th);

    while (!rb_thread_alone()) {
        PUSH_TAG();
        if (EXEC_TAG() == 0) {
            rb_thread_schedule();
        }
        else {
            /* ignore exception */
        }
        POP_TAG();
    }
    system_working = 0;
}

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;
    th->machine_stack_start = th->machine_stack_end = 0;
#ifdef __ia64
    th->machine_register_stack_start = th->machine_register_stack_end = 0;
#endif
}

static void
thread_cleanup_func(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    thread_cleanup_func_before_exec(th_ptr);
    native_thread_destroy(th);
}

extern void ruby_error_print(void);
static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
void rb_thread_recycle_stack_release(VALUE *);

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    int state;
    VALUE args = th->first_args;
    rb_proc_t *proc;
    rb_thread_t *join_th;
    rb_thread_t *main_th;
    VALUE errinfo = Qnil;

    th->machine_stack_start = stack_start;
#ifdef __ia64
    th->machine_register_stack_start = register_stack_start;
#endif
    thread_debug("thread start: %p\n", th);

    native_mutex_lock(&th->vm->global_vm_lock);
    {
        thread_debug("thread start (get lock): %p\n", th);
        rb_thread_set_current(th);

        TH_PUSH_TAG(th);
        if ((state = EXEC_TAG()) == 0) {
            SAVE_ROOT_JMPBUF(th, {
                if (th->first_proc) {
                    GetProcPtr(th->first_proc, proc);
                    th->errinfo = Qnil;
                    th->local_lfp = proc->block.lfp;
                    th->local_svar = Qnil;
                    th->value = vm_invoke_proc(th, proc, proc->block.self,
                                               RARRAY_LEN(args), RARRAY_PTR(args), 0);
                }
                else {
                    th->value = (*th->first_func)((void *)th->first_args);
                }
            });
        }
        else {
            errinfo = th->errinfo;
            if (NIL_P(errinfo)) errinfo = rb_errinfo();
            if (state == TAG_FATAL) {
                /* fatal error within this thread, need to stop whole script */
            }
            else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
                if (th->safe_level >= 4) {
                    th->errinfo = rb_exc_new3(rb_eSecurityError,
                                              rb_sprintf("Insecure exit at level %d", th->safe_level));
                    errinfo = Qnil;
                }
            }
            else if (th->safe_level < 4 &&
                     (th->vm->thread_abort_on_exception ||
                      th->abort_on_exception || RTEST(ruby_debug))) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
            th->value = Qnil;
        }

        th->status = THREAD_KILLED;
        thread_debug("thread end: %p\n", th);

        main_th = th->vm->main_thread;
        if (th != main_th) {
            if (TYPE(errinfo) == T_OBJECT) {
                /* a normal exception object: re-raise it in the main thread */
                rb_thread_raise(1, &errinfo, main_th);
            }
        }
        TH_POP_TAG();

        /* locking_mutex must be Qfalse */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
                   th, th->locking_mutex);
        }

        /* unlock all locking mutexes */
        if (th->keeping_mutexes) {
            rb_mutex_unlock_all(th->keeping_mutexes);
            th->keeping_mutexes = NULL;
        }

        /* delete self from living_threads */
        st_delete_wrap(th->vm->living_threads, th->self);

        /* wake up joining threads */
        join_th = th->join_list_head;
        while (join_th) {
            if (join_th == main_th) errinfo = Qnil;
            rb_thread_interrupt(join_th);
            switch (join_th->status) {
              case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
                join_th->status = THREAD_RUNNABLE;
              default: break;
            }
            join_th = join_th->join_list_next;
        }
        if (th != main_th) rb_check_deadlock(th->vm);

        if (!th->root_fiber) {
            rb_thread_recycle_stack_release(th->stack);
            th->stack = 0;
        }
    }
    thread_cleanup_func(th);
    native_mutex_unlock(&th->vm->global_vm_lock);

    return 0;
}

static VALUE
thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
{
    rb_thread_t *th;

    if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }
    GetThreadPtr(thval, th);

    /* setup thread environment */
    th->first_func = fn;
    th->first_proc = fn ? Qfalse : rb_block_proc();
    th->first_args = args; /* GC: this must not be moved before the line above */

    th->priority = GET_THREAD()->priority;
    th->thgroup = GET_THREAD()->thgroup;

    native_mutex_initialize(&th->interrupt_lock);
    /* kick thread */
    st_insert(th->vm->living_threads, thval, (st_data_t)th->thread_id);
    native_thread_create(th);
    return thval;
}

static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);
    rb_obj_call_init(thread, argc, argv);
    GetThreadPtr(thread, th);
    if (!th->first_args) {
        rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
                 rb_class2name(klass));
    }
    return thread;
}

/*
 *  call-seq:
 *     Thread.start([args]*) {|args| block }   => thread
 *     Thread.fork([args]*) {|args| block }    => thread
 *
 *  Basically the same as <code>Thread::new</code>. However, if class
 *  <code>Thread</code> is subclassed, then calling <code>start</code> in that
 *  subclass will not invoke the subclass's <code>initialize</code> method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    return thread_create_core(rb_thread_alloc(klass), args, 0);
}

static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th;
    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    GetThreadPtr(thread, th);
    if (th->first_args) {
        VALUE rb_proc_location(VALUE self);
        VALUE proc = th->first_proc, line, loc;
        const char *file;
        if (!proc || !RTEST(loc = rb_proc_location(proc))) {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
        file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
        if (NIL_P(line = RARRAY_PTR(loc)[1])) {
            rb_raise(rb_eThreadError, "already initialized thread - %s",
                     file);
        }
        rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
                 file, NUM2INT(line));
    }
    return thread_create_core(thread, args, 0);
}

VALUE
rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
{
    return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
}

/* +infty, for this purpose */
#define DELAY_INFTY 1E30

struct join_arg {
    rb_thread_t *target, *waiting;
    double limit;
    int forever;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;

    if (target_th->status != THREAD_KILLED) {
        rb_thread_t **pth = &target_th->join_list_head;

        while (*pth) {
            if (*pth == th) {
                *pth = th->join_list_next;
                break;
            }
            pth = &(*pth)->join_list_next;
        }
    }

    return Qnil;
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiting;
    double now, limit = p->limit;

    while (target_th->status != THREAD_KILLED) {
        if (p->forever) {
            sleep_forever(th, 1);
        }
        else {
            now = timeofday();
            if (now > limit) {
                thread_debug("thread_join: timeout (thid: %p)\n",
                             (void *)target_th->thread_id);
                return Qfalse;
            }
            sleep_wait_for_interrupt(th, limit - now);
        }
        thread_debug("thread_join: interrupted (thid: %p)\n",
                     (void *)target_th->thread_id);
    }
    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    rb_thread_t *th = GET_THREAD();
    struct join_arg arg;

    arg.target = target_th;
    arg.waiting = th;
    arg.limit = timeofday() + delay;
    arg.forever = delay == DELAY_INFTY;

    thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);

    if (target_th->status != THREAD_KILLED) {
        th->join_list_next = target_th->join_list_head;
        target_th->join_list_head = th;
        if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                       remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    thread_debug("thread_join: success (thid: %p)\n",
                 (void *)target_th->thread_id);

    if (target_th->errinfo != Qnil) {
        VALUE err = target_th->errinfo;

        if (FIXNUM_P(err)) {
            /* */
        }
        else if (TYPE(target_th->errinfo) == T_NODE) {
            rb_exc_raise(vm_make_jump_tag_but_local_jump(
                GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 *  call-seq:
 *     thr.join          => thr
 *     thr.join(limit)   => thr
 *
 *  The calling thread will suspend execution and run <i>thr</i>. Does not
 *  return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
 *  the time limit expires, <code>nil</code> will be returned, otherwise
 *  <i>thr</i> is returned.
 *
 *  Any threads not joined will be killed when the main program exits.  If
 *  <i>thr</i> had previously raised an exception and the
 *  <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
 *  (so the exception has not yet been processed) it will be processed at this
 *  time.
 *
 *     a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *     x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *     x.join # Let x thread finish, a will be killed on exit.
 *
 *  <em>produces:</em>
 *
 *     axyz
 *
 *  The following example illustrates the <i>limit</i> parameter.
 *
 *     y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *     puts "Waiting" until y.join(0.15)
 *
 *  <em>produces:</em>
 *
 *     tick...
 *     Waiting
 *     tick...
 *     Waitingtick...
 *
 *
 *     tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *target_th;
    double delay = DELAY_INFTY;
    VALUE limit;

    GetThreadPtr(self, target_th);

    rb_scan_args(argc, argv, "01", &limit);
    if (!NIL_P(limit)) {
        delay = rb_num2dbl(limit);
    }

    return thread_join(target_th, delay);
}

/*
 *  call-seq:
 *     thr.value   => obj
 *
 *  Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
 *  its value.
 *
 *     a = Thread.new { 2 + 2 }
 *     a.value   #=> 4
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    thread_join(th, DELAY_INFTY);
    return th->value;
}

/*
 * Thread Scheduling
 */

static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}

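/*
  For example, double2timeval(1.25) yields { tv_sec = 1, tv_usec = 250000 },
  and double2timeval(-0.5) yields { tv_sec = -1, tv_usec = 500000 }: the
  negative-usec fixup above keeps tv_usec within [0, 1e6).
 */
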
static void
sleep_forever(rb_thread_t *th, int deadlockable)
{
    enum rb_thread_status prev_status = th->status;

    th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    do {
        if (deadlockable) {
            th->vm->sleeper++;
            rb_check_deadlock(th->vm);
        }
        native_sleep(th, 0);
        if (deadlockable) {
            th->vm->sleeper--;
        }
        RUBY_VM_CHECK_INTS();
    } while (th->status == THREAD_STOPPED_FOREVER);
    th->status = prev_status;
}

static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    } else
#endif
    {
        gettimeofday(tp, NULL);
    }
}

static void
sleep_timeval(rb_thread_t *th, struct timeval tv)
{
    struct timeval to, tvn;
    enum rb_thread_status prev_status = th->status;

    getclockofday(&to);
    to.tv_sec += tv.tv_sec;
    if ((to.tv_usec += tv.tv_usec) >= 1000000) {
        to.tv_sec++;
        to.tv_usec -= 1000000;
    }

    th->status = THREAD_STOPPED;
    do {
        native_sleep(th, &tv);
        RUBY_VM_CHECK_INTS();
        getclockofday(&tvn);
        if (to.tv_sec < tvn.tv_sec) break;
        if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
        thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
                     (long)to.tv_sec, to.tv_usec,
                     (long)tvn.tv_sec, tvn.tv_usec);
        tv.tv_sec = to.tv_sec - tvn.tv_sec;
        if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
            --tv.tv_sec;
            tv.tv_usec += 1000000;
        }
    } while (th->status == THREAD_STOPPED);
    th->status = prev_status;
}

void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}

static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}

static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    } else
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}

static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
{
    sleep_timeval(th, double2timeval(sleepsec));
}

static void
sleep_for_polling(rb_thread_t *th)
{
    struct timeval time;
    time.tv_sec = 0;
    time.tv_usec = 100 * 1000; /* 0.1 sec */
    sleep_timeval(th, time);
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time);
}

void
rb_thread_polling(void)
{
    RUBY_VM_CHECK_INTS();
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        sleep_for_polling(th);
    }
}

struct timeval rb_time_timeval();

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

void
rb_thread_schedule(void)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        thread_debug("rb_thread_schedule/switch start\n");

        rb_gc_save_machine_context(th);
        native_mutex_unlock(&th->vm->global_vm_lock);
        {
            native_thread_yield();
        }
        native_mutex_lock(&th->vm->global_vm_lock);

        rb_thread_set_current(th);
        thread_debug("rb_thread_schedule/switch done\n");

        RUBY_VM_CHECK_INTS();
    }
}

int rb_thread_critical; /* TODO: dummy variable */

VALUE
rb_thread_blocking_region(
    rb_blocking_function_t *func, void *data1,
    rb_unblock_function_t *ubf, void *data2)
{
    VALUE val;
    rb_thread_t *th = GET_THREAD();

    if (ubf == RB_UBF_DFL) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
    }, ubf, data2);

    return val;
}

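/*
  A minimal usage sketch for C extension code (the blocking_read function
  and my_read_args struct are hypothetical; the callback must not touch
  Ruby objects while the GVL is released):

      static VALUE
      blocking_read(void *data)
      {
          struct my_read_args *a = data;
          a->result = read(a->fd, a->buf, a->len);   // runs without the GVL
          return Qnil;
      }

      ...
      rb_thread_blocking_region(blocking_read, &args, RB_UBF_DFL, 0);
 */
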
/*
 *  call-seq:
 *     Thread.pass   => nil
 *
 *  Invokes the thread scheduler to pass execution to another thread.
 *
 *     a = Thread.new { print "a"; Thread.pass;
 *                      print "b"; Thread.pass;
 *                      print "c" }
 *     b = Thread.new { print "x"; Thread.pass;
 *                      print "y"; Thread.pass;
 *                      print "z" }
 *     a.join
 *     b.join
 *
 *  <em>produces:</em>
 *
 *     axbycz
 */

static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}

void
rb_thread_execute_interrupts(rb_thread_t *th)
{
    if (th->raised_flag) return;

    while (th->interrupt_flag) {
        enum rb_thread_status status = th->status;
        int timer_interrupt = th->interrupt_flag & 0x01;
        int finalizer_interrupt = th->interrupt_flag & 0x04;

        th->status = THREAD_RUNNABLE;
        th->interrupt_flag = 0;

        /* signal handling */
        if (th->exec_signal) {
            int sig = th->exec_signal;
            th->exec_signal = 0;
            rb_signal_exec(th, sig);
        }

        /* exception from another thread */
        if (th->thrown_errinfo) {
            VALUE err = th->thrown_errinfo;
            th->thrown_errinfo = 0;
            thread_debug("rb_thread_execute_interrupts: %ld\n", err);

            if (err == eKillSignal || err == eTerminateSignal) {
                th->errinfo = INT2FIX(TAG_FATAL);
                TH_JUMP_TAG(th, TAG_FATAL);
            }
            else {
                rb_exc_raise(err);
            }
        }
        th->status = status;

        if (finalizer_interrupt) {
            rb_gc_finalize_deferred();
        }

        if (timer_interrupt) {
            EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
            rb_thread_schedule();
        }
    }
}

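/*
  As read by the loop above, bit 0x01 of th->interrupt_flag requests a
  timer (time-slice) reschedule and bit 0x04 requests running deferred
  finalizers; pending signals and cross-thread exceptions travel in
  th->exec_signal and th->thrown_errinfo respectively.
 */
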
void
rb_gc_mark_threads(void)
{
    /* TODO: remove */
}

/*****************************************************/

static void
rb_thread_ready(rb_thread_t *th)
{
    rb_thread_interrupt(th);
}

static VALUE
rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
{
    VALUE exc;

  again:
    if (rb_thread_dead(th)) {
        return Qnil;
    }

    if (th->thrown_errinfo != 0 || th->raised_flag) {
        rb_thread_schedule();
        goto again;
    }

    exc = rb_make_exception(argc, argv);
    th->thrown_errinfo = exc;
    rb_thread_ready(th);
    return Qnil;
}

void
rb_thread_signal_raise(void *thptr, int sig)
{
    VALUE argv[2];
    rb_thread_t *th = thptr;

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_thread_raise(2, argv, th->vm->main_thread);
}

void
rb_thread_signal_exit(void *thptr)
{
    VALUE argv[2];
    rb_thread_t *th = thptr;

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_thread_raise(2, argv, th->vm->main_thread);
}

int
rb_thread_set_raised(rb_thread_t *th)
{
    if (th->raised_flag & RAISED_EXCEPTION) {
        return 1;
    }
    th->raised_flag |= RAISED_EXCEPTION;
    return 0;
}

int
rb_thread_reset_raised(rb_thread_t *th)
{
    if (!(th->raised_flag & RAISED_EXCEPTION)) {
        return 0;
    }
    th->raised_flag &= ~RAISED_EXCEPTION;
    return 1;
}

void
rb_thread_fd_close(int fd)
{
    /* TODO: fix me */
}

/*
 *  call-seq:
 *     thr.raise(exception)
 *
 *  Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
 *  caller does not have to be <i>thr</i>.
 *
 *     Thread.abort_on_exception = true
 *     a = Thread.new { sleep(200) }
 *     a.raise("Gotcha")
 *
 *  <em>produces:</em>
 *
 *     prog.rb:3: Gotcha (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 *     	from prog.rb:2
 */

static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *th;
    GetThreadPtr(self, th);
    rb_thread_raise(argc, argv, th);
    return Qnil;
}

/*
 *  call-seq:
 *     thr.exit        => thr or nil
 *     thr.kill        => thr or nil
 *     thr.terminate   => thr or nil
 *
 *  Terminates <i>thr</i> and schedules another thread to be run. If this thread
 *  is already marked to be killed, <code>exit</code> returns the
 *  <code>Thread</code>. If this is the main thread, or the last thread, exits
 *  the process.
 */

VALUE
rb_thread_kill(VALUE thread)
{
    rb_thread_t *th;

    GetThreadPtr(thread, th);

    if (th != GET_THREAD() && th->safe_level < 4) {
        rb_secure(4);
    }
    if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
        return thread;
    }
    if (th == th->vm->main_thread) {
        rb_exit(EXIT_SUCCESS);
    }

    thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);

    rb_thread_interrupt(th);
    th->thrown_errinfo = eKillSignal;
    th->status = THREAD_TO_KILL;

    return thread;
}

/*
 *  call-seq:
 *     Thread.kill(thread)   => thread
 *
 *  Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
 *
 *     count = 0
 *     a = Thread.new { loop { count += 1 } }
 *     sleep(0.1)       #=> 0
 *     Thread.kill(a)   #=> #<Thread:0x401b3d30 dead>
 *     count            #=> 93947
 *     a.alive?         #=> false
 */

static VALUE
rb_thread_s_kill(VALUE obj, VALUE th)
{
    return rb_thread_kill(th);
}

/*
 *  call-seq:
 *     Thread.exit   => thread
 *
 *  Terminates the currently running thread and schedules another thread to be
 *  run. If this thread is already marked to be killed, <code>exit</code>
 *  returns the <code>Thread</code>. If this is the main thread, or the last
 *  thread, exits the process.
 */

static VALUE
rb_thread_exit(void)
{
    return rb_thread_kill(GET_THREAD()->self);
}

/*
 *  call-seq:
 *     thr.wakeup   => thr
 *
 *  Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
 *  I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
 *
 *     c = Thread.new { Thread.stop; puts "hey!" }
 *     c.wakeup
 *
 *  <em>produces:</em>
 *
 *     hey!
 */

VALUE
rb_thread_wakeup(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "killed thread");
    }
    rb_thread_ready(th);
    if (th->status != THREAD_TO_KILL) {
        th->status = THREAD_RUNNABLE;
    }
    return thread;
}

/*
 *  call-seq:
 *     thr.run   => thr
 *
 *  Wakes up <i>thr</i>, making it eligible for scheduling.
 *
 *     a = Thread.new { puts "a"; Thread.stop; puts "c" }
 *     Thread.pass
 *     puts "Got here"
 *     a.run
 *     a.join
 *
 *  <em>produces:</em>
 *
 *     a
 *     Got here
 *     c
 */

VALUE
rb_thread_run(VALUE thread)
{
    rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
}

/*
 *  call-seq:
 *     Thread.stop   => nil
 *
 *  Stops execution of the current thread, putting it into a ``sleep'' state,
 *  and schedules execution of another thread.
 *
 *     a = Thread.new { print "a"; Thread.stop; print "c" }
 *     Thread.pass
 *     print "b"
 *     a.run
 *     a.join
 *
 *  <em>produces:</em>
 *
 *     abc
 */

VALUE
rb_thread_stop(void)
{
    if (rb_thread_alone()) {
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    }
    rb_thread_sleep_deadly();
    return Qnil;
}

static int
thread_list_i(st_data_t key, st_data_t val, void *data)
{
    VALUE ary = (VALUE)data;
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);

    switch (th->status) {
      case THREAD_RUNNABLE:
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
      case THREAD_TO_KILL:
        rb_ary_push(ary, th->self);
      default:
        break;
    }
    return ST_CONTINUE;
}

/********************************************************************/

/*
 *  call-seq:
 *     Thread.list   => array
 *
 *  Returns an array of <code>Thread</code> objects for all threads that are
 *  either runnable or stopped.
 *
 *     Thread.new { sleep(200) }
 *     Thread.new { 1000000.times {|i| i*i } }
 *     Thread.new { Thread.stop }
 *     Thread.list.each {|t| p t}
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3e84 sleep>
 *     #<Thread:0x401b3f38 run>
 *     #<Thread:0x401b3fb0 sleep>
 *     #<Thread:0x401bdf4c run>
 */

VALUE
rb_thread_list(void)
{
    VALUE ary = rb_ary_new();
    st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
    return ary;
}

VALUE
rb_thread_current(void)
{
    return GET_THREAD()->self;
}

/*
 *  call-seq:
 *     Thread.current   => thread
 *
 *  Returns the currently executing thread.
 *
 *     Thread.current   #=> #<Thread:0x401bdf4c run>
 */

static VALUE
thread_s_current(VALUE klass)
{
    return rb_thread_current();
}

VALUE
rb_thread_main(void)
{
    return GET_THREAD()->vm->main_thread->self;
}

static VALUE
rb_thread_s_main(VALUE klass)
{
    return rb_thread_main();
}

/*
 *  call-seq:
 *     Thread.abort_on_exception   => true or false
 *
 *  Returns the status of the global ``abort on exception'' condition. The
 *  default is <code>false</code>. When set to <code>true</code>, or if the
 *  global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
 *  command line option <code>-d</code> was specified) all threads will abort
 *  (the process will <code>exit(0)</code>) if an exception is raised in any
 *  thread. See also <code>Thread::abort_on_exception=</code>.
 */

static VALUE
rb_thread_s_abort_exc(void)
{
    return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     Thread.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, all threads will abort if an exception is
 *  raised. Returns the new state.
 *
 *     Thread.abort_on_exception = true
 *     t1 = Thread.new do
 *       puts  "In new thread"
 *       raise "Exception from thread"
 *     end
 *     sleep(1)
 *     puts "not reached"
 *
 *  <em>produces:</em>
 *
 *     In new thread
 *     prog.rb:4: Exception from thread (RuntimeError)
 *     	from prog.rb:2:in `initialize'
 *     	from prog.rb:2:in `new'
 *     	from prog.rb:2
 */

static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    rb_secure(4);
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
    return val;
}

/*
 *  call-seq:
 *     thr.abort_on_exception   => true or false
 *
 *  Returns the status of the thread-local ``abort on exception'' condition for
 *  <i>thr</i>. The default is <code>false</code>. See also
 *  <code>Thread::abort_on_exception=</code>.
 */

static VALUE
rb_thread_abort_exc(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return th->abort_on_exception ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     thr.abort_on_exception= boolean   => true or false
 *
 *  When set to <code>true</code>, causes all threads (including the main
 *  program) to abort if an exception is raised in <i>thr</i>. The process will
 *  effectively <code>exit(0)</code>.
 */

static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_t *th;
    rb_secure(4);

    GetThreadPtr(thread, th);
    th->abort_on_exception = RTEST(val);
    return val;
}

/*
 *  call-seq:
 *     thr.group   => thgrp or nil
 *
 *  Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
 *  the thread is not a member of any group.
 *
 *     Thread.main.group   #=> #<ThreadGroup:0x4029d914>
 */

VALUE
rb_thread_group(VALUE thread)
{
    rb_thread_t *th;
    VALUE group;
    GetThreadPtr(thread, th);
    group = th->thgroup;

    if (!group) {
        group = Qnil;
    }
    return group;
}

static const char *
thread_status_name(enum rb_thread_status status)
{
    switch (status) {
      case THREAD_RUNNABLE:
        return "run";
      case THREAD_STOPPED:
      case THREAD_STOPPED_FOREVER:
        return "sleep";
      case THREAD_TO_KILL:
        return "aborting";
      case THREAD_KILLED:
        return "dead";
      default:
        return "unknown";
    }
}

static int
rb_thread_dead(rb_thread_t *th)
{
    return th->status == THREAD_KILLED;
}

/*
 *  call-seq:
 *     thr.status   => string, false or nil
 *
 *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
 *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
 *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
 *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
 *  terminated with an exception.
 *
 *     a = Thread.new { raise("die now") }
 *     b = Thread.new { Thread.stop }
 *     c = Thread.new { Thread.exit }
 *     d = Thread.new { sleep }
 *     d.kill                  #=> #<Thread:0x401b3678 aborting>
 *     a.status                #=> nil
 *     b.status                #=> "sleep"
 *     c.status                #=> false
 *     d.status                #=> "aborting"
 *     Thread.current.status   #=> "run"
 */

static VALUE
rb_thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th)) {
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}

/*
 *  call-seq:
 *     thr.alive?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is running or sleeping.
 *
 *     thr = Thread.new { }
 *     thr.join                #=> #<Thread:0x401b3fb0 dead>
 *     Thread.current.alive?   #=> true
 *     thr.alive?              #=> false
 */

static VALUE
rb_thread_alive_p(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))
        return Qfalse;
    return Qtrue;
}

/*
 *  call-seq:
 *     thr.stop?   => true or false
 *
 *  Returns <code>true</code> if <i>thr</i> is dead or sleeping.
 *
 *     a = Thread.new { Thread.stop }
 *     b = Thread.current
 *     a.stop?   #=> true
 *     b.stop?   #=> false
 */

static VALUE
rb_thread_stop_p(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th))
        return Qtrue;
    if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
        return Qtrue;
    return Qfalse;
}

/*
 *  call-seq:
 *     thr.safe_level   => integer
 *
 *  Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
 *  levels can help when implementing sandboxes which run insecure code.
 *
 *     thr = Thread.new { $SAFE = 3; sleep }
 *     Thread.current.safe_level   #=> 0
 *     thr.safe_level              #=> 3
 */

static VALUE
rb_thread_safe_level(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    return INT2NUM(th->safe_level);
}

/*
 * call-seq:
 *   thr.inspect   => string
 *
 * Dump the name, id, and status of _thr_ to a string.
 */

static VALUE
rb_thread_inspect(VALUE thread)
{
    const char *cname = rb_obj_classname(thread);
    rb_thread_t *th;
    const char *status;
    VALUE str;

    GetThreadPtr(thread, th);
    status = thread_status_name(th->status);
    str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
    OBJ_INFECT(str, thread);

    return str;
}

VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    rb_thread_t *th;
    VALUE val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return val;
    }
    return Qnil;
}

/*
 *  call-seq:
 *      thr[sym]   => obj or nil
 *
 *  Attribute Reference---Returns the value of a thread-local variable, using
 *  either a symbol or a string name. If the specified variable does not exist,
 *  returns <code>nil</code>.
 *
 *     a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
 *     b = Thread.new { Thread.current[:name]  = "B"; Thread.stop }
 *     c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
 *     Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
 *
 *  <em>produces:</em>
 *
 *     #<Thread:0x401b3b3c sleep>: C
 *     #<Thread:0x401b3bc8 sleep>: B
 *     #<Thread:0x401b3c68 sleep>: A
 *     #<Thread:0x401bdf4c run>:
 */

static VALUE
rb_thread_aref(VALUE thread, VALUE id)
{
    return rb_thread_local_aref(thread, rb_to_id(id));
}

VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}

/*
 *  call-seq:
 *      thr[sym] = obj   => obj
 *
 *  Attribute Assignment---Sets or creates the value of a thread-local variable,
 *  using either a symbol or a string. See also <code>Thread#[]</code>.
 */

static VALUE
rb_thread_aset(VALUE self, ID id, VALUE val)
{
    return rb_thread_local_aset(self, rb_to_id(id), val);
}

/*
 *  call-seq:
 *     thr.key?(sym)   => true or false
 *
 *  Returns <code>true</code> if the given string (or symbol) exists as a
 *  thread-local variable.
 *
 *     me = Thread.current
 *     me[:oliver] = "a"
 *     me.key?(:oliver)    #=> true
 *     me.key?(:stanley)   #=> false
 */

static VALUE
rb_thread_key_p(VALUE self, VALUE key)
{
    rb_thread_t *th;
    ID id = rb_to_id(key);

    GetThreadPtr(self, th);

    if (!th->local_storage) {
        return Qfalse;
    }
    if (st_lookup(th->local_storage, id, 0)) {
        return Qtrue;
    }
    return Qfalse;
}

static int
thread_keys_i(ID key, VALUE value, VALUE ary)
{
    rb_ary_push(ary, ID2SYM(key));
    return ST_CONTINUE;
}

static int
vm_living_thread_num(rb_vm_t *vm)
{
    return vm->living_threads->num_entries;
}

int
rb_thread_alone(void)
{
    int num = 1;
    if (GET_THREAD()->vm->living_threads) {
        num = vm_living_thread_num(GET_THREAD()->vm);
        thread_debug("rb_thread_alone: %d\n", num);
    }
    return num == 1;
}

/*
 *  call-seq:
 *     thr.keys   => array
 *
 *  Returns an array of the names of the thread-local variables (as Symbols).
 *
 *     thr = Thread.new do
 *       Thread.current[:cat] = 'meow'
 *       Thread.current["dog"] = 'woof'
 *     end
 *     thr.join   #=> #<Thread:0x401b3f10 dead>
 *     thr.keys   #=> [:dog, :cat]
 */

static VALUE
rb_thread_keys(VALUE self)
{
    rb_thread_t *th;
    VALUE ary = rb_ary_new();
    GetThreadPtr(self, th);

    if (th->local_storage) {
        st_foreach(th->local_storage, thread_keys_i, ary);
    }
    return ary;
}

/*
 *  call-seq:
 *     thr.priority   => integer
 *
 *  Returns the priority of <i>thr</i>. The default is inherited from the
 *  thread that created <i>thr</i>, or zero for the initial main thread;
 *  higher-priority threads will run before lower-priority threads.
 *
 *     Thread.current.priority   #=> 0
 */

static VALUE
rb_thread_priority(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);
    return INT2NUM(th->priority);
}

/*
 *  call-seq:
 *     thr.priority= integer   => thr
 *
 *  Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
 *  will run before lower-priority threads.
 *
 *     count1 = count2 = 0
 *     a = Thread.new do
 *           loop { count1 += 1 }
 *         end
 *     a.priority = -1
 *
 *     b = Thread.new do
 *           loop { count2 += 1 }
 *         end
 *     b.priority = -2
 *     sleep 1   #=> 1
 *     count1    #=> 622504
 *     count2    #=> 5832
 */

static VALUE
rb_thread_priority_set(VALUE thread, VALUE prio)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    rb_secure(4);

    th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
    return prio;
}

/* for IO */

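/*
  The rb_fdset_t functions below wrap fd_set with a heap-allocated bit set
  that rb_fd_resize() grows on demand, so descriptors at or above
  FD_SETSIZE can be handled where the platform allows it.  The FD_* macros
  are redefined at the end of this block to route through these wrappers.
 */
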
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
void
rb_fd_init(volatile rb_fdset_t *fds)
{
    fds->maxfd = 0;
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}

void
rb_fd_term(rb_fdset_t *fds)
{
    if (fds->fdset) xfree(fds->fdset);
    fds->maxfd = 0;
    fds->fdset = 0;
}

void
rb_fd_zero(rb_fdset_t *fds)
{
    if (fds->fdset) {
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
        FD_ZERO(fds->fdset);
    }
}

static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = realloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}

void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}

void
rb_fd_clr(int n, rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);
}

int
rb_fd_isset(int n, const rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
}

void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    int size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    dst->fdset = realloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}

int
rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return select(n, r, w, e, timeout);
}

#undef FD_ZERO
#undef FD_SET
#undef FD_CLR
#undef FD_ISSET

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set(i, f)
#define FD_CLR(i, f)    rb_fd_clr(i, f)
#define FD_ISSET(i, f)  rb_fd_isset(i, f)

#endif

#if defined(__CYGWIN__) || defined(_WIN32)
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long d = (a->tv_sec - b->tv_sec);
    return (d != 0) ? d : (a->tv_usec - b->tv_usec);
}

static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    while (rest->tv_usec < wait->tv_usec) {
        if (rest->tv_sec <= wait->tv_sec) {
            return 0;
        }
        rest->tv_sec -= 1;
        rest->tv_usec += 1000 * 1000;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
#endif

static int
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    int result, lerrno;
    fd_set orig_read, orig_write, orig_except;

#ifndef linux
    double limit = 0;
    struct timeval wait_rest;
# if defined(__CYGWIN__) || defined(_WIN32)
    struct timeval start_time;
# endif

    if (timeout) {
# if defined(__CYGWIN__) || defined(_WIN32)
        gettimeofday(&start_time, NULL);
        limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
# else
        limit = timeofday();
# endif
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        wait_rest = *timeout;
        timeout = &wait_rest;
    }
#endif

    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

  retry:
    lerrno = 0;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        int finish = 0;
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        do {
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                    wait = &wait_100ms;
                    if (timeout) {
                        struct timeval elapsed;
                        gettimeofday(&elapsed, NULL);
                        subtract_tv(&elapsed, &start_time);
                        if (!subtract_tv(timeout, &elapsed)) {
                            finish = 1;
                            break;
                        }
                        if (cmp_tv(&wait_100ms, timeout) < 0) wait = timeout;
                    }
                } while (__th->interrupt_flag == 0);
            }, 0, 0);
        } while (result == 0 && !finish);
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            if (read) *read = orig_read;
            if (write) *write = orig_write;
            if (except) *except = orig_except;
#ifndef linux
            if (timeout) {
                double d = limit - timeofday();

                wait_rest.tv_sec = (unsigned int)d;
                wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }
#endif
            goto retry;
          default:
            break;
        }
    }
    return result;
}

static void
rb_thread_wait_fd_rw(int fd, int read)
{
    int result = 0;
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");

    if (fd < 0) {
        rb_raise(rb_eIOError, "closed stream");
    }
    while (result <= 0) {
        rb_fdset_t set;
        rb_fd_init(&set);
        FD_SET(fd, &set);

        if (read) {
            result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
        }
        else {
            result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
        }

        rb_fd_term(&set);

        if (result < 0) {
            rb_sys_fail(0);
        }
    }

    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
}

void
rb_thread_wait_fd(int fd)
{
    rb_thread_wait_fd_rw(fd, 1);
}

int
rb_thread_fd_writable(int fd)
{
    rb_thread_wait_fd_rw(fd, 0);
    return Qtrue;
}

int
rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
                 struct timeval *timeout)
{
    if (!read && !write && !except) {
        if (!timeout) {
            rb_thread_sleep_forever();
            return 0;
        }
        rb_thread_wait_for(*timeout);
        return 0;
    }
    else {
        return do_select(max, read, write, except, timeout);
    }
}

/*
 * for GC
 */

#ifdef USE_CONSERVATIVE_STACK_END
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    *stack_end_p = &stack_end;
}
#endif

void
rb_gc_save_machine_context(rb_thread_t *th)
{
    SET_MACHINE_STACK_END(&th->machine_stack_end);
    FLUSH_REGISTER_WINDOWS;
#ifdef __ia64
    th->machine_register_stack_end = rb_ia64_bsp();
#endif
    setjmp(th->machine_regs);
}

int rb_get_next_signal(rb_vm_t *vm);

static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = arg; /* TODO: fix me for Multi-VM */

    /* for time slice */
    RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);

    /* check signal */
    if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
        rb_thread_t *mth = vm->main_thread;
        enum rb_thread_status prev_status = mth->status;
        mth->exec_signal = rb_get_next_signal(vm);
        thread_debug("main_thread: %s\n", thread_status_name(prev_status));
        thread_debug("buffered_signal_size: %ld, sig: %d\n",
                     (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
        if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
        rb_thread_interrupt(mth);
        mth->status = prev_status;
    }

#if 0
    /* prove profiler */
    if (vm->prove_profile.enable) {
        rb_thread_t *th = vm->running_thread;

        if (vm->during_gc) {
            /* GC prove profiling */
        }
    }
#endif
}

void
rb_thread_stop_timer_thread(void)
{
    if (timer_thread_id) {
        system_working = 0;
        native_thread_join(timer_thread_id);
        timer_thread_id = 0;
    }
}

void
rb_thread_reset_timer_thread(void)
{
    timer_thread_id = 0;
}

void
rb_thread_start_timer_thread(void)
{
    rb_thread_create_timer_thread();
}

static int
clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
{
    int i;
    VALUE lines = (VALUE)val;

    for (i = 0; i < RARRAY_LEN(lines); i++) {
        if (RARRAY_PTR(lines)[i] != Qnil) {
            RARRAY_PTR(lines)[i] = INT2FIX(0);
        }
    }

    return ST_CONTINUE;
}

static void
clear_coverage(void)
{
    extern VALUE rb_get_coverages(void);
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {
        st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
    }
}

static int
terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    if (th != current_th) {
        thread_cleanup_func(th);
    }
    return ST_CONTINUE;
}

void
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
    rb_reset_random_seed();
}

static int
terminate_atfork_before_exec_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    if (th != current_th) {
        thread_cleanup_func_before_exec(th);
    }
    return ST_CONTINUE;
}

void
rb_thread_atfork_before_exec(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    st_foreach(vm->living_threads, terminate_atfork_before_exec_i, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t)th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
}

struct thgroup {
    int enclosed;
    VALUE group;
};

/*
 * Document-class: ThreadGroup
 *
 *  <code>ThreadGroup</code> provides a means of keeping track of a number of
 *  threads as a group. A <code>Thread</code> can belong to only one
 *  <code>ThreadGroup</code> at a time; adding a thread to a new group will
 *  remove it from any previous group.
 *
 *  Newly created threads belong to the same group as the thread from which they
 *  were created.
 */

static VALUE thgroup_s_alloc(VALUE);
static VALUE
thgroup_s_alloc(VALUE klass)
{
    VALUE group;
    struct thgroup *data;

    group = Data_Make_Struct(klass, struct thgroup, 0, -1, data);
    data->enclosed = 0;
    data->group = group;

    return group;
}

struct thgroup_list_params {
    VALUE ary;
    VALUE group;
};

static int
thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
{
    VALUE thread = (VALUE)key;
    VALUE ary = ((struct thgroup_list_params *)data)->ary;
    VALUE group = ((struct thgroup_list_params *)data)->group;
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (th->thgroup == group) {
        rb_ary_push(ary, thread);
    }
    return ST_CONTINUE;
}

/*
 *  call-seq:
 *     thgrp.list   => array
 *
 *  Returns an array of all existing <code>Thread</code> objects that belong to
 *  this group.
 *
 *     ThreadGroup::Default.list   #=> [#<Thread:0x401bdf4c run>]
 */

static VALUE
thgroup_list(VALUE group)
{
    VALUE ary = rb_ary_new();
    struct thgroup_list_params param;

    param.ary = ary;
    param.group = group;
    st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t)&param);
    return ary;
}

/*
 *  call-seq:
 *     thgrp.enclose   => thgrp
 *
 *  Prevents threads from being added to or removed from the receiving
 *  <code>ThreadGroup</code>. New threads can still be started in an enclosed
 *  <code>ThreadGroup</code>.
 *
 *     ThreadGroup::Default.enclose        #=> #<ThreadGroup:0x4029d914>
 *     thr = Thread::new { Thread.stop }   #=> #<Thread:0x402a7210 sleep>
 *     tg = ThreadGroup::new               #=> #<ThreadGroup:0x402752d4>
 *     tg.add thr
 *
 *  <em>produces:</em>
 *
 *     ThreadError: can't move from the enclosed thread group
 */

VALUE
thgroup_enclose(VALUE group)
{
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);
    data->enclosed = 1;

    return group;
}

/*
 *  call-seq:
 *     thgrp.enclosed?   => true or false
 *
 *  Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
 *  ThreadGroup#enclose.
 */

static VALUE
thgroup_enclosed_p(VALUE group)
{
    struct thgroup *data;

    Data_Get_Struct(group, struct thgroup, data);
    if (data->enclosed)
        return Qtrue;
    return Qfalse;
}

/*
 *  call-seq:
 *     thgrp.add(thread)   => thgrp
 *
 *  Adds the given <em>thread</em> to this group, removing it from any other
 *  group to which it may have previously belonged.
 *
 *     puts "Initial group is #{ThreadGroup::Default.list}"
 *     tg = ThreadGroup.new
 *     t1 = Thread.new { sleep }
 *     t2 = Thread.new { sleep }
 *     puts "t1 is #{t1}"
 *     puts "t2 is #{t2}"
 *     tg.add(t1)
 *     puts "Initial group now #{ThreadGroup::Default.list}"
 *     puts "tg group now #{tg.list}"
 *
 *  <em>produces:</em>
 *
 *     Initial group is #<Thread:0x401bdf4c>
 *     t1 is #<Thread:0x401b3c90>
 *     t2 is #<Thread:0x401b3c18>
 *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
 *     tg group now #<Thread:0x401b3c90>
 */

static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *th;
    struct thgroup *data;

    rb_secure(4);
    GetThreadPtr(thread, th);

    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    Data_Get_Struct(group, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    if (!th->thgroup) {
        return Qnil;
    }

    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    Data_Get_Struct(th->thgroup, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}

2507 * Document-class: Mutex
2509 * Mutex implements a simple semaphore that can be used to coordinate access to
2510 * shared data from multiple concurrent threads.
2512 * Example:
2514 * require 'thread'
2515 * semaphore = Mutex.new
2517 * a = Thread.new {
2518 * semaphore.synchronize {
2519 * # access shared resource
2523 * b = Thread.new {
2524 * semaphore.synchronize {
2525 * # access shared resource
2531 #define GetMutexPtr(obj, tobj) \
2532 Data_Get_Struct(obj, mutex_t, tobj)
2534 static const char *mutex_unlock(mutex_t *mutex);
2536 static void
2537 mutex_free(void *ptr)
2539 if (ptr) {
2540 mutex_t *mutex = ptr;
2541 if (mutex->th) {
2542 /* rb_warn("free locked mutex"); */
2543 mutex_unlock(mutex);
2545 native_mutex_destroy(&mutex->lock);
2546 native_cond_destroy(&mutex->cond);
2548 ruby_xfree(ptr);
2551 static VALUE
2552 mutex_alloc(VALUE klass)
2554 VALUE volatile obj;
2555 mutex_t *mutex;
2557 obj = Data_Make_Struct(klass, mutex_t, NULL, mutex_free, mutex);
2558 native_mutex_initialize(&mutex->lock);
2559 native_cond_initialize(&mutex->cond);
2560 return obj;
2564 * call-seq:
2565 * Mutex.new => mutex
2567  *  Creates a new Mutex.
2569 static VALUE
2570 mutex_initialize(VALUE self)
2572 return self;
2575 VALUE
2576 rb_mutex_new(void)
2578 return mutex_alloc(rb_cMutex);
2582 * call-seq:
2583 * mutex.locked? => true or false
2585 * Returns +true+ if this lock is currently held by some thread.
2587 VALUE
2588 rb_mutex_locked_p(VALUE self)
2590 mutex_t *mutex;
2591 GetMutexPtr(self, mutex);
2592 return mutex->th ? Qtrue : Qfalse;
2595 static void
2596 mutex_locked(rb_thread_t *th, VALUE self)
2598 mutex_t *mutex;
2599 GetMutexPtr(self, mutex);
2601 if (th->keeping_mutexes) {
2602 mutex->next_mutex = th->keeping_mutexes;
2604 th->keeping_mutexes = mutex;
2608 * call-seq:
2609 * mutex.try_lock => true or false
2611  * Attempts to obtain the lock and returns immediately; +true+ if the lock
2612  * was granted, +false+ otherwise. Raises +ThreadError+ if the current thread already holds the lock.
2614 VALUE
2615 rb_mutex_trylock(VALUE self)
2617 mutex_t *mutex;
2618 VALUE locked = Qfalse;
2619 GetMutexPtr(self, mutex);
2621 if (mutex->th == GET_THREAD()) {
2622 rb_raise(rb_eThreadError, "deadlock; recursive locking");
2625 native_mutex_lock(&mutex->lock);
2626 if (mutex->th == 0) {
2627 mutex->th = GET_THREAD();
2628 locked = Qtrue;
2630 mutex_locked(GET_THREAD(), self);
2632 native_mutex_unlock(&mutex->lock);
2634 return locked;
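/*
 * Usage sketch; note that in this implementation a recursive try_lock raises
 * (see the GET_THREAD() check above) rather than returning +false+:
 *
 *    m = Mutex.new
 *    m.try_lock   #=> true
 *    m.try_lock   #=> ThreadError: deadlock; recursive locking
 */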
2637 static int
2638 lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
2640 int interrupted = 0;
2641 #if 0 /* for debug */
2642 native_thread_yield();
2643 #endif
2645 native_mutex_lock(&mutex->lock);
2646 th->transition_for_lock = 0;
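    /* Sleep while another thread owns the mutex.  When it becomes free, the
     * comma expression (mutex->th = th, 0) claims ownership and evaluates to
     * 0, ending the loop with the lock held.  cond_waiting counts sleepers;
     * mutex_unlock and lock_interrupt move that count to cond_notified when
     * they signal. */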
2647 while (mutex->th || (mutex->th = th, 0)) {
2648 if (last_thread) {
2649 interrupted = 2;
2650 break;
2653 mutex->cond_waiting++;
2654 native_cond_wait(&mutex->cond, &mutex->lock);
2655 mutex->cond_notified--;
2657 if (RUBY_VM_INTERRUPTED(th)) {
2658 interrupted = 1;
2659 break;
2662 th->transition_for_lock = 1;
2663 native_mutex_unlock(&mutex->lock);
2665 if (interrupted == 2) native_thread_yield();
2666 #if 0 /* for debug */
2667 native_thread_yield();
2668 #endif
2670 return interrupted;
2673 static void
2674 lock_interrupt(void *ptr)
2676 mutex_t *mutex = (mutex_t *)ptr;
2677 native_mutex_lock(&mutex->lock);
2678 if (mutex->cond_waiting > 0) {
2679 native_cond_broadcast(&mutex->cond);
2680 mutex->cond_notified = mutex->cond_waiting;
2681 mutex->cond_waiting = 0;
2683 native_mutex_unlock(&mutex->lock);
2687 * call-seq:
2688  *    mutex.lock => self
2690  * Attempts to grab the lock and waits if it isn't available.
2691  * Raises +ThreadError+ if +mutex+ was already locked by the current thread.
2693 VALUE
2694 rb_mutex_lock(VALUE self)
2696 if (rb_mutex_trylock(self) == Qfalse) {
2697 mutex_t *mutex;
2698 rb_thread_t *th = GET_THREAD();
2699 GetMutexPtr(self, mutex);
2701 while (mutex->th != th) {
2702 int interrupted;
2703 enum rb_thread_status prev_status = th->status;
2704 int last_thread = 0;
2705 struct rb_unblock_callback oldubf;
2707 set_unblock_function(th, lock_interrupt, mutex, &oldubf);
2708 th->status = THREAD_STOPPED_FOREVER;
2709 th->vm->sleeper++;
2710 th->locking_mutex = self;
2711 if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
2712 last_thread = 1;
2715 th->transition_for_lock = 1;
2716 BLOCKING_REGION_CORE({
2717 interrupted = lock_func(th, mutex, last_thread);
2719 th->transition_for_lock = 0;
2720 remove_signal_thread_list(th);
2721 reset_unblock_function(th, &oldubf);
2723 th->locking_mutex = Qfalse;
2724 if (mutex->th && interrupted == 2) {
2725 rb_check_deadlock(th->vm);
2727 if (th->status == THREAD_STOPPED_FOREVER) {
2728 th->status = prev_status;
2730 th->vm->sleeper--;
2732 if (mutex->th == th) mutex_locked(th, self);
2734 if (interrupted) {
2735 RUBY_VM_CHECK_INTS();
2739 return self;
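/*
 * When no thread can ever release the mutex, the sleeper accounting above
 * feeds the deadlock detector; sketch (both threads end up stopped forever):
 *
 *    m = Mutex.new
 *    m.lock
 *    Thread.new { m.lock }.join
 *      #=> fatal: deadlock detected
 */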
2742 static const char *
2743 mutex_unlock(mutex_t *mutex)
2745 const char *err = NULL;
2746 rb_thread_t *th = GET_THREAD();
2747 mutex_t *th_mutex;
2749 native_mutex_lock(&mutex->lock);
2751 if (mutex->th == 0) {
2752 err = "Attempt to unlock a mutex which is not locked";
2754     else if (mutex->th != th) { /* th already caches GET_THREAD() */
2755 err = "Attempt to unlock a mutex which is locked by another thread";
2757 else {
2758 mutex->th = 0;
2759 if (mutex->cond_waiting > 0) {
2760 /* waiting thread */
2761 native_cond_signal(&mutex->cond);
2762 mutex->cond_waiting--;
2763 mutex->cond_notified++;
2767 native_mutex_unlock(&mutex->lock);
2769 if (!err) {
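	/* Unlink this mutex from the thread's singly linked keeping_mutexes
	 * list: either it is the head, or we walk the next_mutex links to
	 * find its predecessor. */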
2770 th_mutex = th->keeping_mutexes;
2771 if (th_mutex == mutex) {
2772 th->keeping_mutexes = mutex->next_mutex;
2774 else {
2775 while (1) {
2776 mutex_t *tmp_mutex;
2777 tmp_mutex = th_mutex->next_mutex;
2778 if (tmp_mutex == mutex) {
2779 th_mutex->next_mutex = tmp_mutex->next_mutex;
2780 break;
2782 th_mutex = tmp_mutex;
2785 mutex->next_mutex = NULL;
2788 return err;
2792 * call-seq:
2793 * mutex.unlock => self
2795 * Releases the lock.
2796 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2798 VALUE
2799 rb_mutex_unlock(VALUE self)
2801 const char *err;
2802 mutex_t *mutex;
2803 GetMutexPtr(self, mutex);
2805 err = mutex_unlock(mutex);
2806     if (err) rb_raise(rb_eThreadError, "%s", err); /* err is a plain message, not a format string */
2808 return self;
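/*
 * Sketch of the error cases reported by mutex_unlock above (an exception in
 * a thread is re-raised by Thread#join):
 *
 *    m = Mutex.new
 *    m.unlock
 *      #=> ThreadError: Attempt to unlock a mutex which is not locked
 *    m.lock
 *    Thread.new { m.unlock }.join
 *      #=> ThreadError: Attempt to unlock a mutex which is locked by another thread
 */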
2811 static void
2812 rb_mutex_unlock_all(mutex_t *mutexes)
2814 const char *err;
2815 mutex_t *mutex;
2817 while (mutexes) {
2818 mutex = mutexes;
2819 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2820 mutexes); */
2821 mutexes = mutex->next_mutex;
2822 err = mutex_unlock(mutex);
2823 if (err) rb_bug("invalid keeping_mutexes: %s", err);
2827 static VALUE
2828 rb_mutex_sleep_forever(VALUE time)
2830 rb_thread_sleep_deadly();
2831 return Qnil;
2834 static VALUE
2835 rb_mutex_wait_for(VALUE time)
2837 const struct timeval *t = (struct timeval *)time;
2838 rb_thread_wait_for(*t);
2839 return Qnil;
2842 VALUE
2843 rb_mutex_sleep(VALUE self, VALUE timeout)
2845 time_t beg, end;
2846 struct timeval t;
2848 if (!NIL_P(timeout)) {
2849 t = rb_time_interval(timeout);
2851 rb_mutex_unlock(self);
2852 beg = time(0);
2853 if (NIL_P(timeout)) {
2854 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2856 else {
2857 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2859 end = time(0) - beg;
2860 return INT2FIX(end);
2864 * call-seq:
2865 * mutex.sleep(timeout = nil) => number
2867  * Releases the lock and sleeps for +timeout+ seconds if +timeout+ is given
2868  * and non-nil, or sleeps forever otherwise. Raises +ThreadError+ if +mutex+
2869  * wasn't locked by the current thread. Returns the number of seconds slept.
2871 static VALUE
2872 mutex_sleep(int argc, VALUE *argv, VALUE self)
2874 VALUE timeout;
2876 rb_scan_args(argc, argv, "01", &timeout);
2877 return rb_mutex_sleep(self, timeout);
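/*
 * Sketch: +beg+ and +end+ in rb_mutex_sleep are time_t, so the return value
 * is wall-clock time truncated to whole seconds and a fractional timeout
 * reports 1 or 2 here depending on rounding:
 *
 *    m = Mutex.new
 *    m.lock
 *    m.sleep(1.5)   #=> 1 or 2
 */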
2881 * call-seq:
2882 * mutex.synchronize { ... } => result of the block
2884 * Obtains a lock, runs the block, and releases the lock when the block
2885 * completes. See the example under +Mutex+.
2888 VALUE
2889 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2891 rb_mutex_lock(mutex);
2892 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
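/*
 * Ruby-level sketch of the lock/ensure-unlock pattern this helper implements;
 * Mutex#synchronize (documented above) behaves the same way:
 *
 *    m = Mutex.new
 *    count = 0
 *    threads = (1..10).map do
 *      Thread.new { m.synchronize { count += 1 } }
 *    end
 *    threads.each { |t| t.join }
 *    count   #=> 10
 */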
2896 * Document-class: Barrier
2898 typedef struct rb_thread_list_struct rb_thread_list_t;
2900 struct rb_thread_list_struct {
2901 rb_thread_t *th;
2902 rb_thread_list_t *next;
2905 static void
2906 thlist_mark(void *ptr)
2908 rb_thread_list_t *q = ptr;
2910 for (; q; q = q->next) {
2911 rb_gc_mark(q->th->self);
2915 static void
2916 thlist_free(void *ptr)
2918 rb_thread_list_t *q = ptr, *next;
2920 for (; q; q = next) {
2921 next = q->next;
2922 ruby_xfree(q);
2926 static int
2927 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
2929 int woken = 0;
2930 rb_thread_list_t *q;
2932 while ((q = *list) != NULL) {
2933 rb_thread_t *th = q->th;
2935 *list = q->next;
2936 ruby_xfree(q);
2937 if (th->status != THREAD_KILLED) {
2938 rb_thread_ready(th);
2939 if (!woken && woken_thread) *woken_thread = th;
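	    /* maxth == 0 means "wake everyone"; woken must still count each
	     * thread, so the increment stays on the left of the && below. */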
2940 if (++woken >= maxth && maxth) break;
2943 return woken;
2946 typedef struct {
2947 rb_thread_t *owner;
2948 rb_thread_list_t *waiting, **tail;
2949 } rb_barrier_t;
2951 static void
2952 barrier_mark(void *ptr)
2954 rb_barrier_t *b = ptr;
2956 if (b->owner) rb_gc_mark(b->owner->self);
2957 thlist_mark(b->waiting);
2960 static void
2961 barrier_free(void *ptr)
2963 rb_barrier_t *b = ptr;
2965 b->owner = 0;
2966 thlist_free(b->waiting);
2967 b->waiting = 0;
2968 ruby_xfree(ptr);
2971 static VALUE
2972 barrier_alloc(VALUE klass)
2974 VALUE volatile obj;
2975 rb_barrier_t *barrier;
2977 obj = Data_Make_Struct(klass, rb_barrier_t, barrier_mark, barrier_free, barrier);
2978 barrier->owner = GET_THREAD();
2979 barrier->waiting = 0;
2980 barrier->tail = &barrier->waiting;
2981 return obj;
2984 VALUE
2985 rb_barrier_new(void)
2987 return barrier_alloc(rb_cBarrier);
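/*
 * Ownership protocol for the functions below: the allocating thread owns the
 * barrier; any other thread calling rb_barrier_wait queues itself on the
 * waiting list and sleeps.  rb_barrier_release may only be called by the
 * owner and wakes every waiter; if the owner has died, the first waiter to
 * notice inherits ownership.  Barrier is used from C only and is not exposed
 * to Ruby by Init_Thread.
 */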
2990 VALUE
2991 rb_barrier_wait(VALUE self)
2993 rb_barrier_t *barrier;
2994 rb_thread_list_t *q;
2996 Data_Get_Struct(self, rb_barrier_t, barrier);
2997 if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
2998 barrier->owner = 0;
2999 if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
3000 return Qtrue;
3002 else if (barrier->owner == GET_THREAD()) {
3003 return Qfalse;
3005 else {
3006 *barrier->tail = q = ALLOC(rb_thread_list_t);
3007 q->th = GET_THREAD();
3008 q->next = 0;
3009 barrier->tail = &q->next;
3010 rb_thread_sleep_forever();
3011 return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
3015 VALUE
3016 rb_barrier_release(VALUE self)
3018 rb_barrier_t *barrier;
3019 unsigned int n;
3021 Data_Get_Struct(self, rb_barrier_t, barrier);
3022 if (barrier->owner != GET_THREAD()) {
3023 rb_raise(rb_eThreadError, "not owned");
3025 n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
3026 return n ? UINT2NUM(n) : Qfalse;
3029 /* variables for recursive traversals */
3030 static ID recursive_key;
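/*
 * The per-thread value stored under recursive_key is a Hash keyed by the
 * calling method's symbol; each value is an inner Hash whose keys are the
 * object ids currently being traversed by that method on this thread.
 */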
3032 static VALUE
3033 recursive_check(VALUE hash, VALUE obj)
3035 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3036 return Qfalse;
3038 else {
3039 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
3041 if (NIL_P(list) || TYPE(list) != T_HASH)
3042 return Qfalse;
3043 if (NIL_P(rb_hash_lookup(list, obj)))
3044 return Qfalse;
3045 return Qtrue;
3049 static VALUE
3050 recursive_push(VALUE hash, VALUE obj)
3052 VALUE list, sym;
3054 sym = ID2SYM(rb_frame_this_func());
3055 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3056 hash = rb_hash_new();
3057 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3058 list = Qnil;
3060 else {
3061 list = rb_hash_aref(hash, sym);
3063 if (NIL_P(list) || TYPE(list) != T_HASH) {
3064 list = rb_hash_new();
3065 rb_hash_aset(hash, sym, list);
3067 rb_hash_aset(list, obj, Qtrue);
3068 return hash;
3071 static void
3072 recursive_pop(VALUE hash, VALUE obj)
3074 VALUE list, sym;
3076 sym = ID2SYM(rb_frame_this_func());
3077 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3078 VALUE symname;
3079 VALUE thrname;
3080 symname = rb_inspect(sym);
3081 thrname = rb_inspect(rb_thread_current());
3083 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
3084 StringValuePtr(symname), StringValuePtr(thrname));
3086 list = rb_hash_aref(hash, sym);
3087 if (NIL_P(list) || TYPE(list) != T_HASH) {
3088 VALUE symname = rb_inspect(sym);
3089 VALUE thrname = rb_inspect(rb_thread_current());
3090 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
3091 StringValuePtr(symname), StringValuePtr(thrname));
3093 rb_hash_delete(list, obj);
3096 VALUE
3097 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
3099 VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
3100 VALUE objid = rb_obj_id(obj);
3102 if (recursive_check(hash, objid)) {
3103 return (*func) (obj, arg, Qtrue);
3105 else {
3106 VALUE result = Qundef;
3107 int state;
3109 hash = recursive_push(hash, objid);
3110 PUSH_TAG();
3111 if ((state = EXEC_TAG()) == 0) {
3112 result = (*func) (obj, arg, Qfalse);
3114 POP_TAG();
3115 recursive_pop(hash, objid);
3116 if (state)
3117 JUMP_TAG(state);
3118 return result;
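/*
 * rb_exec_recursive is what lets methods such as Array#inspect survive
 * self-referential structures; sketch:
 *
 *    a = []
 *    a << a
 *    a.inspect   #=> "[[...]]"
 */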
3122 /* tracer */
3124 static rb_event_hook_t *
3125 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3127 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
3128 hook->func = func;
3129 hook->flag = events;
3130 hook->data = data;
3131 return hook;
3134 static void
3135 thread_reset_event_flags(rb_thread_t *th)
3137 rb_event_hook_t *hook = th->event_hooks;
3138 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
3140 while (hook) {
3141 flag |= hook->flag;
3142 	hook = hook->next;
    }
    th->event_flags = flag; /* write the recomputed mask back; without this the loop has no effect */
3146 void
3147 rb_thread_add_event_hook(rb_thread_t *th,
3148 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3150 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3151 hook->next = th->event_hooks;
3152 th->event_hooks = hook;
3153 thread_reset_event_flags(th);
3156 static int
3157 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
3159 VALUE thval = key;
3160 rb_thread_t *th;
3161 GetThreadPtr(thval, th);
3163 if (flag) {
3164 th->event_flags |= RUBY_EVENT_VM;
3166 else {
3167 th->event_flags &= (~RUBY_EVENT_VM);
3169 return ST_CONTINUE;
3172 static void
3173 set_threads_event_flags(int flag)
3175 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
3178 void
3179 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3181 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3182 rb_vm_t *vm = GET_VM();
3184 hook->next = vm->event_hooks;
3185 vm->event_hooks = hook;
3187 set_threads_event_flags(1);
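/*
 * VM-wide hooks are prepended to vm->event_hooks; the RUBY_EVENT_VM bit that
 * set_threads_event_flags() sets on every living thread is what makes
 * per-thread event dispatch consult this global list as well.
 */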
3190 static int
3191 remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
3193 rb_event_hook_t *prev = NULL, *hook = *root, *next;
3195 while (hook) {
3196 next = hook->next;
3197 if (func == 0 || hook->func == func) {
3198 if (prev) {
3199 prev->next = hook->next;
3201 else {
3202 *root = hook->next;
3204 xfree(hook);
3206 else {
3207 prev = hook;
3209 hook = next;
3211 return -1;
3214 int
3215 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
3217 int ret = remove_event_hook(&th->event_hooks, func);
3218 thread_reset_event_flags(th);
3219 return ret;
3222 int
3223 rb_remove_event_hook(rb_event_hook_func_t func)
3225 rb_vm_t *vm = GET_VM();
3226 rb_event_hook_t *hook = vm->event_hooks;
3227 int ret = remove_event_hook(&vm->event_hooks, func);
3229 if (hook != NULL && vm->event_hooks == NULL) {
3230 set_threads_event_flags(0);
3233 return ret;
3236 static int
3237 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
3239 rb_thread_t *th;
3240 GetThreadPtr((VALUE)key, th);
3241 rb_thread_remove_event_hook(th, 0);
3242 return ST_CONTINUE;
3245 void
3246 rb_clear_trace_func(void)
3248 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
3249 rb_remove_event_hook(0);
3252 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
3255 * call-seq:
3256 * set_trace_func(proc) => proc
3257 * set_trace_func(nil) => nil
3259 * Establishes _proc_ as the handler for tracing, or disables
3260 * tracing if the parameter is +nil+. _proc_ takes up
3261 * to six parameters: an event name, a filename, a line number, an
3262 * object id, a binding, and the name of a class. _proc_ is
3263 * invoked whenever an event occurs. Events are: <code>c-call</code>
3264 * (call a C-language routine), <code>c-return</code> (return from a
3265 * C-language routine), <code>call</code> (call a Ruby method),
3266 * <code>class</code> (start a class or module definition),
3267 * <code>end</code> (finish a class or module definition),
3268 * <code>line</code> (execute code on a new line), <code>raise</code>
3269 * (raise an exception), and <code>return</code> (return from a Ruby
3270 * method). Tracing is disabled within the context of _proc_.
3272 * class Test
3273 * def test
3274 * a = 1
3275 * b = 2
3276 * end
3277 * end
3279 * set_trace_func proc { |event, file, line, id, binding, classname|
3280 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3282 * t = Test.new
3283 * t.test
3285 * line prog.rb:11 false
3286 * c-call prog.rb:11 new Class
3287 * c-call prog.rb:11 initialize Object
3288 * c-return prog.rb:11 initialize Object
3289 * c-return prog.rb:11 new Class
3290 * line prog.rb:12 false
3291 * call prog.rb:2 test Test
3292 * line prog.rb:3 test Test
3293 * line prog.rb:4 test Test
3294 * return prog.rb:4 test Test
3297 static VALUE
3298 set_trace_func(VALUE obj, VALUE trace)
3300 rb_remove_event_hook(call_trace_func);
3302 if (NIL_P(trace)) {
3303 return Qnil;
3306 if (!rb_obj_is_proc(trace)) {
3307 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3310 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
3311 return trace;
3314 static void
3315 thread_add_trace_func(rb_thread_t *th, VALUE trace)
3317 if (!rb_obj_is_proc(trace)) {
3318 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3321 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
3324 static VALUE
3325 thread_add_trace_func_m(VALUE obj, VALUE trace)
3327 rb_thread_t *th;
3328 GetThreadPtr(obj, th);
3329 thread_add_trace_func(th, trace);
3330 return trace;
3333 static VALUE
3334 thread_set_trace_func_m(VALUE obj, VALUE trace)
3336 rb_thread_t *th;
3337 GetThreadPtr(obj, th);
3338 rb_thread_remove_event_hook(th, call_trace_func);
3340 if (NIL_P(trace)) {
3341 return Qnil;
3343 thread_add_trace_func(th, trace);
3344 return trace;
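/*
 * Sketch of per-thread tracing via the methods bound in Init_Thread below
 * (Thread#set_trace_func / Thread#add_trace_func); output format as in the
 * set_trace_func example above:
 *
 *    t = Thread.new { Thread.stop; 1 + 1 }
 *    t.set_trace_func proc { |event, file, line, id, binding, klass|
 *      printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, klass
 *    }
 *    t.run
 *    t.join
 */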
3347 static const char *
3348 get_event_name(rb_event_flag_t event)
3350 switch (event) {
3351 case RUBY_EVENT_LINE:
3352 return "line";
3353 case RUBY_EVENT_CLASS:
3354 return "class";
3355 case RUBY_EVENT_END:
3356 return "end";
3357 case RUBY_EVENT_CALL:
3358 return "call";
3359 case RUBY_EVENT_RETURN:
3360 return "return";
3361 case RUBY_EVENT_C_CALL:
3362 return "c-call";
3363 case RUBY_EVENT_C_RETURN:
3364 return "c-return";
3365 case RUBY_EVENT_RAISE:
3366 return "raise";
3367 default:
3368 return "unknown";
3372 VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);
3374 struct call_trace_func_args {
3375 rb_event_flag_t event;
3376 VALUE proc;
3377 VALUE self;
3378 ID id;
3379 VALUE klass;
3382 static VALUE
3383 call_trace_proc(VALUE args, int tracing)
3385 struct call_trace_func_args *p = (struct call_trace_func_args *)args;
3386 VALUE eventname = rb_str_new2(get_event_name(p->event));
3387 VALUE filename = rb_str_new2(rb_sourcefile());
3388 VALUE argv[6];
3389 int line = rb_sourceline();
3390 ID id = 0;
3391 VALUE klass = 0;
3393 if (p->event == RUBY_EVENT_C_CALL ||
3394 p->event == RUBY_EVENT_C_RETURN) {
3395 id = p->id;
3396 klass = p->klass;
3398 else {
3399 rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
3401 if (id == ID_ALLOCATOR)
3402 return Qnil;
3403 if (klass) {
3404 if (TYPE(klass) == T_ICLASS) {
3405 klass = RBASIC(klass)->klass;
3407 else if (FL_TEST(klass, FL_SINGLETON)) {
3408 klass = rb_iv_get(klass, "__attached__");
3412 argv[0] = eventname;
3413 argv[1] = filename;
3414 argv[2] = INT2FIX(line);
3415 argv[3] = id ? ID2SYM(id) : Qnil;
3416 argv[4] = p->self ? rb_binding_new() : Qnil;
3417 argv[5] = klass ? klass : Qnil;
3419 return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
3422 static void
3423 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3425 struct call_trace_func_args args;
3427 args.event = event;
3428 args.proc = proc;
3429 args.self = self;
3430 args.id = id;
3431 args.klass = klass;
3432 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
3435 VALUE
3436 ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
3438 rb_thread_t *th = GET_THREAD();
3439 int state, raised, tracing;
3440 VALUE result = Qnil;
3442 if ((tracing = th->tracing) != 0 && !always) {
3443 return Qnil;
3445 else {
3446 th->tracing = 1;
3449 raised = rb_thread_reset_raised(th);
3451 PUSH_TAG();
3452 if ((state = EXEC_TAG()) == 0) {
3453 result = (*func)(arg, tracing);
3456 if (raised) {
3457 rb_thread_set_raised(th);
3459 POP_TAG();
3461 th->tracing = tracing;
3462 if (state) {
3463 JUMP_TAG(state);
3466 return result;
3470 * +Thread+ encapsulates the behavior of a thread of
3471 * execution, including the main thread of the Ruby script.
3473 * In the descriptions of the methods in this class, the parameter _sym_
3474 * refers to a symbol, which is either a quoted string or a
3475 * +Symbol+ (such as <code>:name</code>).
3478 void
3479 Init_Thread(void)
3481 #undef rb_intern
3483 VALUE cThGroup;
3485 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
3486 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
3487 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
3488 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
3489 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
3490 rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
3491 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
3492 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
3493 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
3494 rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
3495 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
3496 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
3497 #if THREAD_DEBUG < 0
3498 rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
3499 rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
3500 #endif
3502 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
3503 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
3504 rb_define_method(rb_cThread, "join", thread_join_m, -1);
3505 rb_define_method(rb_cThread, "value", thread_value, 0);
3506 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
3507 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
3508 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
3509 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
3510 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
3511 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
3512 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
3513 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
3514 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
3515 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
3516 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
3517 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
3518 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
3519 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
3520 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
3521 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
3522 rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
3523 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
3525 rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);
3527 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
3528 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
3529 rb_define_method(cThGroup, "list", thgroup_list, 0);
3530 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
3531 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
3532 rb_define_method(cThGroup, "add", thgroup_add, 1);
3535 rb_thread_t *th = GET_THREAD();
3536 th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
3537 rb_define_const(cThGroup, "Default", th->thgroup);
3540 rb_cMutex = rb_define_class("Mutex", rb_cObject);
3541 rb_define_alloc_func(rb_cMutex, mutex_alloc);
3542 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
3543 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
3544 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
3545 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
3546 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
3547 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
3549 recursive_key = rb_intern("__recursive_key__");
3550 rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
3552 /* trace */
3553 rb_define_global_function("set_trace_func", set_trace_func, 1);
3554 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
3555 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
3557 /* init thread core */
3558 Init_native_thread();
3560 /* main thread setting */
3562 /* acquire global interpreter lock */
3563 rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
3564 native_mutex_initialize(lp);
3565 native_mutex_lock(lp);
3566 native_mutex_initialize(&GET_THREAD()->interrupt_lock);
3570 rb_thread_create_timer_thread();
3572 (void)native_mutex_trylock;
3573 (void)ruby_thread_set_native;
3576 int
3577 ruby_native_thread_p(void)
3579 rb_thread_t *th = ruby_thread_from_native();
3581 return th ? Qtrue : Qfalse;
3584 static int
3585 check_deadlock_i(st_data_t key, st_data_t val, int *found)
3587 VALUE thval = key;
3588 rb_thread_t *th;
3589 GetThreadPtr(thval, th);
3591 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
3592 *found = 1;
3594 else if (th->locking_mutex) {
3595 mutex_t *mutex;
3596 GetMutexPtr(th->locking_mutex, mutex);
3598 native_mutex_lock(&mutex->lock);
3599 if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
3600 *found = 1;
3602 native_mutex_unlock(&mutex->lock);
3605 return (*found) ? ST_STOP : ST_CONTINUE;
3608 #if 0 /* for debug */
3609 static int
3610 debug_i(st_data_t key, st_data_t val, int *found)
3612 VALUE thval = key;
3613 rb_thread_t *th;
3614 GetThreadPtr(thval, th);
3616 printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
3617 if (th->locking_mutex) {
3618 mutex_t *mutex;
3619 GetMutexPtr(th->locking_mutex, mutex);
3621 native_mutex_lock(&mutex->lock);
3622 printf(" %p %d\n", mutex->th, mutex->cond_notified);
3623 native_mutex_unlock(&mutex->lock);
3625 else puts("");
3627 return ST_CONTINUE;
3629 #endif
3631 static void
3632 rb_check_deadlock(rb_vm_t *vm)
3634 int found = 0;
3636 if (vm_living_thread_num(vm) > vm->sleeper) return;
3637 if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
3639 st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);
3641 if (!found) {
3642 VALUE argv[2];
3643 argv[0] = rb_eFatal;
3644 argv[1] = rb_str_new2("deadlock detected");
3645 #if 0 /* for debug */
3646 printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
3647 st_foreach(vm->living_threads, debug_i, (st_data_t)0);
3648 #endif
3649 rb_thread_raise(2, argv, vm->main_thread);
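/*
 * Sketch of what this detector catches: every living thread asleep forever
 * with no pending interrupt and no mutex hand-off in flight:
 *
 *    Thread.new { Thread.stop }.join
 *      #=> fatal: deadlock detected
 */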
3653 static void
3654 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3656 VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
3657 if (coverage && RBASIC(coverage)->klass == 0) {
3658 long line = rb_sourceline() - 1;
3659 long count;
3660 if (RARRAY_PTR(coverage)[line] == Qnil) {
3661 	    rb_bug("update_coverage: executed line has no coverage counter");
3663 count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
3664 if (POSFIXABLE(count)) {
3665 RARRAY_PTR(coverage)[line] = LONG2FIX(count);
3670 VALUE
3671 rb_get_coverages(void)
3673 return GET_VM()->coverages;
3676 void
3677 rb_set_coverages(VALUE coverages)
3679 GET_VM()->coverages = coverages;
3680 rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
3683 void
3684 rb_reset_coverages(void)
3686 GET_VM()->coverages = Qfalse;
3687 rb_remove_event_hook(update_coverage);
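/*
 * These hooks are driven by the coverage extension (ext/coverage); rough
 * sketch, where "foo.rb" stands in for any file loaded after measurement
 * starts:
 *
 *    require 'coverage'
 *    Coverage.start
 *    require 'foo'
 *    p Coverage.result   #=> {"foo.rb" => [1, 2, nil, ...]}
 */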