* transcode.c (econv_primitive_convert): add output_byteoffset
[ruby-svn.git] / thread.c
blobe30e4e33d75d3eaa9349c70b70c4ec346a709491
1 /**********************************************************************
3 thread.c -
5 $Author$
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
  YARV Thread Design
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
17 model 2: Native Thread with Giant VM lock
18 Using pthread (or Windows thread) and Ruby threads run concurrent.
20 model 3: Native Thread with fine grain lock
21 Using pthread and Ruby threads run concurrent or parallel.
23 ------------------------------------------------------------------------
25 model 2:
26 A thread has mutex (GVL: Global VM Lock) can run. When thread
27 scheduling, running thread release GVL. If running thread
28 try blocking operation, this thread must release GVL and another
29 thread can continue this flow. After blocking operation, thread
30 must check interrupt (RUBY_VM_CHECK_INTS).
32 Every VM can run parallel.
34 Ruby threads are scheduled by OS thread scheduler.
36 ------------------------------------------------------------------------
38 model 3:
39 Every threads run concurrent or parallel and to access shared object
40 exclusive access control is needed. For example, to access String
41 object or Array object, fine grain lock must be locked every time.
45 /* for model 2 */
47 #include "eval_intern.h"
48 #include "vm.h"
49 #include "gc.h"
51 #ifndef USE_NATIVE_THREAD_PRIORITY
52 #define USE_NATIVE_THREAD_PRIORITY 0
53 #define RUBY_THREAD_PRIORITY_MAX 3
54 #define RUBY_THREAD_PRIORITY_MIN -3
55 #endif
57 #ifndef THREAD_DEBUG
58 #define THREAD_DEBUG 0
59 #endif
61 VALUE rb_cMutex;
62 VALUE rb_cBarrier;
64 static void sleep_timeval(rb_thread_t *th, struct timeval time);
65 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec);
66 static void sleep_forever(rb_thread_t *th, int nodeadlock);
67 static double timeofday(void);
68 struct timeval rb_time_interval(VALUE);
69 static int rb_thread_dead(rb_thread_t *th);
71 static void rb_check_deadlock(rb_vm_t *vm);
73 void rb_signal_exec(rb_thread_t *th, int sig);
74 void rb_disable_interrupt(void);
76 static const VALUE eKillSignal = INT2FIX(0);
77 static const VALUE eTerminateSignal = INT2FIX(1);
78 static volatile int system_working = 1;
80 inline static void
81 st_delete_wrap(st_table *table, st_data_t key)
83 st_delete(table, &key, 0);
86 /********************************************************************************/
88 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
90 static void set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
91 struct rb_unblock_callback *old);
92 static void reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old);
94 #define GVL_UNLOCK_BEGIN() do { \
95 rb_thread_t *_th_stored = GET_THREAD(); \
96 rb_gc_save_machine_context(_th_stored); \
97 native_mutex_unlock(&_th_stored->vm->global_vm_lock)
99 #define GVL_UNLOCK_END() \
100 native_mutex_lock(&_th_stored->vm->global_vm_lock); \
101 rb_thread_set_current(_th_stored); \
102 } while(0)
104 #define BLOCKING_REGION_CORE(exec) do { \
105 GVL_UNLOCK_BEGIN(); {\
106 exec; \
108 GVL_UNLOCK_END(); \
109 } while(0);
111 #define BLOCKING_REGION(exec, ubf, ubfarg) do { \
112 rb_thread_t *__th = GET_THREAD(); \
113 enum rb_thread_status __prev_status = __th->status; \
114 struct rb_unblock_callback __oldubf; \
115 set_unblock_function(__th, ubf, ubfarg, &__oldubf); \
116 __th->status = THREAD_STOPPED; \
117 thread_debug("enter blocking region (%p)\n", __th); \
118 BLOCKING_REGION_CORE(exec); \
119 thread_debug("leave blocking region (%p)\n", __th); \
120 remove_signal_thread_list(__th); \
121 reset_unblock_function(__th, &__oldubf); \
122 if (__th->status == THREAD_STOPPED) { \
123 __th->status = __prev_status; \
125 RUBY_VM_CHECK_INTS(); \
126 } while(0)
128 #if THREAD_DEBUG
129 #ifdef HAVE_VA_ARGS_MACRO
130 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
131 #define thread_debug(fmt, ...) rb_thread_debug(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
132 #define POSITION_FORMAT "%s:%d:"
133 #define POSITION_ARGS ,file, line
134 #else
135 void rb_thread_debug(const char *fmt, ...);
136 #define thread_debug rb_thread_debug
137 #define POSITION_FORMAT
138 #define POSITION_ARGS
139 #endif
141 # if THREAD_DEBUG < 0
142 static int rb_thread_debug_enabled;
144 static VALUE
145 rb_thread_s_debug(void)
147 return INT2NUM(rb_thread_debug_enabled);
150 static VALUE
151 rb_thread_s_debug_set(VALUE self, VALUE val)
153 rb_thread_debug_enabled = RTEST(val);
154 return val;
156 # else
157 # define rb_thread_debug_enabled THREAD_DEBUG
158 # endif
159 #else
160 #define thread_debug if(0)printf
161 #endif
163 #ifndef __ia64
164 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
165 #endif
166 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
167 VALUE *register_stack_start));
168 static void timer_thread_function(void *);
170 #if defined(_WIN32)
171 #include "thread_win32.c"
173 #define DEBUG_OUT() \
174 WaitForSingleObject(&debug_mutex, INFINITE); \
175 printf(POSITION_FORMAT"%p - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
176 fflush(stdout); \
177 ReleaseMutex(&debug_mutex);
179 #elif defined(HAVE_PTHREAD_H)
180 #include "thread_pthread.c"
182 #define DEBUG_OUT() \
183 pthread_mutex_lock(&debug_mutex); \
184 printf(POSITION_FORMAT"%#"PRIxVALUE" - %s" POSITION_ARGS, (VALUE)pthread_self(), buf); \
185 fflush(stdout); \
186 pthread_mutex_unlock(&debug_mutex);
188 #else
189 #error "unsupported thread type"
190 #endif
192 #if THREAD_DEBUG
193 static int debug_mutex_initialized = 1;
194 static rb_thread_lock_t debug_mutex;
196 void
197 rb_thread_debug(
198 #ifdef HAVE_VA_ARGS_MACRO
199 const char *file, int line,
200 #endif
201 const char *fmt, ...)
203 va_list args;
204 char buf[BUFSIZ];
206 if (!rb_thread_debug_enabled) return;
208 if (debug_mutex_initialized == 1) {
209 debug_mutex_initialized = 0;
210 native_mutex_initialize(&debug_mutex);
213 va_start(args, fmt);
214 vsnprintf(buf, BUFSIZ, fmt, args);
215 va_end(args);
217 DEBUG_OUT();
219 #endif
222 static void
223 set_unblock_function(rb_thread_t *th, rb_unblock_function_t *func, void *arg,
224 struct rb_unblock_callback *old)
226 check_ints:
227 RUBY_VM_CHECK_INTS(); /* check signal or so */
228 native_mutex_lock(&th->interrupt_lock);
229 if (th->interrupt_flag) {
230 native_mutex_unlock(&th->interrupt_lock);
231 goto check_ints;
233 else {
234 if (old) *old = th->unblock;
235 th->unblock.func = func;
236 th->unblock.arg = arg;
238 native_mutex_unlock(&th->interrupt_lock);
241 static void
242 reset_unblock_function(rb_thread_t *th, const struct rb_unblock_callback *old)
244 native_mutex_lock(&th->interrupt_lock);
245 th->unblock = *old;
246 native_mutex_unlock(&th->interrupt_lock);
249 static void
250 rb_thread_interrupt(rb_thread_t *th)
252 native_mutex_lock(&th->interrupt_lock);
253 RUBY_VM_SET_INTERRUPT(th);
254 if (th->unblock.func) {
255 (th->unblock.func)(th->unblock.arg);
257 else {
258 /* none */
260 native_mutex_unlock(&th->interrupt_lock);
264 static int
265 terminate_i(st_data_t key, st_data_t val, rb_thread_t *main_thread)
267 VALUE thval = key;
268 rb_thread_t *th;
269 GetThreadPtr(thval, th);
271 if (th != main_thread) {
272 thread_debug("terminate_i: %p\n", th);
273 rb_thread_interrupt(th);
274 th->thrown_errinfo = eTerminateSignal;
275 th->status = THREAD_TO_KILL;
277 else {
278 thread_debug("terminate_i: main thread (%p)\n", th);
280 return ST_CONTINUE;
283 typedef struct rb_mutex_struct
285 rb_thread_lock_t lock;
286 rb_thread_cond_t cond;
287 struct rb_thread_struct volatile *th;
288 volatile int cond_waiting, cond_notified;
289 struct rb_mutex_struct *next_mutex;
290 } mutex_t;
292 static void rb_mutex_unlock_all(mutex_t *mutex);
294 void
295 rb_thread_terminate_all(void)
297 rb_thread_t *th = GET_THREAD(); /* main thread */
298 rb_vm_t *vm = th->vm;
299 if (vm->main_thread != th) {
300 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)", vm->main_thread, th);
303 /* unlock all locking mutexes */
304 if (th->keeping_mutexes) {
305 rb_mutex_unlock_all(th->keeping_mutexes);
308 thread_debug("rb_thread_terminate_all (main thread: %p)\n", th);
309 st_foreach(vm->living_threads, terminate_i, (st_data_t)th);
311 while (!rb_thread_alone()) {
312 PUSH_TAG();
313 if (EXEC_TAG() == 0) {
314 rb_thread_schedule();
316 else {
317 /* ignore exception */
319 POP_TAG();
321 system_working = 0;
324 static void
325 thread_cleanup_func_before_exec(void *th_ptr)
327 rb_thread_t *th = th_ptr;
328 th->status = THREAD_KILLED;
329 th->machine_stack_start = th->machine_stack_end = 0;
330 #ifdef __ia64
331 th->machine_register_stack_start = th->machine_register_stack_end = 0;
332 #endif
335 static void
336 thread_cleanup_func(void *th_ptr)
338 rb_thread_t *th = th_ptr;
339 thread_cleanup_func_before_exec(th_ptr);
340 native_thread_destroy(th);
343 extern void ruby_error_print(void);
344 static VALUE rb_thread_raise(int, VALUE *, rb_thread_t *);
345 void rb_thread_recycle_stack_release(VALUE *);
347 void
348 ruby_thread_init_stack(rb_thread_t *th)
350 native_thread_init_stack(th);
353 static int
354 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
356 int state;
357 VALUE args = th->first_args;
358 rb_proc_t *proc;
359 rb_thread_t *join_th;
360 rb_thread_t *main_th;
361 VALUE errinfo = Qnil;
363 th->machine_stack_start = stack_start;
364 #ifdef __ia64
365 th->machine_register_stack_start = register_stack_start;
366 #endif
367 thread_debug("thread start: %p\n", th);
369 native_mutex_lock(&th->vm->global_vm_lock);
371 thread_debug("thread start (get lock): %p\n", th);
372 rb_thread_set_current(th);
374 TH_PUSH_TAG(th);
375 if ((state = EXEC_TAG()) == 0) {
376 SAVE_ROOT_JMPBUF(th, {
377 if (th->first_proc) {
378 GetProcPtr(th->first_proc, proc);
379 th->errinfo = Qnil;
380 th->local_lfp = proc->block.lfp;
381 th->local_svar = Qnil;
382 th->value = vm_invoke_proc(th, proc, proc->block.self,
383 RARRAY_LEN(args), RARRAY_PTR(args), 0);
385 else {
386 th->value = (*th->first_func)((void *)th->first_args);
390 else {
391 errinfo = th->errinfo;
392 if (NIL_P(errinfo)) errinfo = rb_errinfo();
393 if (state == TAG_FATAL) {
394 /* fatal error within this thread, need to stop whole script */
396 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
397 if (th->safe_level >= 4) {
398 th->errinfo = rb_exc_new3(rb_eSecurityError,
399 rb_sprintf("Insecure exit at level %d", th->safe_level));
400 errinfo = Qnil;
403 else if (th->safe_level < 4 &&
404 (th->vm->thread_abort_on_exception ||
405 th->abort_on_exception || RTEST(ruby_debug))) {
406 /* exit on main_thread */
408 else {
409 errinfo = Qnil;
411 th->value = Qnil;
414 th->status = THREAD_KILLED;
415 thread_debug("thread end: %p\n", th);
417 main_th = th->vm->main_thread;
418 if (th != main_th) {
419 if (TYPE(errinfo) == T_OBJECT) {
420 /* treat with normal error object */
421 rb_thread_raise(1, &errinfo, main_th);
424 TH_POP_TAG();
426 /* locking_mutex must be Qfalse */
427 if (th->locking_mutex != Qfalse) {
428 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
429 th, th->locking_mutex);
432 /* unlock all locking mutexes */
433 if (th->keeping_mutexes) {
434 rb_mutex_unlock_all(th->keeping_mutexes);
435 th->keeping_mutexes = NULL;
438 /* delete self from living_threads */
439 st_delete_wrap(th->vm->living_threads, th->self);
441 /* wake up joinning threads */
442 join_th = th->join_list_head;
443 while (join_th) {
444 if (join_th == main_th) errinfo = Qnil;
445 rb_thread_interrupt(join_th);
446 switch (join_th->status) {
447 case THREAD_STOPPED: case THREAD_STOPPED_FOREVER:
448 join_th->status = THREAD_RUNNABLE;
449 default: break;
451 join_th = join_th->join_list_next;
453 if (th != main_th) rb_check_deadlock(th->vm);
455 if (!th->root_fiber) {
456 rb_thread_recycle_stack_release(th->stack);
457 th->stack = 0;
460 thread_cleanup_func(th);
461 native_mutex_unlock(&th->vm->global_vm_lock);
463 return 0;
466 static VALUE
467 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
469 rb_thread_t *th;
471 if (OBJ_FROZEN(GET_THREAD()->thgroup)) {
472 rb_raise(rb_eThreadError,
473 "can't start a new thread (frozen ThreadGroup)");
475 GetThreadPtr(thval, th);
477 /* setup thread environment */
478 th->first_func = fn;
479 th->first_proc = fn ? Qfalse : rb_block_proc();
480 th->first_args = args; /* GC: shouldn't put before above line */
482 th->priority = GET_THREAD()->priority;
483 th->thgroup = GET_THREAD()->thgroup;
485 native_mutex_initialize(&th->interrupt_lock);
486 /* kick thread */
487 st_insert(th->vm->living_threads, thval, (st_data_t) th->thread_id);
488 native_thread_create(th);
489 return thval;
492 static VALUE
493 thread_s_new(int argc, VALUE *argv, VALUE klass)
495 rb_thread_t *th;
496 VALUE thread = rb_thread_alloc(klass);
497 rb_obj_call_init(thread, argc, argv);
498 GetThreadPtr(thread, th);
499 if (!th->first_args) {
500 rb_raise(rb_eThreadError, "uninitialized thread - check `%s#initialize'",
501 rb_class2name(klass));
503 return thread;
507 * call-seq:
508 * Thread.start([args]*) {|args| block } => thread
509 * Thread.fork([args]*) {|args| block } => thread
511 * Basically the same as <code>Thread::new</code>. However, if class
512 * <code>Thread</code> is subclassed, then calling <code>start</code> in that
513 * subclass will not invoke the subclass's <code>initialize</code> method.
516 static VALUE
517 thread_start(VALUE klass, VALUE args)
519 return thread_create_core(rb_thread_alloc(klass), args, 0);
522 static VALUE
523 thread_initialize(VALUE thread, VALUE args)
525 rb_thread_t *th;
526 if (!rb_block_given_p()) {
527 rb_raise(rb_eThreadError, "must be called with a block");
529 GetThreadPtr(thread, th);
530 if (th->first_args) {
531 VALUE rb_proc_location(VALUE self);
532 VALUE proc = th->first_proc, line, loc;
533 const char *file;
534 if (!proc || !RTEST(loc = rb_proc_location(proc))) {
535 rb_raise(rb_eThreadError, "already initialized thread");
537 file = RSTRING_PTR(RARRAY_PTR(loc)[0]);
538 if (NIL_P(line = RARRAY_PTR(loc)[1])) {
539 rb_raise(rb_eThreadError, "already initialized thread - %s",
540 file);
542 rb_raise(rb_eThreadError, "already initialized thread - %s:%d",
543 file, NUM2INT(line));
545 return thread_create_core(thread, args, 0);
548 VALUE
549 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
551 return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
555 /* +infty, for this purpose */
556 #define DELAY_INFTY 1E30
558 struct join_arg {
559 rb_thread_t *target, *waiting;
560 double limit;
561 int forever;
564 static VALUE
565 remove_from_join_list(VALUE arg)
567 struct join_arg *p = (struct join_arg *)arg;
568 rb_thread_t *target_th = p->target, *th = p->waiting;
570 if (target_th->status != THREAD_KILLED) {
571 rb_thread_t **pth = &target_th->join_list_head;
573 while (*pth) {
574 if (*pth == th) {
575 *pth = th->join_list_next;
576 break;
578 pth = &(*pth)->join_list_next;
582 return Qnil;
585 static VALUE
586 thread_join_sleep(VALUE arg)
588 struct join_arg *p = (struct join_arg *)arg;
589 rb_thread_t *target_th = p->target, *th = p->waiting;
590 double now, limit = p->limit;
592 while (target_th->status != THREAD_KILLED) {
593 if (p->forever) {
594 sleep_forever(th, 1);
596 else {
597 now = timeofday();
598 if (now > limit) {
599 thread_debug("thread_join: timeout (thid: %p)\n",
600 (void *)target_th->thread_id);
601 return Qfalse;
603 sleep_wait_for_interrupt(th, limit - now);
605 thread_debug("thread_join: interrupted (thid: %p)\n",
606 (void *)target_th->thread_id);
608 return Qtrue;
611 static VALUE
612 thread_join(rb_thread_t *target_th, double delay)
614 rb_thread_t *th = GET_THREAD();
615 struct join_arg arg;
617 arg.target = target_th;
618 arg.waiting = th;
619 arg.limit = timeofday() + delay;
620 arg.forever = delay == DELAY_INFTY;
622 thread_debug("thread_join (thid: %p)\n", (void *)target_th->thread_id);
624 if (target_th->status != THREAD_KILLED) {
625 th->join_list_next = target_th->join_list_head;
626 target_th->join_list_head = th;
627 if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
628 remove_from_join_list, (VALUE)&arg)) {
629 return Qnil;
633 thread_debug("thread_join: success (thid: %p)\n",
634 (void *)target_th->thread_id);
636 if (target_th->errinfo != Qnil) {
637 VALUE err = target_th->errinfo;
639 if (FIXNUM_P(err)) {
640 /* */
642 else if (TYPE(target_th->errinfo) == T_NODE) {
643 rb_exc_raise(vm_make_jump_tag_but_local_jump(
644 GET_THROWOBJ_STATE(err), GET_THROWOBJ_VAL(err)));
646 else {
647 /* normal exception */
648 rb_exc_raise(err);
651 return target_th->self;
655 * call-seq:
656 * thr.join => thr
657 * thr.join(limit) => thr
659 * The calling thread will suspend execution and run <i>thr</i>. Does not
660 * return until <i>thr</i> exits or until <i>limit</i> seconds have passed. If
661 * the time limit expires, <code>nil</code> will be returned, otherwise
662 * <i>thr</i> is returned.
664 * Any threads not joined will be killed when the main program exits. If
665 * <i>thr</i> had previously raised an exception and the
666 * <code>abort_on_exception</code> and <code>$DEBUG</code> flags are not set
667 * (so the exception has not yet been processed) it will be processed at this
668 * time.
670 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
671 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
672 * x.join # Let x thread finish, a will be killed on exit.
674 * <em>produces:</em>
676 * axyz
678 * The following example illustrates the <i>limit</i> parameter.
680 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
681 * puts "Waiting" until y.join(0.15)
683 * <em>produces:</em>
685 * tick...
686 * Waiting
687 * tick...
688 * Waitingtick...
691 * tick...
694 static VALUE
695 thread_join_m(int argc, VALUE *argv, VALUE self)
697 rb_thread_t *target_th;
698 double delay = DELAY_INFTY;
699 VALUE limit;
701 GetThreadPtr(self, target_th);
703 rb_scan_args(argc, argv, "01", &limit);
704 if (!NIL_P(limit)) {
705 delay = rb_num2dbl(limit);
708 return thread_join(target_th, delay);
712 * call-seq:
713 * thr.value => obj
715 * Waits for <i>thr</i> to complete (via <code>Thread#join</code>) and returns
716 * its value.
718 * a = Thread.new { 2 + 2 }
719 * a.value #=> 4
722 static VALUE
723 thread_value(VALUE self)
725 rb_thread_t *th;
726 GetThreadPtr(self, th);
727 thread_join(th, DELAY_INFTY);
728 return th->value;
732 * Thread Scheduling
/* Convert a double of seconds into a struct timeval, normalizing a negative
 * usec part (e.g. -0.25 -> {-1, 750000}).
 * NOTE(review): tv_sec goes through (int), so values beyond INT_MAX/INT_MIN
 * truncate — acceptable for sleep intervals, not for absolute times. */
static struct timeval
double2timeval(double d)
{
    struct timeval time;

    time.tv_sec = (int)d;
    time.tv_usec = (int)((d - (int)d) * 1e6);
    if (time.tv_usec < 0) {
        time.tv_usec += (long)1e6;
        time.tv_sec -= 1;
    }
    return time;
}
749 static void
750 sleep_forever(rb_thread_t *th, int deadlockable)
752 enum rb_thread_status prev_status = th->status;
754 th->status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
755 do {
756 if (deadlockable) {
757 th->vm->sleeper++;
758 rb_check_deadlock(th->vm);
760 native_sleep(th, 0);
761 if (deadlockable) {
762 th->vm->sleeper--;
764 RUBY_VM_CHECK_INTS();
765 } while (th->status == THREAD_STOPPED_FOREVER);
766 th->status = prev_status;
/* Read the current time into *TP, preferring the monotonic clock (immune to
 * wall-clock jumps) and falling back to gettimeofday(). */
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
    } else
#endif
    {
        gettimeofday(tp, NULL);
    }
}
785 static void
786 sleep_timeval(rb_thread_t *th, struct timeval tv)
788 struct timeval to, tvn;
789 enum rb_thread_status prev_status = th->status;
791 getclockofday(&to);
792 to.tv_sec += tv.tv_sec;
793 if ((to.tv_usec += tv.tv_usec) >= 1000000) {
794 to.tv_sec++;
795 to.tv_usec -= 1000000;
798 th->status = THREAD_STOPPED;
799 do {
800 native_sleep(th, &tv);
801 RUBY_VM_CHECK_INTS();
802 getclockofday(&tvn);
803 if (to.tv_sec < tvn.tv_sec) break;
804 if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
805 thread_debug("sleep_timeval: %ld.%.6ld > %ld.%.6ld\n",
806 (long)to.tv_sec, to.tv_usec,
807 (long)tvn.tv_sec, tvn.tv_usec);
808 tv.tv_sec = to.tv_sec - tvn.tv_sec;
809 if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
810 --tv.tv_sec;
811 tv.tv_usec += 1000000;
813 } while (th->status == THREAD_STOPPED);
814 th->status = prev_status;
/* Sleep the current thread indefinitely (not deadlock-detectable). */
void
rb_thread_sleep_forever(void)
{
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), 0);
}
/* Sleep the current thread indefinitely, participating in VM deadlock
 * detection (used by Thread.stop). */
static void
rb_thread_sleep_deadly(void)
{
    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), 1);
}
/* Current time as a double of seconds; monotonic clock when available,
 * otherwise wall clock via gettimeofday(). */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec tp;

    if (clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        return (double)tp.tv_sec + (double)tp.tv_nsec * 1e-9;
    } else
#endif
    {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        return (double)tv.tv_sec + (double)tv.tv_usec * 1e-6;
    }
}
848 static void
849 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec)
851 sleep_timeval(th, double2timeval(sleepsec));
854 static void
855 sleep_for_polling(rb_thread_t *th)
857 struct timeval time;
858 time.tv_sec = 0;
859 time.tv_usec = 100 * 1000; /* 0.1 sec */
860 sleep_timeval(th, time);
863 void
864 rb_thread_wait_for(struct timeval time)
866 rb_thread_t *th = GET_THREAD();
867 sleep_timeval(th, time);
870 void
871 rb_thread_polling(void)
873 RUBY_VM_CHECK_INTS();
874 if (!rb_thread_alone()) {
875 rb_thread_t *th = GET_THREAD();
876 sleep_for_polling(th);
881 * CAUTION: This function causes thread switching.
882 * rb_thread_check_ints() check ruby's interrupts.
883 * some interrupt needs thread switching/invoke handlers,
884 * and so on.
887 void
888 rb_thread_check_ints(void)
890 RUBY_VM_CHECK_INTS();
893 struct timeval rb_time_timeval();
895 void
896 rb_thread_sleep(int sec)
898 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
901 void
902 rb_thread_schedule(void)
904 thread_debug("rb_thread_schedule\n");
905 if (!rb_thread_alone()) {
906 rb_thread_t *th = GET_THREAD();
908 thread_debug("rb_thread_schedule/switch start\n");
910 rb_gc_save_machine_context(th);
911 native_mutex_unlock(&th->vm->global_vm_lock);
913 native_thread_yield();
915 native_mutex_lock(&th->vm->global_vm_lock);
917 rb_thread_set_current(th);
918 thread_debug("rb_thread_schedule/switch done\n");
920 RUBY_VM_CHECK_INTS();
924 int rb_thread_critical; /* TODO: dummy variable */
926 VALUE
927 rb_thread_blocking_region(
928 rb_blocking_function_t *func, void *data1,
929 rb_unblock_function_t *ubf, void *data2)
931 VALUE val;
932 rb_thread_t *th = GET_THREAD();
934 if (ubf == RB_UBF_DFL) {
935 ubf = ubf_select;
936 data2 = th;
939 BLOCKING_REGION({
940 val = func(data1);
941 }, ubf, data2);
943 return val;
947 * call-seq:
948 * Thread.pass => nil
950 * Invokes the thread scheduler to pass execution to another thread.
952 * a = Thread.new { print "a"; Thread.pass;
953 * print "b"; Thread.pass;
954 * print "c" }
955 * b = Thread.new { print "x"; Thread.pass;
956 * print "y"; Thread.pass;
957 * print "z" }
958 * a.join
959 * b.join
961 * <em>produces:</em>
963 * axbycz
966 static VALUE
967 thread_s_pass(VALUE klass)
969 rb_thread_schedule();
970 return Qnil;
977 void
978 rb_thread_execute_interrupts(rb_thread_t *th)
980 if (th->raised_flag) return;
982 while (th->interrupt_flag) {
983 enum rb_thread_status status = th->status;
984 int timer_interrupt = th->interrupt_flag & 0x01;
985 int finalizer_interrupt = th->interrupt_flag & 0x04;
987 th->status = THREAD_RUNNABLE;
988 th->interrupt_flag = 0;
990 /* signal handling */
991 if (th->exec_signal) {
992 int sig = th->exec_signal;
993 th->exec_signal = 0;
994 rb_signal_exec(th, sig);
997 /* exception from another thread */
998 if (th->thrown_errinfo) {
999 VALUE err = th->thrown_errinfo;
1000 th->thrown_errinfo = 0;
1001 thread_debug("rb_thread_execute_interrupts: %ld\n", err);
1003 if (err == eKillSignal || err == eTerminateSignal) {
1004 th->errinfo = INT2FIX(TAG_FATAL);
1005 TH_JUMP_TAG(th, TAG_FATAL);
1007 else {
1008 rb_exc_raise(err);
1011 th->status = status;
1013 if (finalizer_interrupt) {
1014 rb_gc_finalize_deferred();
1017 if (timer_interrupt) {
1018 #if USE_NATIVE_THREAD_PRIORITY
1019 EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1020 rb_thread_schedule();
1021 #else
1022 if (th->slice > 0) {
1023 th->slice--;
1025 else {
1026 reschedule:
1027 EXEC_EVENT_HOOK(th, RUBY_EVENT_SWITCH, th->cfp->self, 0, 0);
1028 rb_thread_schedule();
1029 if (th->slice < 0) {
1030 th->slice++;
1031 goto reschedule;
1033 else {
1034 th->slice = th->priority;
1037 #endif
1043 void
1044 rb_gc_mark_threads(void)
1046 /* TODO: remove */
1049 /*****************************************************/
1051 static void
1052 rb_thread_ready(rb_thread_t *th)
1054 rb_thread_interrupt(th);
1057 static VALUE
1058 rb_thread_raise(int argc, VALUE *argv, rb_thread_t *th)
1060 VALUE exc;
1062 again:
1063 if (rb_thread_dead(th)) {
1064 return Qnil;
1067 if (th->thrown_errinfo != 0 || th->raised_flag) {
1068 rb_thread_schedule();
1069 goto again;
1072 exc = rb_make_exception(argc, argv);
1073 th->thrown_errinfo = exc;
1074 rb_thread_ready(th);
1075 return Qnil;
1078 void
1079 rb_thread_signal_raise(void *thptr, int sig)
1081 VALUE argv[2];
1082 rb_thread_t *th = thptr;
1084 argv[0] = rb_eSignal;
1085 argv[1] = INT2FIX(sig);
1086 rb_thread_raise(2, argv, th->vm->main_thread);
1089 void
1090 rb_thread_signal_exit(void *thptr)
1092 VALUE argv[2];
1093 rb_thread_t *th = thptr;
1095 argv[0] = rb_eSystemExit;
1096 argv[1] = rb_str_new2("exit");
1097 rb_thread_raise(2, argv, th->vm->main_thread);
1101 rb_thread_set_raised(rb_thread_t *th)
1103 if (th->raised_flag & RAISED_EXCEPTION) {
1104 return 1;
1106 th->raised_flag |= RAISED_EXCEPTION;
1107 return 0;
1111 rb_thread_reset_raised(rb_thread_t *th)
1113 if (!(th->raised_flag & RAISED_EXCEPTION)) {
1114 return 0;
1116 th->raised_flag &= ~RAISED_EXCEPTION;
1117 return 1;
1120 void
1121 rb_thread_fd_close(int fd)
1123 /* TODO: fix me */
1127 * call-seq:
1128 * thr.raise(exception)
1130 * Raises an exception (see <code>Kernel::raise</code>) from <i>thr</i>. The
1131 * caller does not have to be <i>thr</i>.
1133 * Thread.abort_on_exception = true
1134 * a = Thread.new { sleep(200) }
1135 * a.raise("Gotcha")
1137 * <em>produces:</em>
1139 * prog.rb:3: Gotcha (RuntimeError)
1140 * from prog.rb:2:in `initialize'
1141 * from prog.rb:2:in `new'
1142 * from prog.rb:2
1145 static VALUE
1146 thread_raise_m(int argc, VALUE *argv, VALUE self)
1148 rb_thread_t *th;
1149 GetThreadPtr(self, th);
1150 rb_thread_raise(argc, argv, th);
1151 return Qnil;
1156 * call-seq:
1157 * thr.exit => thr or nil
1158 * thr.kill => thr or nil
1159 * thr.terminate => thr or nil
1161 * Terminates <i>thr</i> and schedules another thread to be run. If this thread
1162 * is already marked to be killed, <code>exit</code> returns the
1163 * <code>Thread</code>. If this is the main thread, or the last thread, exits
1164 * the process.
1167 VALUE
1168 rb_thread_kill(VALUE thread)
1170 rb_thread_t *th;
1172 GetThreadPtr(thread, th);
1174 if (th != GET_THREAD() && th->safe_level < 4) {
1175 rb_secure(4);
1177 if (th->status == THREAD_TO_KILL || th->status == THREAD_KILLED) {
1178 return thread;
1180 if (th == th->vm->main_thread) {
1181 rb_exit(EXIT_SUCCESS);
1184 thread_debug("rb_thread_kill: %p (%p)\n", th, (void *)th->thread_id);
1186 rb_thread_interrupt(th);
1187 th->thrown_errinfo = eKillSignal;
1188 th->status = THREAD_TO_KILL;
1190 return thread;
1195 * call-seq:
1196 * Thread.kill(thread) => thread
1198 * Causes the given <em>thread</em> to exit (see <code>Thread::exit</code>).
1200 * count = 0
1201 * a = Thread.new { loop { count += 1 } }
1202 * sleep(0.1) #=> 0
1203 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
1204 * count #=> 93947
1205 * a.alive? #=> false
1208 static VALUE
1209 rb_thread_s_kill(VALUE obj, VALUE th)
1211 return rb_thread_kill(th);
1216 * call-seq:
1217 * Thread.exit => thread
1219 * Terminates the currently running thread and schedules another thread to be
1220 * run. If this thread is already marked to be killed, <code>exit</code>
1221 * returns the <code>Thread</code>. If this is the main thread, or the last
1222 * thread, exit the process.
1225 static VALUE
1226 rb_thread_exit(void)
1228 return rb_thread_kill(GET_THREAD()->self);
1233 * call-seq:
1234 * thr.wakeup => thr
1236 * Marks <i>thr</i> as eligible for scheduling (it may still remain blocked on
1237 * I/O, however). Does not invoke the scheduler (see <code>Thread#run</code>).
1239 * c = Thread.new { Thread.stop; puts "hey!" }
1240 * c.wakeup
1242 * <em>produces:</em>
1244 * hey!
1247 VALUE
1248 rb_thread_wakeup(VALUE thread)
1250 rb_thread_t *th;
1251 GetThreadPtr(thread, th);
1253 if (th->status == THREAD_KILLED) {
1254 rb_raise(rb_eThreadError, "killed thread");
1256 rb_thread_ready(th);
1257 if (th->status != THREAD_TO_KILL) {
1258 th->status = THREAD_RUNNABLE;
1260 return thread;
1265 * call-seq:
1266 * thr.run => thr
1268 * Wakes up <i>thr</i>, making it eligible for scheduling.
1270 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
1271 * Thread.pass
1272 * puts "Got here"
1273 * a.run
1274 * a.join
1276 * <em>produces:</em>
1279 * Got here
1283 VALUE
1284 rb_thread_run(VALUE thread)
1286 rb_thread_wakeup(thread);
1287 rb_thread_schedule();
1288 return thread;
1293 * call-seq:
1294 * Thread.stop => nil
1296 * Stops execution of the current thread, putting it into a ``sleep'' state,
1297 * and schedules execution of another thread.
1299 * a = Thread.new { print "a"; Thread.stop; print "c" }
1300 * Thread.pass
1301 * print "b"
1302 * a.run
1303 * a.join
1305 * <em>produces:</em>
1307 * abc
1310 VALUE
1311 rb_thread_stop(void)
1313 if (rb_thread_alone()) {
1314 rb_raise(rb_eThreadError,
1315 "stopping only thread\n\tnote: use sleep to stop forever");
1317 rb_thread_sleep_deadly();
1318 return Qnil;
1321 static int
1322 thread_list_i(st_data_t key, st_data_t val, void *data)
1324 VALUE ary = (VALUE)data;
1325 rb_thread_t *th;
1326 GetThreadPtr((VALUE)key, th);
1328 switch (th->status) {
1329 case THREAD_RUNNABLE:
1330 case THREAD_STOPPED:
1331 case THREAD_STOPPED_FOREVER:
1332 case THREAD_TO_KILL:
1333 rb_ary_push(ary, th->self);
1334 default:
1335 break;
1337 return ST_CONTINUE;
1340 /********************************************************************/
1343 * call-seq:
1344 * Thread.list => array
1346 * Returns an array of <code>Thread</code> objects for all threads that are
1347 * either runnable or stopped.
1349 * Thread.new { sleep(200) }
1350 * Thread.new { 1000000.times {|i| i*i } }
1351 * Thread.new { Thread.stop }
1352 * Thread.list.each {|t| p t}
1354 * <em>produces:</em>
1356 * #<Thread:0x401b3e84 sleep>
1357 * #<Thread:0x401b3f38 run>
1358 * #<Thread:0x401b3fb0 sleep>
1359 * #<Thread:0x401bdf4c run>
1362 VALUE
1363 rb_thread_list(void)
1365 VALUE ary = rb_ary_new();
1366 st_foreach(GET_THREAD()->vm->living_threads, thread_list_i, ary);
1367 return ary;
1370 VALUE
1371 rb_thread_current(void)
1373 return GET_THREAD()->self;
1377 * call-seq:
1378 * Thread.current => thread
1380 * Returns the currently executing thread.
1382 * Thread.current #=> #<Thread:0x401bdf4c run>
1385 static VALUE
1386 thread_s_current(VALUE klass)
1388 return rb_thread_current();
1391 VALUE
1392 rb_thread_main(void)
1394 return GET_THREAD()->vm->main_thread->self;
1397 static VALUE
1398 rb_thread_s_main(VALUE klass)
1400 return rb_thread_main();
1405 * call-seq:
1406 * Thread.abort_on_exception => true or false
1408 * Returns the status of the global ``abort on exception'' condition. The
1409 * default is <code>false</code>. When set to <code>true</code>, or if the
1410 * global <code>$DEBUG</code> flag is <code>true</code> (perhaps because the
1411 * command line option <code>-d</code> was specified) all threads will abort
1412 * (the process will <code>exit(0)</code>) if an exception is raised in any
1413 * thread. See also <code>Thread::abort_on_exception=</code>.
1416 static VALUE
1417 rb_thread_s_abort_exc(void)
1419 return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
1424 * call-seq:
1425 * Thread.abort_on_exception= boolean => true or false
1427 * When set to <code>true</code>, all threads will abort if an exception is
1428 * raised. Returns the new state.
1430 * Thread.abort_on_exception = true
1431 * t1 = Thread.new do
1432 * puts "In new thread"
1433 * raise "Exception from thread"
1434 * end
1435 * sleep(1)
1436 * puts "not reached"
1438 * <em>produces:</em>
1440 * In new thread
1441 * prog.rb:4: Exception from thread (RuntimeError)
1442 * from prog.rb:2:in `initialize'
1443 * from prog.rb:2:in `new'
1444 * from prog.rb:2
1447 static VALUE
1448 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
1450 rb_secure(4);
1451 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
1452 return val;
1457 * call-seq:
1458 * thr.abort_on_exception => true or false
1460 * Returns the status of the thread-local ``abort on exception'' condition for
1461 * <i>thr</i>. The default is <code>false</code>. See also
1462 * <code>Thread::abort_on_exception=</code>.
1465 static VALUE
1466 rb_thread_abort_exc(VALUE thread)
1468 rb_thread_t *th;
1469 GetThreadPtr(thread, th);
1470 return th->abort_on_exception ? Qtrue : Qfalse;
1475 * call-seq:
1476 * thr.abort_on_exception= boolean => true or false
1478 * When set to <code>true</code>, causes all threads (including the main
1479 * program) to abort if an exception is raised in <i>thr</i>. The process will
1480 * effectively <code>exit(0)</code>.
1483 static VALUE
1484 rb_thread_abort_exc_set(VALUE thread, VALUE val)
1486 rb_thread_t *th;
1487 rb_secure(4);
1489 GetThreadPtr(thread, th);
1490 th->abort_on_exception = RTEST(val);
1491 return val;
1496 * call-seq:
1497 * thr.group => thgrp or nil
1499 * Returns the <code>ThreadGroup</code> which contains <i>thr</i>, or nil if
1500 * the thread is not a member of any group.
1502 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
1505 VALUE
1506 rb_thread_group(VALUE thread)
1508 rb_thread_t *th;
1509 VALUE group;
1510 GetThreadPtr(thread, th);
1511 group = th->thgroup;
1513 if (!group) {
1514 group = Qnil;
1516 return group;
1519 static const char *
1520 thread_status_name(enum rb_thread_status status)
1522 switch (status) {
1523 case THREAD_RUNNABLE:
1524 return "run";
1525 case THREAD_STOPPED:
1526 case THREAD_STOPPED_FOREVER:
1527 return "sleep";
1528 case THREAD_TO_KILL:
1529 return "aborting";
1530 case THREAD_KILLED:
1531 return "dead";
1532 default:
1533 return "unknown";
1537 static int
1538 rb_thread_dead(rb_thread_t *th)
1540 return th->status == THREAD_KILLED;
/*
 *  call-seq:
 *     thr.status   => string, false or nil
 *
 *  Returns the status of <i>thr</i>: ``<code>sleep</code>'' if <i>thr</i> is
 *  sleeping or waiting on I/O, ``<code>run</code>'' if <i>thr</i> is executing,
 *  ``<code>aborting</code>'' if <i>thr</i> is aborting, <code>false</code> if
 *  <i>thr</i> terminated normally, and <code>nil</code> if <i>thr</i>
 *  terminated with an exception.
 *
 *     a = Thread.new { raise("die now") }
 *     b = Thread.new { Thread.stop }
 *     c = Thread.new { Thread.exit }
 *     d = Thread.new { sleep }
 *     d.kill                  #=> #<Thread:0x401b3678 aborting>
 *     a.status                #=> nil
 *     b.status                #=> "sleep"
 *     c.status                #=> false
 *     d.status                #=> "aborting"
 *     Thread.current.status   #=> "run"
 */

static VALUE
rb_thread_status(VALUE thread)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_thread_dead(th)) {
        /* A dead thread with a real exception object in errinfo (not nil and
         * not a Fixnum sentinel used for normal/forced exits) reports nil. */
        if (!NIL_P(th->errinfo) && !FIXNUM_P(th->errinfo)
            /* TODO */ ) {
            return Qnil;
        }
        return Qfalse;
    }
    return rb_str_new2(thread_status_name(th->status));
}
1584 * call-seq:
1585 * thr.alive? => true or false
1587 * Returns <code>true</code> if <i>thr</i> is running or sleeping.
1589 * thr = Thread.new { }
1590 * thr.join #=> #<Thread:0x401b3fb0 dead>
1591 * Thread.current.alive? #=> true
1592 * thr.alive? #=> false
1595 static VALUE
1596 rb_thread_alive_p(VALUE thread)
1598 rb_thread_t *th;
1599 GetThreadPtr(thread, th);
1601 if (rb_thread_dead(th))
1602 return Qfalse;
1603 return Qtrue;
1607 * call-seq:
1608 * thr.stop? => true or false
1610 * Returns <code>true</code> if <i>thr</i> is dead or sleeping.
1612 * a = Thread.new { Thread.stop }
1613 * b = Thread.current
1614 * a.stop? #=> true
1615 * b.stop? #=> false
1618 static VALUE
1619 rb_thread_stop_p(VALUE thread)
1621 rb_thread_t *th;
1622 GetThreadPtr(thread, th);
1624 if (rb_thread_dead(th))
1625 return Qtrue;
1626 if (th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER)
1627 return Qtrue;
1628 return Qfalse;
1632 * call-seq:
1633 * thr.safe_level => integer
1635 * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
1636 * levels can help when implementing sandboxes which run insecure code.
1638 * thr = Thread.new { $SAFE = 3; sleep }
1639 * Thread.current.safe_level #=> 0
1640 * thr.safe_level #=> 3
1643 static VALUE
1644 rb_thread_safe_level(VALUE thread)
1646 rb_thread_t *th;
1647 GetThreadPtr(thread, th);
1649 return INT2NUM(th->safe_level);
1653 * call-seq:
1654 * thr.inspect => string
1656 * Dump the name, id, and status of _thr_ to a string.
1659 static VALUE
1660 rb_thread_inspect(VALUE thread)
1662 const char *cname = rb_obj_classname(thread);
1663 rb_thread_t *th;
1664 const char *status;
1665 VALUE str;
1667 GetThreadPtr(thread, th);
1668 status = thread_status_name(th->status);
1669 str = rb_sprintf("#<%s:%p %s>", cname, (void *)thread, status);
1670 OBJ_INFECT(str, thread);
1672 return str;
/* Fetch thread-local variable `id` from `thread`, or Qnil when unset.
 * At $SAFE >= 4 only the current thread may read its own locals. */
VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    rb_thread_t *th;
    VALUE val;

    GetThreadPtr(thread, th);
    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: thread locals");
    }
    /* local_storage is created lazily; absent table means no locals at all */
    if (!th->local_storage) {
        return Qnil;
    }
    if (st_lookup(th->local_storage, id, &val)) {
        return val;
    }
    return Qnil;
}
1695 * call-seq:
1696 * thr[sym] => obj or nil
1698 * Attribute Reference---Returns the value of a thread-local variable, using
1699 * either a symbol or a string name. If the specified variable does not exist,
1700 * returns <code>nil</code>.
1702 * a = Thread.new { Thread.current["name"] = "A"; Thread.stop }
1703 * b = Thread.new { Thread.current[:name] = "B"; Thread.stop }
1704 * c = Thread.new { Thread.current["name"] = "C"; Thread.stop }
1705 * Thread.list.each {|x| puts "#{x.inspect}: #{x[:name]}" }
1707 * <em>produces:</em>
1709 * #<Thread:0x401b3b3c sleep>: C
1710 * #<Thread:0x401b3bc8 sleep>: B
1711 * #<Thread:0x401b3c68 sleep>: A
1712 * #<Thread:0x401bdf4c run>:
1715 static VALUE
1716 rb_thread_aref(VALUE thread, VALUE id)
1718 return rb_thread_local_aref(thread, rb_to_id(id));
/* Set thread-local variable `id` on `thread` to `val`.  Assigning nil
 * deletes the entry.  Raises SecurityError at $SAFE >= 4 for foreign
 * threads and a frozen error when the Thread object is frozen. */
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    rb_thread_t *th;
    GetThreadPtr(thread, th);

    if (rb_safe_level() >= 4 && th != GET_THREAD()) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify thread locals");
    }
    if (OBJ_FROZEN(thread)) {
        rb_error_frozen("thread locals");
    }
    /* create the storage table lazily on first assignment */
    if (!th->local_storage) {
        th->local_storage = st_init_numtable();
    }
    if (NIL_P(val)) {
        st_delete_wrap(th->local_storage, id);
        return Qnil;
    }
    st_insert(th->local_storage, id, val);
    return val;
}
1745 * call-seq:
1746 * thr[sym] = obj => obj
1748 * Attribute Assignment---Sets or creates the value of a thread-local variable,
1749 * using either a symbol or a string. See also <code>Thread#[]</code>.
1752 static VALUE
1753 rb_thread_aset(VALUE self, ID id, VALUE val)
1755 return rb_thread_local_aset(self, rb_to_id(id), val);
1759 * call-seq:
1760 * thr.key?(sym) => true or false
1762 * Returns <code>true</code> if the given string (or symbol) exists as a
1763 * thread-local variable.
1765 * me = Thread.current
1766 * me[:oliver] = "a"
1767 * me.key?(:oliver) #=> true
1768 * me.key?(:stanley) #=> false
1771 static VALUE
1772 rb_thread_key_p(VALUE self, VALUE key)
1774 rb_thread_t *th;
1775 ID id = rb_to_id(key);
1777 GetThreadPtr(self, th);
1779 if (!th->local_storage) {
1780 return Qfalse;
1782 if (st_lookup(th->local_storage, id, 0)) {
1783 return Qtrue;
1785 return Qfalse;
1788 static int
1789 thread_keys_i(ID key, VALUE value, VALUE ary)
1791 rb_ary_push(ary, ID2SYM(key));
1792 return ST_CONTINUE;
1795 static int
1796 vm_living_thread_num(rb_vm_t *vm)
1798 return vm->living_threads->num_entries;
1802 rb_thread_alone()
1804 int num = 1;
1805 if (GET_THREAD()->vm->living_threads) {
1806 num = vm_living_thread_num(GET_THREAD()->vm);
1807 thread_debug("rb_thread_alone: %d\n", num);
1809 return num == 1;
1813 * call-seq:
1814 * thr.keys => array
1816 * Returns an an array of the names of the thread-local variables (as Symbols).
1818 * thr = Thread.new do
1819 * Thread.current[:cat] = 'meow'
1820 * Thread.current["dog"] = 'woof'
1821 * end
1822 * thr.join #=> #<Thread:0x401b3f10 dead>
1823 * thr.keys #=> [:dog, :cat]
1826 static VALUE
1827 rb_thread_keys(VALUE self)
1829 rb_thread_t *th;
1830 VALUE ary = rb_ary_new();
1831 GetThreadPtr(self, th);
1833 if (th->local_storage) {
1834 st_foreach(th->local_storage, thread_keys_i, ary);
1836 return ary;
1840 * call-seq:
1841 * thr.priority => integer
1843 * Returns the priority of <i>thr</i>. Default is inherited from the
1844 * current thread which creating the new thread, or zero for the
1845 * initial main thread; higher-priority threads will run before
1846 * lower-priority threads.
1848 * Thread.current.priority #=> 0
1851 static VALUE
1852 rb_thread_priority(VALUE thread)
1854 rb_thread_t *th;
1855 GetThreadPtr(thread, th);
1856 return INT2NUM(th->priority);
1861 * call-seq:
1862 * thr.priority= integer => thr
1864 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
1865 * will run before lower-priority threads.
1867 * count1 = count2 = 0
1868 * a = Thread.new do
1869 * loop { count1 += 1 }
1870 * end
1871 * a.priority = -1
1873 * b = Thread.new do
1874 * loop { count2 += 1 }
1875 * end
1876 * b.priority = -2
1877 * sleep 1 #=> 1
1878 * count1 #=> 622504
1879 * count2 #=> 5832
1882 static VALUE
1883 rb_thread_priority_set(VALUE thread, VALUE prio)
1885 rb_thread_t *th;
1886 GetThreadPtr(thread, th);
1887 int priority;
1889 rb_secure(4);
1891 #if USE_NATIVE_THREAD_PRIORITY
1892 th->priority = NUM2INT(prio);
1893 native_thread_apply_priority(th);
1894 #else
1895 priority = NUM2INT(prio);
1896 if (priority > RUBY_THREAD_PRIORITY_MAX) {
1897 priority = RUBY_THREAD_PRIORITY_MAX;
1899 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
1900 priority = RUBY_THREAD_PRIORITY_MIN;
1902 th->priority = priority;
1903 th->slice = priority;
1904 #endif
1905 return INT2NUM(th->priority);
1908 /* for IO */
1910 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
1911 void
1912 rb_fd_init(volatile rb_fdset_t *fds)
1914 fds->maxfd = 0;
1915 fds->fdset = ALLOC(fd_set);
1916 FD_ZERO(fds->fdset);
1919 void
1920 rb_fd_term(rb_fdset_t *fds)
1922 if (fds->fdset) xfree(fds->fdset);
1923 fds->maxfd = 0;
1924 fds->fdset = 0;
1927 void
1928 rb_fd_zero(rb_fdset_t *fds)
1930 if (fds->fdset) {
1931 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
1932 FD_ZERO(fds->fdset);
1936 static void
1937 rb_fd_resize(int n, rb_fdset_t *fds)
1939 int m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
1940 int o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
1942 if (m < sizeof(fd_set)) m = sizeof(fd_set);
1943 if (o < sizeof(fd_set)) o = sizeof(fd_set);
1945 if (m > o) {
1946 fds->fdset = realloc(fds->fdset, m);
1947 memset((char *)fds->fdset + o, 0, m - o);
1949 if (n >= fds->maxfd) fds->maxfd = n + 1;
1952 void
1953 rb_fd_set(int n, rb_fdset_t *fds)
1955 rb_fd_resize(n, fds);
1956 FD_SET(n, fds->fdset);
1959 void
1960 rb_fd_clr(int n, rb_fdset_t *fds)
1962 if (n >= fds->maxfd) return;
1963 FD_CLR(n, fds->fdset);
1967 rb_fd_isset(int n, const rb_fdset_t *fds)
1969 if (n >= fds->maxfd) return 0;
1970 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
1973 void
1974 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
1976 int size = howmany(max, NFDBITS) * sizeof(fd_mask);
1978 if (size < sizeof(fd_set)) size = sizeof(fd_set);
1979 dst->maxfd = max;
1980 dst->fdset = realloc(dst->fdset, size);
1981 memcpy(dst->fdset, src, size);
1985 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
1987 fd_set *r = NULL, *w = NULL, *e = NULL;
1988 if (readfds) {
1989 rb_fd_resize(n - 1, readfds);
1990 r = rb_fd_ptr(readfds);
1992 if (writefds) {
1993 rb_fd_resize(n - 1, writefds);
1994 w = rb_fd_ptr(writefds);
1996 if (exceptfds) {
1997 rb_fd_resize(n - 1, exceptfds);
1998 e = rb_fd_ptr(exceptfds);
2000 return select(n, r, w, e, timeout);
2003 #undef FD_ZERO
2004 #undef FD_SET
2005 #undef FD_CLR
2006 #undef FD_ISSET
2008 #define FD_ZERO(f) rb_fd_zero(f)
2009 #define FD_SET(i, f) rb_fd_set(i, f)
2010 #define FD_CLR(i, f) rb_fd_clr(i, f)
2011 #define FD_ISSET(i, f) rb_fd_isset(i, f)
2013 #endif
2015 #if defined(__CYGWIN__) || defined(_WIN32)
/* Three-way compare of two timevals: negative when a < b, zero when equal,
 * positive when a > b.  Seconds dominate; microseconds break ties. */
static long
cmp_tv(const struct timeval *a, const struct timeval *b)
{
    long sec_diff = a->tv_sec - b->tv_sec;
    if (sec_diff != 0) {
        return sec_diff;
    }
    return a->tv_usec - b->tv_usec;
}
/* In-place rest -= wait.  Returns 1 on success; returns 0 (leaving `rest`
 * partially normalized) when the difference would be non-positive, i.e.
 * there is no time left to wait. */
static int
subtract_tv(struct timeval *rest, const struct timeval *wait)
{
    /* borrow whole seconds until the microsecond field is large enough */
    for (; rest->tv_usec < wait->tv_usec; rest->tv_usec += 1000 * 1000) {
        if (rest->tv_sec <= wait->tv_sec) {
            return 0;
        }
        rest->tv_sec--;
    }
    rest->tv_sec -= wait->tv_sec;
    rest->tv_usec -= wait->tv_usec;
    return 1;
}
2037 #endif
/* Interruptible select(2) wrapper.
 *
 * Runs select inside a BLOCKING_REGION so other Ruby threads keep running,
 * restores the caller's fd_sets and retries on EINTR/ERESTART, and (on
 * non-Linux) recomputes the remaining timeout from an absolute deadline so
 * retries do not extend the total wait.  On Cygwin/Win32 there is no
 * reliable way to interrupt a blocking select, so it polls in 100ms slices
 * and checks the thread's interrupt flag between slices.
 * Returns select's result; errno is preserved from the failing call. */
static int
do_select(int n, fd_set *read, fd_set *write, fd_set *except,
          struct timeval *timeout)
{
    int result, lerrno;
    fd_set orig_read, orig_write, orig_except;

#ifndef linux
    double limit = 0;
    struct timeval wait_rest;
# if defined(__CYGWIN__) || defined(_WIN32)
    struct timeval start_time;
# endif

    if (timeout) {
        /* convert the relative timeout into an absolute deadline (`limit`)
         * and work on a local copy so the caller's timeval is untouched */
# if defined(__CYGWIN__) || defined(_WIN32)
        gettimeofday(&start_time, NULL);
        limit = (double)start_time.tv_sec + (double)start_time.tv_usec*1e-6;
# else
        limit = timeofday();
# endif
        limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
        wait_rest = *timeout;
        timeout = &wait_rest;
    }
#endif

    /* snapshot the input sets: select clobbers them, and a retry must start
     * from the original membership */
    if (read) orig_read = *read;
    if (write) orig_write = *write;
    if (except) orig_except = *except;

  retry:
    lerrno = 0;

#if defined(__CYGWIN__) || defined(_WIN32)
    {
        int finish = 0;
        /* polling duration: 100ms */
        struct timeval wait_100ms, *wait;
        wait_100ms.tv_sec = 0;
        wait_100ms.tv_usec = 100 * 1000; /* 100 ms */

        do {
            /* wait the smaller of 100ms and the remaining timeout */
            wait = (timeout == 0 || cmp_tv(&wait_100ms, timeout) > 0) ? &wait_100ms : timeout;
            BLOCKING_REGION({
                do {
                    result = select(n, read, write, except, wait);
                    if (result < 0) lerrno = errno;
                    if (result != 0) break;

                    /* timed-out slice: restore sets and account elapsed
                     * time against the remaining timeout */
                    if (read) *read = orig_read;
                    if (write) *write = orig_write;
                    if (except) *except = orig_except;
                    wait = &wait_100ms;
                    if (timeout) {
                        struct timeval elapsed;
                        gettimeofday(&elapsed, NULL);
                        subtract_tv(&elapsed, &start_time);
                        if (!subtract_tv(timeout, &elapsed)) {
                            finish = 1;   /* overall timeout expired */
                            break;
                        }
                        if (cmp_tv(&wait_100ms, timeout) < 0) wait = timeout;
                    }
                } while (__th->interrupt_flag == 0);
            }, 0, 0);
        } while (result == 0 && !finish);
    }
#else
    BLOCKING_REGION({
        result = select(n, read, write, except, timeout);
        if (result < 0) lerrno = errno;
    }, ubf_select, GET_THREAD());
#endif

    errno = lerrno;

    if (result < 0) {
        switch (errno) {
          case EINTR:
#ifdef ERESTART
          case ERESTART:
#endif
            /* interrupted by a signal: restore the sets, shrink the
             * remaining timeout toward the deadline, and retry */
            if (read) *read = orig_read;
            if (write) *write = orig_write;
            if (except) *except = orig_except;
#ifndef linux
            if (timeout) {
                double d = limit - timeofday();

                wait_rest.tv_sec = (unsigned int)d;
                wait_rest.tv_usec = (long)((d-(double)wait_rest.tv_sec)*1e6);
                if (wait_rest.tv_sec < 0)  wait_rest.tv_sec = 0;
                if (wait_rest.tv_usec < 0) wait_rest.tv_usec = 0;
            }
#endif
            goto retry;
          default:
            break;
        }
    }
    return result;
}
/* Block the current Ruby thread until `fd` is ready for reading
 * (read != 0) or writing (read == 0).  Loops until do_select reports
 * readiness; raises IOError for a negative fd and SystemCallError when
 * select fails. */
static void
rb_thread_wait_fd_rw(int fd, int read)
{
    int result = 0;
    thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");

    if (fd < 0) {
        rb_raise(rb_eIOError, "closed stream");
    }
    while (result <= 0) {
        /* a fresh single-fd set per iteration; freed before any retry */
        rb_fdset_t set;
        rb_fd_init(&set);
        FD_SET(fd, &set);

        if (read) {
            result = do_select(fd + 1, rb_fd_ptr(&set), 0, 0, 0);
        }
        else {
            result = do_select(fd + 1, 0, rb_fd_ptr(&set), 0, 0);
        }

        rb_fd_term(&set);

        if (result < 0) {
            rb_sys_fail(0);
        }
    }

    thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
}
2174 void
2175 rb_thread_wait_fd(int fd)
2177 rb_thread_wait_fd_rw(fd, 1);
2181 rb_thread_fd_writable(int fd)
2183 rb_thread_wait_fd_rw(fd, 0);
2184 return Qtrue;
2188 rb_thread_select(int max, fd_set * read, fd_set * write, fd_set * except,
2189 struct timeval *timeout)
2191 if (!read && !write && !except) {
2192 if (!timeout) {
2193 rb_thread_sleep_forever();
2194 return 0;
2196 rb_thread_wait_for(*timeout);
2197 return 0;
2199 else {
2200 return do_select(max, read, write, except, timeout);
2206 * for GC
2209 #ifdef USE_CONSERVATIVE_STACK_END
2210 void
2211 rb_gc_set_stack_end(VALUE **stack_end_p)
2213 VALUE stack_end;
2214 *stack_end_p = &stack_end;
2216 #endif
2218 void
2219 rb_gc_save_machine_context(rb_thread_t *th)
2221 SET_MACHINE_STACK_END(&th->machine_stack_end);
2222 FLUSH_REGISTER_WINDOWS;
2223 #ifdef __ia64
2224 th->machine_register_stack_end = rb_ia64_bsp();
2225 #endif
2226 setjmp(th->machine_regs);
2233 int rb_get_next_signal(rb_vm_t *vm);
2235 static void
2236 timer_thread_function(void *arg)
2238 rb_vm_t *vm = arg; /* TODO: fix me for Multi-VM */
2240 /* for time slice */
2241 RUBY_VM_SET_TIMER_INTERRUPT(vm->running_thread);
2243 /* check signal */
2244 if (vm->buffered_signal_size && vm->main_thread->exec_signal == 0) {
2245 rb_thread_t *mth = vm->main_thread;
2246 enum rb_thread_status prev_status = mth->status;
2247 mth->exec_signal = rb_get_next_signal(vm);
2248 thread_debug("main_thread: %s\n", thread_status_name(prev_status));
2249 thread_debug("buffered_signal_size: %ld, sig: %d\n",
2250 (long)vm->buffered_signal_size, vm->main_thread->exec_signal);
2251 if (mth->status != THREAD_KILLED) mth->status = THREAD_RUNNABLE;
2252 rb_thread_interrupt(mth);
2253 mth->status = prev_status;
2256 #if 0
2257 /* prove profiler */
2258 if (vm->prove_profile.enable) {
2259 rb_thread_t *th = vm->running_thread;
2261 if (vm->during_gc) {
2262 /* GC prove profiling */
2265 #endif
2268 void
2269 rb_thread_stop_timer_thread(void)
2271 if (timer_thread_id) {
2272 system_working = 0;
2273 native_thread_join(timer_thread_id);
2274 timer_thread_id = 0;
2278 void
2279 rb_thread_reset_timer_thread(void)
2281 timer_thread_id = 0;
2284 void
2285 rb_thread_start_timer_thread(void)
2287 rb_thread_create_timer_thread();
2290 static int
2291 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
2293 int i;
2294 VALUE lines = (VALUE)val;
2296 for (i = 0; i < RARRAY_LEN(lines); i++) {
2297 if (RARRAY_PTR(lines)[i] != Qnil) {
2298 RARRAY_PTR(lines)[i] = INT2FIX(0);
2301 return ST_CONTINUE;
2304 static void
2305 clear_coverage(void)
2307 extern VALUE rb_get_coverages(void);
2308 VALUE coverages = rb_get_coverages();
2309 if (RTEST(coverages)) {
2310 st_foreach(RHASH_TBL(coverages), clear_coverage_i, 0);
2314 static int
2315 terminate_atfork_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2317 VALUE thval = key;
2318 rb_thread_t *th;
2319 GetThreadPtr(thval, th);
2321 if (th != current_th) {
2322 thread_cleanup_func(th);
2324 return ST_CONTINUE;
/* Called in the child after fork(): only the forking thread survives.
 * Makes the current thread the main thread, cleans up every other thread's
 * structure, rebuilds living_threads with just this thread, and resets
 * per-process state (sleeper count, coverage tables, random seed). */
void
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    VALUE thval = th->self;
    vm->main_thread = th;

    st_foreach(vm->living_threads, terminate_atfork_i, (st_data_t)th);
    st_clear(vm->living_threads);
    st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
    vm->sleeper = 0;
    clear_coverage();
    /* the child must not share the parent's PRNG stream */
    rb_reset_random_seed();
}
2343 static int
2344 terminate_atfork_before_exec_i(st_data_t key, st_data_t val, rb_thread_t *current_th)
2346 VALUE thval = key;
2347 rb_thread_t *th;
2348 GetThreadPtr(thval, th);
2350 if (th != current_th) {
2351 thread_cleanup_func_before_exec(th);
2353 return ST_CONTINUE;
2356 void
2357 rb_thread_atfork_before_exec(void)
2359 rb_thread_t *th = GET_THREAD();
2360 rb_vm_t *vm = th->vm;
2361 VALUE thval = th->self;
2362 vm->main_thread = th;
2364 st_foreach(vm->living_threads, terminate_atfork_before_exec_i, (st_data_t)th);
2365 st_clear(vm->living_threads);
2366 st_insert(vm->living_threads, thval, (st_data_t) th->thread_id);
2367 vm->sleeper = 0;
2368 clear_coverage();
2371 struct thgroup {
2372 int enclosed;
2373 VALUE group;
2377 * Document-class: ThreadGroup
2379 * <code>ThreadGroup</code> provides a means of keeping track of a number of
2380 * threads as a group. A <code>Thread</code> can belong to only one
2381 * <code>ThreadGroup</code> at a time; adding a thread to a new group will
2382 * remove it from any previous group.
2384 * Newly created threads belong to the same group as the thread from which they
2385 * were created.
2388 static VALUE thgroup_s_alloc(VALUE);
2389 static VALUE
2390 thgroup_s_alloc(VALUE klass)
2392 VALUE group;
2393 struct thgroup *data;
2395 group = Data_Make_Struct(klass, struct thgroup, 0, -1, data);
2396 data->enclosed = 0;
2397 data->group = group;
2399 return group;
2402 struct thgroup_list_params {
2403 VALUE ary;
2404 VALUE group;
2407 static int
2408 thgroup_list_i(st_data_t key, st_data_t val, st_data_t data)
2410 VALUE thread = (VALUE)key;
2411 VALUE ary = ((struct thgroup_list_params *)data)->ary;
2412 VALUE group = ((struct thgroup_list_params *)data)->group;
2413 rb_thread_t *th;
2414 GetThreadPtr(thread, th);
2416 if (th->thgroup == group) {
2417 rb_ary_push(ary, thread);
2419 return ST_CONTINUE;
2423 * call-seq:
2424 * thgrp.list => array
2426 * Returns an array of all existing <code>Thread</code> objects that belong to
2427 * this group.
2429 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
2432 static VALUE
2433 thgroup_list(VALUE group)
2435 VALUE ary = rb_ary_new();
2436 struct thgroup_list_params param;
2438 param.ary = ary;
2439 param.group = group;
2440 st_foreach(GET_THREAD()->vm->living_threads, thgroup_list_i, (st_data_t) & param);
2441 return ary;
2446 * call-seq:
2447 * thgrp.enclose => thgrp
2449 * Prevents threads from being added to or removed from the receiving
2450 * <code>ThreadGroup</code>. New threads can still be started in an enclosed
2451 * <code>ThreadGroup</code>.
2453 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
2454 * thr = Thread::new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
2455 * tg = ThreadGroup::new #=> #<ThreadGroup:0x402752d4>
2456 * tg.add thr
2458 * <em>produces:</em>
2460 * ThreadError: can't move from the enclosed thread group
2463 VALUE
2464 thgroup_enclose(VALUE group)
2466 struct thgroup *data;
2468 Data_Get_Struct(group, struct thgroup, data);
2469 data->enclosed = 1;
2471 return group;
2476 * call-seq:
2477 * thgrp.enclosed? => true or false
2479 * Returns <code>true</code> if <em>thgrp</em> is enclosed. See also
2480 * ThreadGroup#enclose.
2483 static VALUE
2484 thgroup_enclosed_p(VALUE group)
2486 struct thgroup *data;
2488 Data_Get_Struct(group, struct thgroup, data);
2489 if (data->enclosed)
2490 return Qtrue;
2491 return Qfalse;
/*
 *  call-seq:
 *     thgrp.add(thread)   => thgrp
 *
 *  Adds the given <em>thread</em> to this group, removing it from any other
 *  group to which it may have previously belonged.
 *
 *     puts "Initial group is #{ThreadGroup::Default.list}"
 *     tg = ThreadGroup.new
 *     t1 = Thread.new { sleep }
 *     t2 = Thread.new { sleep }
 *     puts "t1 is #{t1}"
 *     puts "t2 is #{t2}"
 *     tg.add(t1)
 *     puts "Initial group now #{ThreadGroup::Default.list}"
 *     puts "tg group now #{tg.list}"
 *
 *  <em>produces:</em>
 *
 *     Initial group is #<Thread:0x401bdf4c>
 *     t1 is #<Thread:0x401b3c90>
 *     t2 is #<Thread:0x401b3c18>
 *     Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
 *     tg group now #<Thread:0x401b3c90>
 */

static VALUE
thgroup_add(VALUE group, VALUE thread)
{
    rb_thread_t *th;
    struct thgroup *data;

    rb_secure(4);
    GetThreadPtr(thread, th);

    /* destination group must accept new members */
    if (OBJ_FROZEN(group)) {
        rb_raise(rb_eThreadError, "can't move to the frozen thread group");
    }
    Data_Get_Struct(group, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
    }

    /* a thread with no group (e.g. during teardown) cannot be moved */
    if (!th->thgroup) {
        return Qnil;
    }

    /* source group must allow members to leave */
    if (OBJ_FROZEN(th->thgroup)) {
        rb_raise(rb_eThreadError, "can't move from the frozen thread group");
    }
    Data_Get_Struct(th->thgroup, struct thgroup, data);
    if (data->enclosed) {
        rb_raise(rb_eThreadError,
                 "can't move from the enclosed thread group");
    }

    th->thgroup = group;
    return group;
}
2557 * Document-class: Mutex
2559 * Mutex implements a simple semaphore that can be used to coordinate access to
2560 * shared data from multiple concurrent threads.
2562 * Example:
2564 * require 'thread'
2565 * semaphore = Mutex.new
2567 * a = Thread.new {
2568 * semaphore.synchronize {
2569 * # access shared resource
2573 * b = Thread.new {
2574 * semaphore.synchronize {
2575 * # access shared resource
2581 #define GetMutexPtr(obj, tobj) \
2582 Data_Get_Struct(obj, mutex_t, tobj)
2584 static const char *mutex_unlock(mutex_t *mutex);
2586 static void
2587 mutex_free(void *ptr)
2589 if (ptr) {
2590 mutex_t *mutex = ptr;
2591 if (mutex->th) {
2592 /* rb_warn("free locked mutex"); */
2593 mutex_unlock(mutex);
2595 native_mutex_destroy(&mutex->lock);
2596 native_cond_destroy(&mutex->cond);
2598 ruby_xfree(ptr);
2601 static VALUE
2602 mutex_alloc(VALUE klass)
2604 VALUE volatile obj;
2605 mutex_t *mutex;
2607 obj = Data_Make_Struct(klass, mutex_t, NULL, mutex_free, mutex);
2608 native_mutex_initialize(&mutex->lock);
2609 native_cond_initialize(&mutex->cond);
2610 return obj;
2614 * call-seq:
2615 * Mutex.new => mutex
2617 * Creates a new Mutex
2619 static VALUE
2620 mutex_initialize(VALUE self)
2622 return self;
2625 VALUE
2626 rb_mutex_new(void)
2628 return mutex_alloc(rb_cMutex);
2632 * call-seq:
2633 * mutex.locked? => true or false
2635 * Returns +true+ if this lock is currently held by some thread.
2637 VALUE
2638 rb_mutex_locked_p(VALUE self)
2640 mutex_t *mutex;
2641 GetMutexPtr(self, mutex);
2642 return mutex->th ? Qtrue : Qfalse;
2645 static void
2646 mutex_locked(rb_thread_t *th, VALUE self)
2648 mutex_t *mutex;
2649 GetMutexPtr(self, mutex);
2651 if (th->keeping_mutexes) {
2652 mutex->next_mutex = th->keeping_mutexes;
2654 th->keeping_mutexes = mutex;
2658 * call-seq:
2659 * mutex.try_lock => true or false
2661 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
2662 * lock was granted.
2664 VALUE
2665 rb_mutex_trylock(VALUE self)
2667 mutex_t *mutex;
2668 VALUE locked = Qfalse;
2669 GetMutexPtr(self, mutex);
2671 if (mutex->th == GET_THREAD()) {
2672 rb_raise(rb_eThreadError, "deadlock; recursive locking");
2675 native_mutex_lock(&mutex->lock);
2676 if (mutex->th == 0) {
2677 mutex->th = GET_THREAD();
2678 locked = Qtrue;
2680 mutex_locked(GET_THREAD(), self);
2682 native_mutex_unlock(&mutex->lock);
2684 return locked;
/* Core blocking wait for rb_mutex_lock, run inside a blocking region.
 * Waits on the mutex's condvar until ownership can be taken (the quirky
 * `(mutex->th = th, 0)` comma expression claims the mutex the moment it is
 * free while keeping the loop condition false).
 * Returns 0 on clean acquisition, 1 when interrupted by the VM, and 2 when
 * this is the last runnable thread (potential deadlock — caller checks). */
static int
lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
{
    int interrupted = 0;
#if 0 /* for debug */
    native_thread_yield();
#endif

    native_mutex_lock(&mutex->lock);
    th->transition_for_lock = 0;
    while (mutex->th || (mutex->th = th, 0)) {
        if (last_thread) {
            /* every other thread is asleep: bail out for deadlock check */
            interrupted = 2;
            break;
        }

        mutex->cond_waiting++;
        native_cond_wait(&mutex->cond, &mutex->lock);
        mutex->cond_notified--;

        if (RUBY_VM_INTERRUPTED(th)) {
            interrupted = 1;
            break;
        }
    }
    th->transition_for_lock = 1;
    native_mutex_unlock(&mutex->lock);

    /* give another thread a chance before reporting possible deadlock */
    if (interrupted == 2) native_thread_yield();
#if 0 /* for debug */
    native_thread_yield();
#endif

    return interrupted;
}
2723 static void
2724 lock_interrupt(void *ptr)
2726 mutex_t *mutex = (mutex_t *)ptr;
2727 native_mutex_lock(&mutex->lock);
2728 if (mutex->cond_waiting > 0) {
2729 native_cond_broadcast(&mutex->cond);
2730 mutex->cond_notified = mutex->cond_waiting;
2731 mutex->cond_waiting = 0;
2733 native_mutex_unlock(&mutex->lock);
/*
 * call-seq:
 *    mutex.lock  => true or false
 *
 * Attempts to grab the lock and waits if it isn't available.
 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
 */
VALUE
rb_mutex_lock(VALUE self)
{
    /* fast path: trylock either succeeds or raises on recursive locking */
    if (rb_mutex_trylock(self) == Qfalse) {
        mutex_t *mutex;
        rb_thread_t *th = GET_THREAD();
        GetMutexPtr(self, mutex);

        while (mutex->th != th) {
            int interrupted;
            enum rb_thread_status prev_status = th->status;
            int last_thread = 0;
            struct rb_unblock_callback oldubf;

            /* register lock_interrupt so Thread#kill etc. can wake us */
            set_unblock_function(th, lock_interrupt, mutex, &oldubf);
            th->status = THREAD_STOPPED_FOREVER;
            th->vm->sleeper++;
            th->locking_mutex = self;
            /* everyone asleep?  lock_func will report potential deadlock */
            if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
                last_thread = 1;
            }

            th->transition_for_lock = 1;
            BLOCKING_REGION_CORE({
                interrupted = lock_func(th, mutex, last_thread);
            });
            th->transition_for_lock = 0;
            remove_signal_thread_list(th);
            reset_unblock_function(th, &oldubf);

            th->locking_mutex = Qfalse;
            if (mutex->th && interrupted == 2) {
                rb_check_deadlock(th->vm);
            }
            /* restore status unless something else already changed it */
            if (th->status == THREAD_STOPPED_FOREVER) {
                th->status = prev_status;
            }
            th->vm->sleeper--;

            /* record ownership in th->keeping_mutexes on success */
            if (mutex->th == th) mutex_locked(th, self);

            if (interrupted) {
                RUBY_VM_CHECK_INTS();
            }
        }
    }
    return self;
}
/* Release `mutex` on behalf of the current thread.
 * Returns NULL on success or a static error message when the mutex is not
 * locked, or is locked by another thread.  On success also unlinks the
 * mutex from the current thread's keeping_mutexes list (done outside the
 * native lock — only the owner touches that list). */
static const char *
mutex_unlock(mutex_t *mutex)
{
    const char *err = NULL;
    rb_thread_t *th = GET_THREAD();
    mutex_t *th_mutex;

    native_mutex_lock(&mutex->lock);

    if (mutex->th == 0) {
        err = "Attempt to unlock a mutex which is not locked";
    }
    else if (mutex->th != GET_THREAD()) {
        err = "Attempt to unlock a mutex which is locked by another thread";
    }
    else {
        mutex->th = 0;
        if (mutex->cond_waiting > 0) {
            /* waiting thread */
            native_cond_signal(&mutex->cond);
            mutex->cond_waiting--;
            mutex->cond_notified++;
        }
    }

    native_mutex_unlock(&mutex->lock);

    if (!err) {
        /* unlink from the singly-linked keeping_mutexes list */
        th_mutex = th->keeping_mutexes;
        if (th_mutex == mutex) {
            th->keeping_mutexes = mutex->next_mutex;
        }
        else {
            while (1) {
                mutex_t *tmp_mutex;
                tmp_mutex = th_mutex->next_mutex;
                if (tmp_mutex == mutex) {
                    th_mutex->next_mutex = tmp_mutex->next_mutex;
                    break;
                }
                th_mutex = tmp_mutex;
            }
        }
        mutex->next_mutex = NULL;
    }

    return err;
}
2842 * call-seq:
2843 * mutex.unlock => self
2845 * Releases the lock.
2846 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
2848 VALUE
2849 rb_mutex_unlock(VALUE self)
2851 const char *err;
2852 mutex_t *mutex;
2853 GetMutexPtr(self, mutex);
2855 err = mutex_unlock(mutex);
2856 if (err) rb_raise(rb_eThreadError, err);
2858 return self;
2861 static void
2862 rb_mutex_unlock_all(mutex_t *mutexes)
2864 const char *err;
2865 mutex_t *mutex;
2867 while (mutexes) {
2868 mutex = mutexes;
2869 /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
2870 mutexes); */
2871 mutexes = mutex->next_mutex;
2872 err = mutex_unlock(mutex);
2873 if (err) rb_bug("invalid keeping_mutexes: %s", err);
2877 static VALUE
2878 rb_mutex_sleep_forever(VALUE time)
2880 rb_thread_sleep_deadly();
2881 return Qnil;
2884 static VALUE
2885 rb_mutex_wait_for(VALUE time)
2887 const struct timeval *t = (struct timeval *)time;
2888 rb_thread_wait_for(*t);
2889 return Qnil;
2892 VALUE
2893 rb_mutex_sleep(VALUE self, VALUE timeout)
2895 time_t beg, end;
2896 struct timeval t;
2898 if (!NIL_P(timeout)) {
2899 t = rb_time_interval(timeout);
2901 rb_mutex_unlock(self);
2902 beg = time(0);
2903 if (NIL_P(timeout)) {
2904 rb_ensure(rb_mutex_sleep_forever, Qnil, rb_mutex_lock, self);
2906 else {
2907 rb_ensure(rb_mutex_wait_for, (VALUE)&t, rb_mutex_lock, self);
2909 end = time(0) - beg;
2910 return INT2FIX(end);
2914 * call-seq:
2915 * mutex.sleep(timeout = nil) => number
2917 * Releases the lock and sleeps +timeout+ seconds if it is given and
2918 * non-nil or forever. Raises +ThreadError+ if +mutex+ wasn't locked by
2919 * the current thread.
2921 static VALUE
2922 mutex_sleep(int argc, VALUE *argv, VALUE self)
2924 VALUE timeout;
2926 rb_scan_args(argc, argv, "01", &timeout);
2927 return rb_mutex_sleep(self, timeout);
2931 * call-seq:
2932 * mutex.synchronize { ... } => result of the block
2934 * Obtains a lock, runs the block, and releases the lock when the block
2935 * completes. See the example under +Mutex+.
2938 VALUE
2939 rb_thread_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
2941 rb_mutex_lock(mutex);
2942 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
/*
 * Document-class: Barrier
 */
/* Singly-linked FIFO of threads blocked on a Barrier. */
typedef struct rb_thread_list_struct rb_thread_list_t;

struct rb_thread_list_struct {
    rb_thread_t *th;        /* a blocked thread */
    rb_thread_list_t *next; /* next waiter, or NULL at the tail */
};
2955 static void
2956 thlist_mark(void *ptr)
2958 rb_thread_list_t *q = ptr;
2960 for (; q; q = q->next) {
2961 rb_gc_mark(q->th->self);
2965 static void
2966 thlist_free(void *ptr)
2968 rb_thread_list_t *q = ptr, *next;
2970 for (; q; q = next) {
2971 next = q->next;
2972 ruby_xfree(q);
2976 static int
2977 thlist_signal(rb_thread_list_t **list, unsigned int maxth, rb_thread_t **woken_thread)
2979 int woken = 0;
2980 rb_thread_list_t *q;
2982 while ((q = *list) != NULL) {
2983 rb_thread_t *th = q->th;
2985 *list = q->next;
2986 ruby_xfree(q);
2987 if (th->status != THREAD_KILLED) {
2988 rb_thread_ready(th);
2989 if (!woken && woken_thread) *woken_thread = th;
2990 if (++woken >= maxth && maxth) break;
2993 return woken;
/* Barrier internal state. */
typedef struct {
    rb_thread_t *owner;                /* thread currently holding the barrier, or NULL */
    rb_thread_list_t *waiting, **tail; /* FIFO of blocked threads; tail enables O(1) append */
} rb_barrier_t;
3001 static void
3002 barrier_mark(void *ptr)
3004 rb_barrier_t *b = ptr;
3006 if (b->owner) rb_gc_mark(b->owner->self);
3007 thlist_mark(b->waiting);
3010 static void
3011 barrier_free(void *ptr)
3013 rb_barrier_t *b = ptr;
3015 b->owner = 0;
3016 thlist_free(b->waiting);
3017 b->waiting = 0;
3018 ruby_xfree(ptr);
3021 static VALUE
3022 barrier_alloc(VALUE klass)
3024 VALUE volatile obj;
3025 rb_barrier_t *barrier;
3027 obj = Data_Make_Struct(klass, rb_barrier_t, barrier_mark, barrier_free, barrier);
3028 barrier->owner = GET_THREAD();
3029 barrier->waiting = 0;
3030 barrier->tail = &barrier->waiting;
3031 return obj;
3034 VALUE
3035 rb_barrier_new(void)
3037 return barrier_alloc(rb_cBarrier);
/*
 * Waits on the barrier.  Returns true if the caller acquired ownership
 * (barrier released or owner died), false if it was already the owner or
 * was woken without gaining ownership.
 */
VALUE
rb_barrier_wait(VALUE self)
{
    rb_barrier_t *barrier;
    rb_thread_list_t *q;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (!barrier->owner || barrier->owner->status == THREAD_KILLED) {
	/* owner is gone: promote one waiter to owner (thlist_signal stores
	 * it into barrier->owner); otherwise the caller takes over */
	barrier->owner = 0;
	if (thlist_signal(&barrier->waiting, 1, &barrier->owner)) return Qfalse;
	return Qtrue;
    }
    else if (barrier->owner == GET_THREAD()) {
	/* re-entry by the current owner is a no-op */
	return Qfalse;
    }
    else {
	/* append ourselves to the FIFO and sleep until released/woken */
	*barrier->tail = q = ALLOC(rb_thread_list_t);
	q->th = GET_THREAD();
	q->next = 0;
	barrier->tail = &q->next;
	rb_thread_sleep_forever();
	/* we may have been promoted to owner while asleep */
	return barrier->owner == GET_THREAD() ? Qtrue : Qfalse;
    }
}
/*
 * Releases the barrier, waking every waiter.  Only the current owner may
 * release; raises ThreadError otherwise.  Returns the number of threads
 * woken, or false if none were waiting.
 */
VALUE
rb_barrier_release(VALUE self)
{
    rb_barrier_t *barrier;
    unsigned int n;

    Data_Get_Struct(self, rb_barrier_t, barrier);
    if (barrier->owner != GET_THREAD()) {
	rb_raise(rb_eThreadError, "not owned");
    }
    /* maxth == 0: wake all; the first woken waiter becomes the new owner */
    n = thlist_signal(&barrier->waiting, 0, &barrier->owner);
    return n ? UINT2NUM(n) : Qfalse;
}
/* variables for recursive traversals */
/* thread-local key under which the recursion-tracking hash is stored */
static ID recursive_key;
3082 static VALUE
3083 recursive_check(VALUE hash, VALUE obj)
3085 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3086 return Qfalse;
3088 else {
3089 VALUE list = rb_hash_aref(hash, ID2SYM(rb_frame_this_func()));
3091 if (NIL_P(list) || TYPE(list) != T_HASH)
3092 return Qfalse;
3093 if (NIL_P(rb_hash_lookup(list, obj)))
3094 return Qfalse;
3095 return Qtrue;
3099 static VALUE
3100 recursive_push(VALUE hash, VALUE obj)
3102 VALUE list, sym;
3104 sym = ID2SYM(rb_frame_this_func());
3105 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3106 hash = rb_hash_new();
3107 rb_thread_local_aset(rb_thread_current(), recursive_key, hash);
3108 list = Qnil;
3110 else {
3111 list = rb_hash_aref(hash, sym);
3113 if (NIL_P(list) || TYPE(list) != T_HASH) {
3114 list = rb_hash_new();
3115 rb_hash_aset(hash, sym, list);
3117 rb_hash_aset(list, obj, Qtrue);
3118 return hash;
3121 static void
3122 recursive_pop(VALUE hash, VALUE obj)
3124 VALUE list, sym;
3126 sym = ID2SYM(rb_frame_this_func());
3127 if (NIL_P(hash) || TYPE(hash) != T_HASH) {
3128 VALUE symname;
3129 VALUE thrname;
3130 symname = rb_inspect(sym);
3131 thrname = rb_inspect(rb_thread_current());
3133 rb_raise(rb_eTypeError, "invalid inspect_tbl hash for %s in %s",
3134 StringValuePtr(symname), StringValuePtr(thrname));
3136 list = rb_hash_aref(hash, sym);
3137 if (NIL_P(list) || TYPE(list) != T_HASH) {
3138 VALUE symname = rb_inspect(sym);
3139 VALUE thrname = rb_inspect(rb_thread_current());
3140 rb_raise(rb_eTypeError, "invalid inspect_tbl list for %s in %s",
3141 StringValuePtr(symname), StringValuePtr(thrname));
3143 rb_hash_delete(list, obj);
/*
 * Calls func(obj, arg, recursive).  `recursive' is true when the current
 * method is already running on `obj' in this thread, letting the callee
 * short-circuit (e.g. print "[...]" in Array#inspect) instead of
 * recursing forever.  Object identity is tracked by rb_obj_id.
 */
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    VALUE hash = rb_thread_local_aref(rb_thread_current(), recursive_key);
    VALUE objid = rb_obj_id(obj);

    if (recursive_check(hash, objid)) {
	return (*func) (obj, arg, Qtrue);
    }
    else {
	VALUE result = Qundef;
	int state;

	hash = recursive_push(hash, objid);
	PUSH_TAG();
	if ((state = EXEC_TAG()) == 0) {
	    result = (*func) (obj, arg, Qfalse);
	}
	POP_TAG();
	/* always pop the recursion marker, then re-raise any non-local exit */
	recursive_pop(hash, objid);
	if (state)
	    JUMP_TAG(state);
	return result;
    }
}
3172 /* tracer */
3174 static rb_event_hook_t *
3175 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3177 rb_event_hook_t *hook = ALLOC(rb_event_hook_t);
3178 hook->func = func;
3179 hook->flag = events;
3180 hook->data = data;
3181 return hook;
3184 static void
3185 thread_reset_event_flags(rb_thread_t *th)
3187 rb_event_hook_t *hook = th->event_hooks;
3188 rb_event_flag_t flag = th->event_flags & RUBY_EVENT_VM;
3190 while (hook) {
3191 flag |= hook->flag;
3192 hook = hook->next;
3196 void
3197 rb_thread_add_event_hook(rb_thread_t *th,
3198 rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3200 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3201 hook->next = th->event_hooks;
3202 th->event_hooks = hook;
3203 thread_reset_event_flags(th);
3206 static int
3207 set_threads_event_flags_i(st_data_t key, st_data_t val, st_data_t flag)
3209 VALUE thval = key;
3210 rb_thread_t *th;
3211 GetThreadPtr(thval, th);
3213 if (flag) {
3214 th->event_flags |= RUBY_EVENT_VM;
3216 else {
3217 th->event_flags &= (~RUBY_EVENT_VM);
3219 return ST_CONTINUE;
3222 static void
3223 set_threads_event_flags(int flag)
3225 st_foreach(GET_VM()->living_threads, set_threads_event_flags_i, (st_data_t) flag);
3228 void
3229 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
3231 rb_event_hook_t *hook = alloc_event_hook(func, events, data);
3232 rb_vm_t *vm = GET_VM();
3234 hook->next = vm->event_hooks;
3235 vm->event_hooks = hook;
3237 set_threads_event_flags(1);
/*
 * Unlinks and frees every hook in *root whose function matches `func'
 * (func == 0 removes all hooks).
 *
 * NOTE(review): this always returns -1, even when hooks were removed —
 * callers apparently do not distinguish success from failure; confirm
 * before relying on the return value.
 */
static int
remove_event_hook(rb_event_hook_t **root, rb_event_hook_func_t func)
{
    rb_event_hook_t *prev = NULL, *hook = *root, *next;

    while (hook) {
	next = hook->next;
	if (func == 0 || hook->func == func) {
	    /* unlink from head or from the middle of the list */
	    if (prev) {
		prev->next = hook->next;
	    }
	    else {
		*root = hook->next;
	    }
	    xfree(hook);
	}
	else {
	    prev = hook;
	}
	hook = next;
    }
    return -1;
}
3265 rb_thread_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func)
3267 int ret = remove_event_hook(&th->event_hooks, func);
3268 thread_reset_event_flags(th);
3269 return ret;
3273 rb_remove_event_hook(rb_event_hook_func_t func)
3275 rb_vm_t *vm = GET_VM();
3276 rb_event_hook_t *hook = vm->event_hooks;
3277 int ret = remove_event_hook(&vm->event_hooks, func);
3279 if (hook != NULL && vm->event_hooks == NULL) {
3280 set_threads_event_flags(0);
3283 return ret;
3286 static int
3287 clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
3289 rb_thread_t *th;
3290 GetThreadPtr((VALUE)key, th);
3291 rb_thread_remove_event_hook(th, 0);
3292 return ST_CONTINUE;
3295 void
3296 rb_clear_trace_func(void)
3298 st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
3299 rb_remove_event_hook(0);
/* forward declaration: hook callback installed by set_trace_func below */
static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
3305 * call-seq:
3306 * set_trace_func(proc) => proc
3307 * set_trace_func(nil) => nil
3309 * Establishes _proc_ as the handler for tracing, or disables
3310 * tracing if the parameter is +nil+. _proc_ takes up
3311 * to six parameters: an event name, a filename, a line number, an
3312 * object id, a binding, and the name of a class. _proc_ is
3313 * invoked whenever an event occurs. Events are: <code>c-call</code>
3314 * (call a C-language routine), <code>c-return</code> (return from a
3315 * C-language routine), <code>call</code> (call a Ruby method),
3316 * <code>class</code> (start a class or module definition),
3317 * <code>end</code> (finish a class or module definition),
3318 * <code>line</code> (execute code on a new line), <code>raise</code>
3319 * (raise an exception), and <code>return</code> (return from a Ruby
3320 * method). Tracing is disabled within the context of _proc_.
3322 * class Test
3323 * def test
3324 * a = 1
3325 * b = 2
3326 * end
3327 * end
3329 * set_trace_func proc { |event, file, line, id, binding, classname|
3330 * printf "%8s %s:%-2d %10s %8s\n", event, file, line, id, classname
3332 * t = Test.new
3333 * t.test
3335 * line prog.rb:11 false
3336 * c-call prog.rb:11 new Class
3337 * c-call prog.rb:11 initialize Object
3338 * c-return prog.rb:11 initialize Object
3339 * c-return prog.rb:11 new Class
3340 * line prog.rb:12 false
3341 * call prog.rb:2 test Test
3342 * line prog.rb:3 test Test
3343 * line prog.rb:4 test Test
3344 * return prog.rb:4 test Test
3347 static VALUE
3348 set_trace_func(VALUE obj, VALUE trace)
3350 rb_remove_event_hook(call_trace_func);
3352 if (NIL_P(trace)) {
3353 return Qnil;
3356 if (!rb_obj_is_proc(trace)) {
3357 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3360 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
3361 return trace;
3364 static void
3365 thread_add_trace_func(rb_thread_t *th, VALUE trace)
3367 if (!rb_obj_is_proc(trace)) {
3368 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
3371 rb_thread_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace);
3374 static VALUE
3375 thread_add_trace_func_m(VALUE obj, VALUE trace)
3377 rb_thread_t *th;
3378 GetThreadPtr(obj, th);
3379 thread_add_trace_func(th, trace);
3380 return trace;
3383 static VALUE
3384 thread_set_trace_func_m(VALUE obj, VALUE trace)
3386 rb_thread_t *th;
3387 GetThreadPtr(obj, th);
3388 rb_thread_remove_event_hook(th, call_trace_func);
3390 if (NIL_P(trace)) {
3391 return Qnil;
3393 thread_add_trace_func(th, trace);
3394 return trace;
3397 static const char *
3398 get_event_name(rb_event_flag_t event)
3400 switch (event) {
3401 case RUBY_EVENT_LINE:
3402 return "line";
3403 case RUBY_EVENT_CLASS:
3404 return "class";
3405 case RUBY_EVENT_END:
3406 return "end";
3407 case RUBY_EVENT_CALL:
3408 return "call";
3409 case RUBY_EVENT_RETURN:
3410 return "return";
3411 case RUBY_EVENT_C_CALL:
3412 return "c-call";
3413 case RUBY_EVENT_C_RETURN:
3414 return "c-return";
3415 case RUBY_EVENT_RAISE:
3416 return "raise";
3417 default:
3418 return "unknown";
/* defined later in this file; declared here for use by call_trace_func */
VALUE ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always);

/* Event data captured by call_trace_func and forwarded (boxed as a VALUE)
 * through ruby_suppress_tracing to call_trace_proc. */
struct call_trace_func_args {
    rb_event_flag_t event; /* which event fired */
    VALUE proc;            /* user trace proc to invoke */
    VALUE self;            /* receiver at the event site */
    ID id;                 /* method id (C events only; 0 otherwise) */
    VALUE klass;           /* defining class (C events only; 0 otherwise) */
};
/*
 * Invokes the user's trace proc with the six conventional arguments:
 * event name, file, line, method symbol, binding, class.  `args' is a
 * boxed struct call_trace_func_args*.
 */
static VALUE
call_trace_proc(VALUE args, int tracing)
{
    struct call_trace_func_args *p = (struct call_trace_func_args *)args;
    VALUE eventname = rb_str_new2(get_event_name(p->event));
    VALUE filename = rb_str_new2(rb_sourcefile());
    VALUE argv[6];
    int line = rb_sourceline();
    ID id = 0;
    VALUE klass = 0;

    if (p->event == RUBY_EVENT_C_CALL ||
	p->event == RUBY_EVENT_C_RETURN) {
	/* for C-level events the callee was recorded at hook time */
	id = p->id;
	klass = p->klass;
    }
    else {
	/* for Ruby-level events, look at the current control frame */
	rb_thread_method_id_and_class(GET_THREAD(), &id, &klass);
    }
    if (id == ID_ALLOCATOR)
	return Qnil; /* allocator calls are never traced */
    if (klass) {
	/* normalize: included-module proxies -> real class,
	 * singleton classes -> the attached object */
	if (TYPE(klass) == T_ICLASS) {
	    klass = RBASIC(klass)->klass;
	}
	else if (FL_TEST(klass, FL_SINGLETON)) {
	    klass = rb_iv_get(klass, "__attached__");
	}
    }
    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    argv[4] = p->self ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    return rb_proc_call_with_block(p->proc, 6, argv, Qnil);
}
3472 static void
3473 call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3475 struct call_trace_func_args args;
3477 args.event = event;
3478 args.proc = proc;
3479 args.self = self;
3480 args.id = id;
3481 args.klass = klass;
3482 ruby_suppress_tracing(call_trace_proc, (VALUE)&args, Qfalse);
/*
 * Runs func(arg, was_tracing) with th->tracing set, so trace hooks fired
 * inside `func' are ignored.  If tracing is already suppressed and
 * `always' is false, the call is skipped entirely and Qnil is returned.
 * Non-local exits from `func' are re-raised after state is restored.
 */
VALUE
ruby_suppress_tracing(VALUE (*func)(VALUE, int), VALUE arg, int always)
{
    rb_thread_t *th = GET_THREAD();
    int state, raised, tracing;
    VALUE result = Qnil;

    if ((tracing = th->tracing) != 0 && !always) {
	return Qnil;
    }
    else {
	th->tracing = 1;
    }
    /* clear the thread's raised flag so `func' starts from a clean state;
     * it is restored below if it was set */
    raised = rb_thread_reset_raised(th);

    PUSH_TAG();
    if ((state = EXEC_TAG()) == 0) {
	result = (*func)(arg, tracing);
    }
    if (raised) {
	rb_thread_set_raised(th);
    }
    POP_TAG();

    /* restore the previous tracing state before propagating any jump */
    th->tracing = tracing;
    if (state) {
	JUMP_TAG(state);
    }

    return result;
}
3520 * +Thread+ encapsulates the behavior of a thread of
3521 * execution, including the main thread of the Ruby script.
3523 * In the descriptions of the methods in this class, the parameter _sym_
3524 * refers to a symbol, which is either a quoted string or a
3525 * +Symbol+ (such as <code>:name</code>).
/*
 * Registers the Thread, ThreadGroup, Mutex, and ThreadError classes and
 * their methods, installs the trace entry points, and boots the native
 * thread layer (GVL, interrupt lock, timer thread) for the main thread.
 */
void
Init_Thread(void)
{
#undef rb_intern

    VALUE cThGroup;

    /* Thread: class-level methods */
    rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
    rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
    rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
    rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
    rb_define_singleton_method(rb_cThread, "stop", rb_thread_stop, 0);
    rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
    rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
    rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
    rb_define_singleton_method(rb_cThread, "list", rb_thread_list, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
    rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif

    /* Thread: instance methods */
    rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
    rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
    rb_define_method(rb_cThread, "join", thread_join_m, -1);
    rb_define_method(rb_cThread, "value", thread_value, 0);
    rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
    rb_define_method(rb_cThread, "run", rb_thread_run, 0);
    rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
    rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
    rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
    rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
    rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
    rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
    rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
    rb_define_method(rb_cThread, "status", rb_thread_status, 0);
    rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
    rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
    rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
    rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
    rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
    rb_define_method(rb_cThread, "group", rb_thread_group, 0);

    rb_define_method(rb_cThread, "inspect", rb_thread_inspect, 0);

    /* ThreadGroup */
    cThGroup = rb_define_class("ThreadGroup", rb_cObject);
    rb_define_alloc_func(cThGroup, thgroup_s_alloc);
    rb_define_method(cThGroup, "list", thgroup_list, 0);
    rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
    rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
    rb_define_method(cThGroup, "add", thgroup_add, 1);

    {
	/* the main thread starts out in ThreadGroup::Default */
	rb_thread_t *th = GET_THREAD();
	th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
	rb_define_const(cThGroup, "Default", th->thgroup);
    }

    /* Mutex */
    rb_cMutex = rb_define_class("Mutex", rb_cObject);
    rb_define_alloc_func(rb_cMutex, mutex_alloc);
    rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
    rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
    rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
    rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
    rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
    rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);

    recursive_key = rb_intern("__recursive_key__");
    rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);

    /* trace */
    rb_define_global_function("set_trace_func", set_trace_func, 1);
    rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
    rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);

    /* init thread core */
    Init_native_thread();

    /* main thread setting */
    {
	/* acquire global interpreter lock */
	rb_thread_lock_t *lp = &GET_THREAD()->vm->global_vm_lock;
	native_mutex_initialize(lp);
	native_mutex_lock(lp);
	native_mutex_initialize(&GET_THREAD()->interrupt_lock);
    }

    rb_thread_create_timer_thread();

    /* suppress unused-function warnings on platforms that do not call
     * these helpers */
    (void)native_mutex_trylock;
    (void)ruby_thread_set_native;
}
/* Returns non-zero when the calling native thread is registered with the
 * Ruby VM (i.e. it is safe to call Ruby APIs from it). */
int
ruby_native_thread_p(void)
{
    rb_thread_t *th = ruby_thread_from_native();

    return th ? Qtrue : Qfalse;
}
/*
 * st_foreach callback for deadlock detection: sets *found when `key'
 * (a living thread) can still make progress, meaning the VM is NOT
 * deadlocked.  Iteration stops as soon as one runnable thread is found.
 */
static int
check_deadlock_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    /* runnable if not sleeping forever, has a pending interrupt, or is
     * mid-transition on a lock */
    if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th) || th->transition_for_lock) {
	*found = 1;
    }
    else if (th->locking_mutex) {
	mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	native_mutex_lock(&mutex->lock);
	/* runnable if it already owns the mutex, or the mutex is free and
	 * the thread has been signalled */
	if (mutex->th == th || (!mutex->th && mutex->cond_notified)) {
	    *found = 1;
	}
	native_mutex_unlock(&mutex->lock);
    }

    return (*found) ? ST_STOP : ST_CONTINUE;
}
#if 0 /* for debug */
/* Dumps one thread's scheduling state (status, interrupt flag, lock
 * transition, and any mutex it is blocked on); enable together with the
 * debug printf in rb_check_deadlock below. */
static int
debug_i(st_data_t key, st_data_t val, int *found)
{
    VALUE thval = key;
    rb_thread_t *th;
    GetThreadPtr(thval, th);

    printf("th:%p %d %d %d", th, th->status, th->interrupt_flag, th->transition_for_lock);
    if (th->locking_mutex) {
	mutex_t *mutex;
	GetMutexPtr(th->locking_mutex, mutex);

	native_mutex_lock(&mutex->lock);
	printf(" %p %d\n", mutex->th, mutex->cond_notified);
	native_mutex_unlock(&mutex->lock);
    }
    else puts("");

    return ST_CONTINUE;
}
#endif
/*
 * Raises a fatal "deadlock detected" exception in the main thread when
 * every living thread is asleep with no way of being woken.
 */
static void
rb_check_deadlock(rb_vm_t *vm)
{
    int found = 0;

    /* cheap pre-check: deadlock is only possible when every living
     * thread is counted as a sleeper */
    if (vm_living_thread_num(vm) > vm->sleeper) return;
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");

    st_foreach(vm->living_threads, check_deadlock_i, (st_data_t)&found);

    if (!found) {
	VALUE argv[2];
	argv[0] = rb_eFatal;
	argv[1] = rb_str_new2("deadlock detected");
#if 0 /* for debug */
	printf("%d %d %p %p\n", vm->living_threads->num_entries, vm->sleeper, GET_THREAD(), vm->main_thread);
	st_foreach(vm->living_threads, debug_i, (st_data_t)0);
#endif
	rb_thread_raise(2, argv, vm->main_thread);
    }
}
3703 static void
3704 update_coverage(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
3706 VALUE coverage = GET_THREAD()->cfp->iseq->coverage;
3707 if (coverage && RBASIC(coverage)->klass == 0) {
3708 long line = rb_sourceline() - 1;
3709 long count;
3710 if (RARRAY_PTR(coverage)[line] == Qnil) {
3711 rb_bug("bug");
3713 count = FIX2LONG(RARRAY_PTR(coverage)[line]) + 1;
3714 if (POSFIXABLE(count)) {
3715 RARRAY_PTR(coverage)[line] = LONG2FIX(count);
3720 VALUE
3721 rb_get_coverages(void)
3723 return GET_VM()->coverages;
3726 void
3727 rb_set_coverages(VALUE coverages)
3729 GET_VM()->coverages = coverages;
3730 rb_add_event_hook(update_coverage, RUBY_EVENT_COVERAGE, Qnil);
3733 void
3734 rb_reset_coverages(void)
3736 GET_VM()->coverages = Qfalse;
3737 rb_remove_event_hook(update_coverage);