* file.c (rb_find_file_ext): guard load_path from GC.
[ruby-svn.git] / cont.c
blob02264f839574dd49e74a256d81821fe7327493ec
1 /**********************************************************************
3 cont.c -
5 $Author$
6 created at: Thu May 23 09:03:43 2007
8 Copyright (C) 2007 Koichi Sasada
10 **********************************************************************/
12 #include "ruby/ruby.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
/* Discriminates what an rb_context_t represents: a captured
 * continuation, an ordinary fiber, or the implicit root fiber. */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};
23 typedef struct rb_context_struct {
24 VALUE self;
25 VALUE value;
26 VALUE *vm_stack;
27 VALUE *machine_stack;
28 VALUE *machine_stack_src;
29 #ifdef __ia64
30 VALUE *machine_register_stack;
31 VALUE *machine_register_stack_src;
32 int machine_register_stack_size;
33 #endif
34 rb_thread_t saved_thread;
35 rb_jmpbuf_t jmpbuf;
36 int machine_stack_size;
37 VALUE prev;
38 int alive;
39 enum context_type type;
40 } rb_context_t;
42 static VALUE rb_cContinuation;
43 static VALUE rb_cFiber;
44 static VALUE rb_eFiberError;
46 #define GetContPtr(obj, ptr) \
47 Data_Get_Struct(obj, rb_context_t, ptr)
49 NOINLINE(static VALUE cont_capture(volatile int *stat));
51 void rb_thread_mark(rb_thread_t *th);
53 static void
54 cont_mark(void *ptr)
56 RUBY_MARK_ENTER("cont");
57 if (ptr) {
58 rb_context_t *cont = ptr;
59 rb_gc_mark(cont->value);
60 rb_gc_mark(cont->prev);
61 rb_thread_mark(&cont->saved_thread);
63 if (cont->vm_stack) {
64 rb_gc_mark_locations(cont->vm_stack,
65 cont->vm_stack + cont->saved_thread.stack_size);
68 if (cont->machine_stack) {
69 rb_gc_mark_locations(cont->machine_stack,
70 cont->machine_stack + cont->machine_stack_size);
72 #ifdef __ia64
73 if (cont->machine_register_stack) {
74 rb_gc_mark_locations(cont->machine_register_stack,
75 cont->machine_register_stack + cont->machine_register_stack_size);
77 #endif
79 RUBY_MARK_LEAVE("cont");
82 static void
83 cont_free(void *ptr)
85 RUBY_FREE_ENTER("cont");
86 if (ptr) {
87 rb_context_t *cont = ptr;
88 RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack);
89 RUBY_FREE_UNLESS_NULL(cont->machine_stack);
90 #ifdef __ia64
91 RUBY_FREE_UNLESS_NULL(cont->machine_register_stack);
92 #endif
93 RUBY_FREE_UNLESS_NULL(cont->vm_stack);
95 if (cont->type == FIBER_CONTEXT) {
96 st_free_table(cont->saved_thread.local_storage);
99 ruby_xfree(ptr);
101 RUBY_FREE_LEAVE("cont");
104 static void
105 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
107 int size;
108 rb_thread_t *sth = &cont->saved_thread;
110 SET_MACHINE_STACK_END(&th->machine_stack_end);
111 #ifdef __ia64
112 th->machine_register_stack_end = rb_ia64_bsp();
113 #endif
115 if (th->machine_stack_start > th->machine_stack_end) {
116 size = cont->machine_stack_size = th->machine_stack_start - th->machine_stack_end;
117 cont->machine_stack_src = th->machine_stack_end;
119 else {
120 size = cont->machine_stack_size = th->machine_stack_end - th->machine_stack_start;
121 cont->machine_stack_src = th->machine_stack_start;
124 if (cont->machine_stack) {
125 REALLOC_N(cont->machine_stack, VALUE, size);
127 else {
128 cont->machine_stack = ALLOC_N(VALUE, size);
131 FLUSH_REGISTER_WINDOWS;
132 MEMCPY(cont->machine_stack, cont->machine_stack_src, VALUE, size);
134 #ifdef __ia64
135 rb_ia64_flushrs();
136 size = cont->machine_register_stack_size = th->machine_register_stack_end - th->machine_register_stack_start;
137 cont->machine_register_stack_src = th->machine_register_stack_start;
138 if (cont->machine_register_stack) {
139 REALLOC_N(cont->machine_register_stack, VALUE, size);
141 else {
142 cont->machine_register_stack = ALLOC_N(VALUE, size);
145 MEMCPY(cont->machine_register_stack, cont->machine_register_stack_src, VALUE, size);
146 #endif
148 sth->machine_stack_start = sth->machine_stack_end = 0;
149 #ifdef __ia64
150 sth->machine_register_stack_start = sth->machine_register_stack_end = 0;
151 #endif
154 static rb_context_t *
155 cont_new(VALUE klass)
157 rb_context_t *cont;
158 volatile VALUE contval;
159 rb_thread_t *th = GET_THREAD();
161 contval = Data_Make_Struct(klass, rb_context_t,
162 cont_mark, cont_free, cont);
164 cont->self = contval;
165 cont->alive = Qtrue;
167 /* save thread context */
168 cont->saved_thread = *th;
170 return cont;
173 void vm_stack_to_heap(rb_thread_t *th);
175 static VALUE
176 cont_capture(volatile int *stat)
178 rb_context_t *cont;
179 rb_thread_t *th = GET_THREAD(), *sth;
180 volatile VALUE contval;
182 vm_stack_to_heap(th);
183 cont = cont_new(rb_cContinuation);
184 contval = cont->self;
185 sth = &cont->saved_thread;
187 cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
188 MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
189 sth->stack = 0;
191 cont_save_machine_stack(th, cont);
193 if (ruby_setjmp(cont->jmpbuf)) {
194 VALUE value;
196 value = cont->value;
197 cont->value = Qnil;
198 *stat = 1;
199 return value;
201 else {
202 *stat = 0;
203 return cont->self;
207 NORETURN(static void cont_restore_1(rb_context_t *));
209 static void
210 cont_restore_1(rb_context_t *cont)
212 rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;
214 /* restore thread context */
215 if (cont->type == CONTINUATION_CONTEXT) {
216 /* continuation */
217 VALUE fib;
219 th->fiber = sth->fiber;
220 fib = th->fiber ? th->fiber : th->root_fiber;
222 if (fib) {
223 rb_context_t *fcont;
224 GetContPtr(fib, fcont);
225 th->stack_size = fcont->saved_thread.stack_size;
226 th->stack = fcont->saved_thread.stack;
228 MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
230 else {
231 /* fiber */
232 th->stack = sth->stack;
233 th->stack_size = sth->stack_size;
234 th->local_storage = sth->local_storage;
235 th->fiber = cont->self;
238 th->cfp = sth->cfp;
239 th->safe_level = sth->safe_level;
240 th->raised_flag = sth->raised_flag;
241 th->state = sth->state;
242 th->status = sth->status;
243 th->tag = sth->tag;
244 th->trap_tag = sth->trap_tag;
245 th->errinfo = sth->errinfo;
246 th->first_proc = sth->first_proc;
248 /* restore machine stack */
249 #ifdef _M_AMD64
251 /* workaround for x64 SEH */
252 jmp_buf buf;
253 setjmp(buf);
254 ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
255 ((_JUMP_BUFFER*)(&buf))->Frame;
257 #endif
258 if (cont->machine_stack_src) {
259 FLUSH_REGISTER_WINDOWS;
260 MEMCPY(cont->machine_stack_src, cont->machine_stack,
261 VALUE, cont->machine_stack_size);
264 #ifdef __ia64
265 if (cont->machine_register_stack_src) {
266 MEMCPY(cont->machine_register_stack_src, cont->machine_register_stack,
267 VALUE, cont->machine_register_stack_size);
269 #endif
271 ruby_longjmp(cont->jmpbuf, 1);
274 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
276 #ifdef __ia64
277 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
278 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
279 static volatile int C(a), C(b), C(c), C(d), C(e);
280 static volatile int C(f), C(g), C(h), C(i), C(j);
281 static volatile int C(k), C(l), C(m), C(n), C(o);
282 static volatile int C(p), C(q), C(r), C(s), C(t);
283 int rb_dummy_false = 0;
284 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *)));
285 static void
286 register_stack_extend(rb_context_t *cont, VALUE *curr_bsp)
288 if (rb_dummy_false) {
289 /* use registers as much as possible */
290 E(a) = E(b) = E(c) = E(d) = E(e) =
291 E(f) = E(g) = E(h) = E(i) = E(j) =
292 E(k) = E(l) = E(m) = E(n) = E(o) =
293 E(p) = E(q) = E(r) = E(s) = E(t) = 0;
294 E(a) = E(b) = E(c) = E(d) = E(e) =
295 E(f) = E(g) = E(h) = E(i) = E(j) =
296 E(k) = E(l) = E(m) = E(n) = E(o) =
297 E(p) = E(q) = E(r) = E(s) = E(t) = 0;
299 if (curr_bsp < cont->machine_register_stack_src+cont->machine_register_stack_size) {
300 register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
302 cont_restore_1(cont);
304 #undef C
305 #undef E
306 #endif
308 static void
309 cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
311 if (cont->machine_stack_src) {
312 #define STACK_PAD_SIZE 1024
313 VALUE space[STACK_PAD_SIZE];
315 #if STACK_GROW_DIRECTION < 0 /* downward */
316 if (addr_in_prev_frame > cont->machine_stack_src) {
317 cont_restore_0(cont, &space[0]);
319 #elif STACK_GROW_DIRECTION > 0 /* upward */
320 if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
321 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
323 #else
324 if (addr_in_prev_frame > &space[0]) {
325 /* Stack grows downward */
326 if (addr_in_prev_frame > cont->saved_thread.machine_stack_size) {
327 cont_restore_0(cont, &space[0]);
330 else {
331 /* Stack grows upward */
332 if (addr_in_prev_frame < cont->machine_stack_src + cont->machine_stack_size) {
333 cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
336 #endif
338 #ifdef __ia64
339 register_stack_extend(cont, (VALUE*)rb_ia64_bsp());
340 #else
341 cont_restore_1(cont);
342 #endif
/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by
 *  <code>Kernel#callcc</code>. They hold a return address and execution
 *  context, allowing a nonlocal return to the end of the
 *  <code>callcc</code> block from anywhere within a program.
 *  Continuations are somewhat analogous to a structured version of C's
 *  <code>setjmp/longjmp</code> (although they contain more state, so
 *  you might consider them closer to threads).
 *
 *  For instance:
 *
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc{|$cc|}
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  <em>produces:</em>
 *
 *     Freddie
 *     Herbie
 *     Ron
 *     Max
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     callcc {|cont|
 *       for i in 0..4
 *         print "\n#{i}: "
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *           printf "%3d", j
 *         end
 *       end
 *     }
 *     print "\n"
 *
 *  <em>produces:</em>
 *
 *     0:   0   1   2   3   4
 *     1:   5   6   7   8   9
 *     2:  10  11  12  13  14
 *     3:  15  16
 */
393 * call-seq:
394 * callcc {|cont| block } => obj
396 * Generates a <code>Continuation</code> object, which it passes to the
397 * associated block. Performing a <em>cont</em><code>.call</code> will
398 * cause the <code>callcc</code> to return (as will falling through the
399 * end of the block). The value returned by the <code>callcc</code> is
400 * the value of the block, or the value passed to
401 * <em>cont</em><code>.call</code>. See class <code>Continuation</code>
402 * for more details. Also see <code>Kernel::throw</code> for
403 * an alternative mechanism for unwinding a call stack.
406 static VALUE
407 rb_callcc(VALUE self)
409 volatile int called;
410 volatile VALUE val = cont_capture(&called);
412 if (called) {
413 return val;
415 else {
416 return rb_yield(val);
420 static VALUE
421 make_passing_arg(int argc, VALUE *argv)
423 switch(argc) {
424 case 0:
425 return Qnil;
426 case 1:
427 return argv[0];
428 default:
429 return rb_ary_new4(argc, argv);
434 * call-seq:
435 * cont.call(args, ...)
436 * cont[args, ...]
438 * Invokes the continuation. The program continues from the end of the
439 * <code>callcc</code> block. If no arguments are given, the original
440 * <code>callcc</code> returns <code>nil</code>. If one argument is
441 * given, <code>callcc</code> returns it. Otherwise, an array
442 * containing <i>args</i> is returned.
444 * callcc {|cont| cont.call } #=> nil
445 * callcc {|cont| cont.call 1 } #=> 1
446 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
449 static VALUE
450 rb_cont_call(int argc, VALUE *argv, VALUE contval)
452 rb_context_t *cont;
453 rb_thread_t *th = GET_THREAD();
454 GetContPtr(contval, cont);
456 if (cont->saved_thread.self != th->self) {
457 rb_raise(rb_eRuntimeError, "continuation called across threads");
459 if (cont->saved_thread.trap_tag != th->trap_tag) {
460 rb_raise(rb_eRuntimeError, "continuation called across trap");
462 if (cont->saved_thread.fiber) {
463 rb_context_t *fcont;
464 GetContPtr(cont->saved_thread.fiber, fcont);
466 if (th->fiber != cont->saved_thread.fiber) {
467 rb_raise(rb_eRuntimeError, "continuation called across fiber");
470 if (!fcont->alive) {
471 rb_raise(rb_eRuntimeError, "continuation called dead fiber");
475 cont->value = make_passing_arg(argc, argv);
477 cont_restore_0(cont, &contval);
478 return Qnil; /* unreachable */
481 /*********/
482 /* fiber */
483 /*********/
485 #define FIBER_VM_STACK_SIZE (4 * 1024)
487 static rb_context_t *
488 fiber_alloc(VALUE klass)
490 rb_context_t *cont = cont_new(klass);
492 cont->type = FIBER_CONTEXT;
493 cont->prev = Qnil;
495 return cont;
498 static VALUE
499 fiber_new(VALUE klass, VALUE proc)
501 rb_context_t *cont = fiber_alloc(klass);
502 VALUE contval = cont->self;
503 rb_thread_t *th = &cont->saved_thread;
505 /* initialize */
506 cont->vm_stack = 0;
508 th->stack = 0;
509 th->stack_size = FIBER_VM_STACK_SIZE;
510 th->stack = ALLOC_N(VALUE, th->stack_size);
512 th->cfp = (void *)(th->stack + th->stack_size);
513 th->cfp--;
514 th->cfp->pc = 0;
515 th->cfp->sp = th->stack + 1;
516 th->cfp->bp = 0;
517 th->cfp->lfp = th->stack;
518 *th->cfp->lfp = 0;
519 th->cfp->dfp = th->stack;
520 th->cfp->self = Qnil;
521 th->cfp->flag = 0;
522 th->cfp->iseq = 0;
523 th->cfp->proc = 0;
524 th->cfp->block_iseq = 0;
525 th->tag = 0;
526 th->local_storage = st_init_numtable();
528 th->first_proc = proc;
530 MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
532 return contval;
535 VALUE
536 rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
538 return fiber_new(rb_cFiber, rb_proc_new(func, obj));
541 static VALUE
542 rb_fiber_s_new(VALUE self)
544 return fiber_new(self, rb_block_proc());
547 static VALUE
548 return_fiber(void)
550 rb_context_t *cont;
551 VALUE curr = rb_fiber_current();
552 GetContPtr(curr, cont);
554 if (cont->prev == Qnil) {
555 rb_thread_t *th = GET_THREAD();
557 if (th->root_fiber != curr) {
558 return th->root_fiber;
560 else {
561 rb_raise(rb_eFiberError, "can't yield from root fiber");
564 else {
565 VALUE prev = cont->prev;
566 cont->prev = Qnil;
567 return prev;
571 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
573 static void
574 rb_fiber_terminate(rb_context_t *cont)
576 VALUE value = cont->value;
577 cont->alive = Qfalse;
578 rb_fiber_transfer(return_fiber(), 1, &value);
581 void
582 rb_fiber_start(void)
584 rb_thread_t *th = GET_THREAD();
585 rb_context_t *cont;
586 rb_proc_t *proc;
587 VALUE args;
588 int state;
590 GetContPtr(th->fiber, cont);
591 TH_PUSH_TAG(th);
592 if ((state = EXEC_TAG()) == 0) {
593 GetProcPtr(cont->saved_thread.first_proc, proc);
594 args = cont->value;
595 cont->value = Qnil;
596 th->errinfo = Qnil;
597 th->local_lfp = proc->block.lfp;
598 th->local_svar = Qnil;
600 cont->value = vm_invoke_proc(th, proc, proc->block.self, 1, &args, 0);
602 TH_POP_TAG();
604 if (state) {
605 if (TAG_RAISE) {
606 th->thrown_errinfo = th->errinfo;
608 else {
609 th->thrown_errinfo =
610 vm_make_jump_tag_but_local_jump(state, th->errinfo);
612 RUBY_VM_SET_INTERRUPT(th);
615 rb_fiber_terminate(cont);
616 rb_bug("rb_fiber_start: unreachable");
619 VALUE
620 rb_fiber_current()
622 rb_thread_t *th = GET_THREAD();
623 if (th->fiber == 0) {
624 /* save root */
625 rb_context_t *cont = fiber_alloc(rb_cFiber);
626 cont->type = ROOT_FIBER_CONTEXT;
627 th->root_fiber = th->fiber = cont->self;
629 return th->fiber;
632 static VALUE
633 fiber_store(rb_context_t *next_cont)
635 rb_thread_t *th = GET_THREAD();
636 rb_context_t *cont;
638 if (th->fiber) {
639 GetContPtr(th->fiber, cont);
640 cont->saved_thread = *th;
642 else {
643 /* create current fiber */
644 cont = fiber_alloc(rb_cFiber); /* no need to allocate vm stack */
645 cont->type = ROOT_FIBER_CONTEXT;
646 th->root_fiber = th->fiber = cont->self;
649 cont_save_machine_stack(th, cont);
651 if (ruby_setjmp(cont->jmpbuf)) {
652 /* restored */
653 GetContPtr(th->fiber, cont);
654 return cont->value;
656 else {
657 return Qundef;
661 static inline VALUE
662 fiber_switch(VALUE fib, int argc, VALUE *argv, int is_resume)
664 VALUE value;
665 rb_context_t *cont;
666 rb_thread_t *th = GET_THREAD();
668 GetContPtr(fib, cont);
670 if (cont->saved_thread.self != th->self) {
671 rb_raise(rb_eFiberError, "fiber called across threads");
673 else if (cont->saved_thread.trap_tag != th->trap_tag) {
674 rb_raise(rb_eFiberError, "fiber called across trap");
676 else if (!cont->alive) {
677 rb_raise(rb_eFiberError, "dead fiber called");
680 if (is_resume) {
681 cont->prev = rb_fiber_current();
684 cont->value = make_passing_arg(argc, argv);
686 if ((value = fiber_store(cont)) == Qundef) {
687 cont_restore_0(cont, &value);
688 rb_bug("rb_fiber_resume: unreachable");
691 RUBY_VM_CHECK_INTS();
693 return value;
696 VALUE
697 rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
699 return fiber_switch(fib, argc, argv, 0);
702 VALUE
703 rb_fiber_resume(VALUE fib, int argc, VALUE *argv)
705 rb_context_t *cont;
706 GetContPtr(fib, cont);
708 if (cont->prev != Qnil) {
709 rb_raise(rb_eFiberError, "double resume");
712 return fiber_switch(fib, argc, argv, 1);
715 VALUE
716 rb_fiber_yield(int argc, VALUE *argv)
718 return rb_fiber_transfer(return_fiber(), argc, argv);
721 VALUE
722 rb_fiber_alive_p(VALUE fib)
724 rb_context_t *cont;
725 GetContPtr(fib, cont);
726 return cont->alive;
729 static VALUE
730 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
732 return rb_fiber_resume(fib, argc, argv);
735 static VALUE
736 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fib)
738 return rb_fiber_transfer(fib, argc, argv);
741 static VALUE
742 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
744 return rb_fiber_yield(argc, argv);
747 static VALUE
748 rb_fiber_s_current(VALUE klass)
750 return rb_fiber_current();
753 void
754 Init_Cont(void)
756 rb_cFiber = rb_define_class("Fiber", rb_cObject);
757 rb_undef_alloc_func(rb_cFiber);
758 rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
759 rb_define_singleton_method(rb_cFiber, "new", rb_fiber_s_new, 0);
760 rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
761 rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
764 void
765 Init_Continuation_body(void)
767 rb_cContinuation = rb_define_class("Continuation", rb_cObject);
768 rb_undef_alloc_func(rb_cContinuation);
769 rb_undef_method(CLASS_OF(rb_cContinuation), "new");
770 rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
771 rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
772 rb_define_global_function("callcc", rb_callcc, 0);
775 void
776 Init_Fiber_as_Coroutine(void)
778 rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
779 rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
780 rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);