1 /**********************************************************************
6 created at: Thu May 23 09:03:43 2007
8 Copyright (C) 2007 Koichi Sasada
10 **********************************************************************/
12 #include "ruby/ruby.h"
15 #include "eval_intern.h"
/* Tags stored in rb_context_t.type to distinguish how a saved context
 * is used.  NOTE(review): the enum's opening/closing lines are missing
 * from this extraction; FIBER_CONTEXT is referenced below (cont_free,
 * fiber_alloc) and presumably sits between these at value 1 -- confirm
 * against the full source. */
18 CONTINUATION_CONTEXT
= 0,
20 ROOT_FIBER_CONTEXT
= 2,
/* Per-continuation/fiber saved state.  NOTE(review): several members
 * used elsewhere in this file (self, value, prev, alive, vm_stack,
 * machine_stack, jmpbuf) are missing from this extraction of the
 * struct -- the lines were lost, not absent from the real source. */
23 typedef struct rb_context_struct
{
/* Address in the live machine stack that machine_stack was copied from
 * (and is copied back to on restore). */
28 VALUE
*machine_stack_src
;
/* IA64 register backing store copy and its source/size (see the
 * rb_ia64_bsp() uses below). */
30 VALUE
*machine_register_stack
;
31 VALUE
*machine_register_stack_src
;
32 int machine_register_stack_size
;
/* Full snapshot of the thread at capture time (by-value copy of
 * rb_thread_t, see cont_new). */
34 rb_thread_t saved_thread
;
/* Element count of machine_stack (VALUEs, not bytes -- see the
 * pointer-difference computation in cont_save_machine_stack). */
36 int machine_stack_size
;
/* One of the context_type tags above. */
39 enum context_type type
;
/* Class/exception objects registered in the Init_* functions below. */
42 static VALUE rb_cContinuation
;
43 static VALUE rb_cFiber
;
44 static VALUE rb_eFiberError
;
/* Fetch the rb_context_t wrapped by a Continuation/Fiber VALUE. */
46 #define GetContPtr(obj, ptr) \
47 Data_Get_Struct(obj, rb_context_t, ptr)
/* NOINLINE: cont_capture must get its own stack frame so the captured
 * machine-stack boundary is well defined. */
49 NOINLINE(static VALUE
cont_capture(volatile int *stat
));
/* Defined in the thread subsystem; used by cont_mark below. */
51 void rb_thread_mark(rb_thread_t
*th
);
/* GC mark callback for rb_context_t (the cont_mark function header is
 * missing from this extraction; it receives the wrapped pointer as
 * void *ptr).  Marks the captured Ruby values, the saved thread, and
 * every VALUE slot in the copied VM and machine stacks so objects
 * reachable only from a dormant continuation/fiber survive GC. */
56 RUBY_MARK_ENTER("cont");
58 rb_context_t
*cont
= ptr
;
59 rb_gc_mark(cont
->value
);
60 rb_gc_mark(cont
->prev
);
61 rb_thread_mark(&cont
->saved_thread
);
/* Copied VM stack: mark the slot range [vm_stack, vm_stack+stack_size). */
64 rb_gc_mark_locations(cont
->vm_stack
,
65 cont
->vm_stack
+ cont
->saved_thread
.stack_size
);
/* Machine stack copy is conservative: every word is treated as a
 * possible VALUE. */
68 if (cont
->machine_stack
) {
69 rb_gc_mark_locations(cont
->machine_stack
,
70 cont
->machine_stack
+ cont
->machine_stack_size
);
/* IA64 register backing store copy, same conservative treatment. */
73 if (cont
->machine_register_stack
) {
74 rb_gc_mark_locations(cont
->machine_register_stack
,
75 cont
->machine_register_stack
+ cont
->machine_register_stack_size
);
79 RUBY_MARK_LEAVE("cont");
/* GC free callback for rb_context_t (function header missing from this
 * extraction).  Releases every heap buffer owned by the context; for
 * fibers it also frees the fiber-local storage table that the saved
 * thread owns. */
85 RUBY_FREE_ENTER("cont");
87 rb_context_t
*cont
= ptr
;
88 RUBY_FREE_UNLESS_NULL(cont
->saved_thread
.stack
);
89 RUBY_FREE_UNLESS_NULL(cont
->machine_stack
);
91 RUBY_FREE_UNLESS_NULL(cont
->machine_register_stack
);
93 RUBY_FREE_UNLESS_NULL(cont
->vm_stack
);
/* Only plain fibers own their local_storage table; continuations and
 * the root fiber share the live thread's table. */
95 if (cont
->type
== FIBER_CONTEXT
) {
96 st_free_table(cont
->saved_thread
.local_storage
);
101 RUBY_FREE_LEAVE("cont");
/* Copy the caller thread's live machine stack (and, on IA64, the
 * register backing store) into heap buffers owned by cont, so the
 * context can later be restored by cont_restore_0/_1.  Handles both
 * stack growth directions; sizes are in VALUE words (pointer
 * differences), not bytes. */
105 cont_save_machine_stack(rb_thread_t
*th
, rb_context_t
*cont
)
108 rb_thread_t
*sth
= &cont
->saved_thread
;
/* Record the current stack extent before measuring it. */
110 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
112 th
->machine_register_stack_end
= rb_ia64_bsp();
/* Downward-growing stack: start > end, live region is [end, start). */
115 if (th
->machine_stack_start
> th
->machine_stack_end
) {
116 size
= cont
->machine_stack_size
= th
->machine_stack_start
- th
->machine_stack_end
;
117 cont
->machine_stack_src
= th
->machine_stack_end
;
/* Upward-growing stack: live region is [start, end). */
120 size
= cont
->machine_stack_size
= th
->machine_stack_end
- th
->machine_stack_start
;
121 cont
->machine_stack_src
= th
->machine_stack_start
;
/* Reuse the buffer from a previous capture if present. */
124 if (cont
->machine_stack
) {
125 REALLOC_N(cont
->machine_stack
, VALUE
, size
);
128 cont
->machine_stack
= ALLOC_N(VALUE
, size
);
/* Force register windows to memory (SPARC/IA64) before copying. */
131 FLUSH_REGISTER_WINDOWS
;
132 MEMCPY(cont
->machine_stack
, cont
->machine_stack_src
, VALUE
, size
);
/* IA64 register backing store grows upward from its start. */
136 size
= cont
->machine_register_stack_size
= th
->machine_register_stack_end
- th
->machine_register_stack_start
;
137 cont
->machine_register_stack_src
= th
->machine_register_stack_start
;
138 if (cont
->machine_register_stack
) {
139 REALLOC_N(cont
->machine_register_stack
, VALUE
, size
);
142 cont
->machine_register_stack
= ALLOC_N(VALUE
, size
);
145 MEMCPY(cont
->machine_register_stack
, cont
->machine_register_stack_src
, VALUE
, size
);
/* The saved-thread copy must not point at the live machine stack:
 * zero its bounds so GC marking of saved_thread cannot scan it. */
148 sth
->machine_stack_start
= sth
->machine_stack_end
= 0;
150 sth
->machine_register_stack_start
= sth
->machine_register_stack_end
= 0;
/* Allocate a new wrapped rb_context_t of the given class and snapshot
 * the current thread into it by value.  Returns the context pointer;
 * the wrapping VALUE is stored in cont->self.  (Several lines of this
 * function are missing from the extraction.) */
154 static rb_context_t
*
155 cont_new(VALUE klass
)
158 volatile VALUE contval
;
159 rb_thread_t
*th
= GET_THREAD();
161 contval
= Data_Make_Struct(klass
, rb_context_t
,
162 cont_mark
, cont_free
, cont
);
164 cont
->self
= contval
;
167 /* save thread context */
168 cont
->saved_thread
= *th
;
/* Defined in the VM; moves the thread's VM stack to the heap before a
 * continuation capture (used by cont_capture below). */
173 void vm_stack_to_heap(rb_thread_t
*th
);
/* Capture the current execution context as a Continuation.  Copies the
 * VM stack and machine stack, then ruby_setjmp()s; *stat distinguishes
 * the initial return from a later longjmp re-entry.  (The tail of this
 * function -- the setjmp branches and return -- is missing from the
 * extraction.) */
176 cont_capture(volatile int *stat
)
179 rb_thread_t
*th
= GET_THREAD(), *sth
;
180 volatile VALUE contval
;
/* Ensure the VM stack lives on the heap so it can be snapshotted. */
182 vm_stack_to_heap(th
);
183 cont
= cont_new(rb_cContinuation
);
184 contval
= cont
->self
;
185 sth
= &cont
->saved_thread
;
/* Deep-copy the whole VM stack into the context. */
187 cont
->vm_stack
= ALLOC_N(VALUE
, th
->stack_size
);
188 MEMCPY(cont
->vm_stack
, th
->stack
, VALUE
, th
->stack_size
);
191 cont_save_machine_stack(th
, cont
);
/* Nonzero when re-entered via ruby_longjmp from cont_restore_1. */
193 if (ruby_setjmp(cont
->jmpbuf
)) {
/* Second phase of context restoration: rebuild the thread from the
 * saved snapshot, copy the captured machine stack(s) back over the
 * live ones, and longjmp into the saved jmpbuf.  Never returns.  Must
 * only run once the stack has been padded past the restore region
 * (cont_restore_0 / register_stack_extend guarantee this). */
207 NORETURN(static void cont_restore_1(rb_context_t
*));
210 cont_restore_1(rb_context_t
*cont
)
212 rb_thread_t
*th
= GET_THREAD(), *sth
= &cont
->saved_thread
;
214 /* restore thread context */
/* Continuation restore: keep the current fiber's VM stack but overlay
 * the continuation's captured VM stack contents onto it. */
215 if (cont
->type
== CONTINUATION_CONTEXT
) {
219 th
->fiber
= sth
->fiber
;
220 fib
= th
->fiber
? th
->fiber
: th
->root_fiber
;
224 GetContPtr(fib
, fcont
);
225 th
->stack_size
= fcont
->saved_thread
.stack_size
;
226 th
->stack
= fcont
->saved_thread
.stack
;
228 MEMCPY(th
->stack
, cont
->vm_stack
, VALUE
, sth
->stack_size
);
/* Fiber restore: adopt the fiber's own VM stack and local storage
 * wholesale, and make this context the current fiber. */
232 th
->stack
= sth
->stack
;
233 th
->stack_size
= sth
->stack_size
;
234 th
->local_storage
= sth
->local_storage
;
235 th
->fiber
= cont
->self
;
/* Restore the scalar thread state saved at capture time. */
239 th
->safe_level
= sth
->safe_level
;
240 th
->raised_flag
= sth
->raised_flag
;
241 th
->state
= sth
->state
;
242 th
->status
= sth
->status
;
244 th
->trap_tag
= sth
->trap_tag
;
245 th
->errinfo
= sth
->errinfo
;
246 th
->first_proc
= sth
->first_proc
;
248 /* restore machine stack */
251 /* workaround for x64 SEH */
/* Patch the saved jmpbuf's SEH frame pointer to the current frame so
 * Windows structured exception handling stays consistent after the
 * longjmp (MSVC _JUMP_BUFFER layout). */
254 ((_JUMP_BUFFER
*)(&cont
->jmpbuf
))->Frame
=
255 ((_JUMP_BUFFER
*)(&buf
))->Frame
;
/* Copy the captured machine stack back over its original location. */
258 if (cont
->machine_stack_src
) {
259 FLUSH_REGISTER_WINDOWS
;
260 MEMCPY(cont
->machine_stack_src
, cont
->machine_stack
,
261 VALUE
, cont
->machine_stack_size
);
/* IA64: also restore the register backing store. */
265 if (cont
->machine_register_stack_src
) {
266 MEMCPY(cont
->machine_register_stack_src
, cont
->machine_register_stack
,
267 VALUE
, cont
->machine_register_stack_size
);
/* Jump back into the captured setjmp point; value 1 signals re-entry. */
271 ruby_longjmp(cont
->jmpbuf
, 1);
274 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t
*, VALUE
*)));
/* IA64 helpers: C(a) declares five volatile ints per letter and E(a)
 * assigns through all five; touching 100 volatile globals forces the
 * compiler to spill registers, flushing the register stack engine
 * before the backing store is restored. */
277 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
278 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
279 static volatile int C(a
), C(b
), C(c
), C(d
), C(e
);
280 static volatile int C(f
), C(g
), C(h
), C(i
), C(j
);
281 static volatile int C(k
), C(l
), C(m
), C(n
), C(o
);
282 static volatile int C(p
), C(q
), C(r
), C(s
), C(t
);
/* Always 0, but opaque to the optimizer (non-static, no const) so the
 * dead E(...) branch in register_stack_extend cannot be removed. */
283 int rb_dummy_false
= 0;
284 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t
*, VALUE
*)));
/* IA64 only: recurse until the current register-stack backing-store
 * pointer (bsp) has grown past the region that will be overwritten
 * when the saved backing store is restored, then hand off to
 * cont_restore_1.  Never returns. */
286 register_stack_extend(rb_context_t
*cont
, VALUE
*curr_bsp
)
/* Never taken at runtime (rb_dummy_false == 0), but the compiler must
 * keep it, forcing heavy register use in every activation. */
288 if (rb_dummy_false
) {
289 /* use registers as much as possible */
290 E(a
) = E(b
) = E(c
) = E(d
) = E(e
) =
291 E(f
) = E(g
) = E(h
) = E(i
) = E(j
) =
292 E(k
) = E(l
) = E(m
) = E(n
) = E(o
) =
293 E(p
) = E(q
) = E(r
) = E(s
) = E(t
) = 0;
294 E(a
) = E(b
) = E(c
) = E(d
) = E(e
) =
295 E(f
) = E(g
) = E(h
) = E(i
) = E(j
) =
296 E(k
) = E(l
) = E(m
) = E(n
) = E(o
) =
297 E(p
) = E(q
) = E(r
) = E(s
) = E(t
) = 0;
/* Not yet past the region to be restored: recurse to push bsp higher. */
299 if (curr_bsp
< cont
->machine_register_stack_src
+cont
->machine_register_stack_size
) {
300 register_stack_extend(cont
, (VALUE
*)rb_ia64_bsp());
302 cont_restore_1(cont
);
/* First phase of restoration: recurse, burning STACK_PAD_SIZE VALUEs
 * of stack per call, until the current frame lies outside the region
 * that cont_restore_1 will overwrite with the saved machine stack.
 * Otherwise the restore would clobber its own live frame.  Handles
 * known-downward, known-upward, and runtime-detected growth
 * directions.  Never returns. */
309 cont_restore_0(rb_context_t
*cont
, VALUE
*addr_in_prev_frame
)
311 if (cont
->machine_stack_src
) {
312 #define STACK_PAD_SIZE 1024
313 VALUE space
[STACK_PAD_SIZE
];
315 #if STACK_GROW_DIRECTION < 0 /* downward */
316 if (addr_in_prev_frame
> cont
->machine_stack_src
) {
317 cont_restore_0(cont
, &space
[0]);
319 #elif STACK_GROW_DIRECTION > 0 /* upward */
320 if (addr_in_prev_frame
< cont
->machine_stack_src
+ cont
->machine_stack_size
) {
321 cont_restore_0(cont
, &space
[STACK_PAD_SIZE
-1]);
/* Direction unknown at compile time: compare against our own pad. */
324 if (addr_in_prev_frame
> &space
[0]) {
325 /* Stack grows downward */
/* NOTE(review): comparing a pointer against machine_stack_size (an
 * int) looks like an extraction artifact or a latent bug -- the
 * downward branch above compares against machine_stack_src.  Confirm
 * against the full source. */
326 if (addr_in_prev_frame
> cont
->saved_thread
.machine_stack_size
) {
327 cont_restore_0(cont
, &space
[0]);
331 /* Stack grows upward */
332 if (addr_in_prev_frame
< cont
->machine_stack_src
+ cont
->machine_stack_size
) {
333 cont_restore_0(cont
, &space
[STACK_PAD_SIZE
-1]);
/* IA64: also pad the register backing store before restoring. */
339 register_stack_extend(cont
, (VALUE
*)rb_ia64_bsp());
341 cont_restore_1(cont
);
346 * Document-class: Continuation
348 * Continuation objects are generated by
349 * <code>Kernel#callcc</code>. They hold a return address and execution
350 * context, allowing a nonlocal return to the end of the
351 * <code>callcc</code> block from anywhere within a program.
352 * Continuations are somewhat analogous to a structured version of C's
353 * <code>setjmp/longjmp</code> (although they contain more state, so
354 * you might consider them closer to threads).
358 * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
360 * puts(message = arr.shift)
361 * $cc.call unless message =~ /Max/
370 * This (somewhat contrived) example allows the inner loop to abandon
376 * for j in i*5...(i+1)*5
377 * cont.call() if j == 17
394 * callcc {|cont| block } => obj
396 * Generates a <code>Continuation</code> object, which it passes to the
397 * associated block. Performing a <em>cont</em><code>.call</code> will
398 * cause the <code>callcc</code> to return (as will falling through the
399 * end of the block). The value returned by the <code>callcc</code> is
400 * the value of the block, or the value passed to
401 * <em>cont</em><code>.call</code>. See class <code>Continuation</code>
402 * for more details. Also see <code>Kernel::throw</code> for
403 * an alternative mechanism for unwinding a call stack.
/* Kernel#callcc: capture the current context and yield the resulting
 * Continuation object to the block.  (Lines between the capture and
 * the yield -- including the re-entry branch keyed on `called` -- are
 * missing from this extraction.) */
407 rb_callcc(VALUE self
)
410 volatile VALUE val
= cont_capture(&called
);
416 return rb_yield(val
);
/* Package the argc/argv passed to cont.call / fiber resume into a
 * single VALUE.  Falls through to an Array for the general case; the
 * missing lines 422-428 presumably special-case argc == 0 (nil) and
 * argc == 1 (the lone argument) per the callcc docs above -- confirm
 * against the full source. */
421 make_passing_arg(int argc
, VALUE
*argv
)
429 return rb_ary_new4(argc
, argv
);
435 * cont.call(args, ...)
438 * Invokes the continuation. The program continues from the end of the
439 * <code>callcc</code> block. If no arguments are given, the original
440 * <code>callcc</code> returns <code>nil</code>. If one argument is
441 * given, <code>callcc</code> returns it. Otherwise, an array
442 * containing <i>args</i> is returned.
444 * callcc {|cont| cont.call } #=> nil
445 * callcc {|cont| cont.call 1 } #=> 1
446 * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
/* Continuation#call / #[]: validate that the continuation may be
 * invoked from the current thread/trap/fiber, stash the passed
 * arguments as the capture-site return value, and jump back via
 * cont_restore_0.  Does not return (the final `return Qnil` is
 * unreachable). */
450 rb_cont_call(int argc
, VALUE
*argv
, VALUE contval
)
453 rb_thread_t
*th
= GET_THREAD();
454 GetContPtr(contval
, cont
);
/* Continuations are bound to the thread that captured them. */
456 if (cont
->saved_thread
.self
!= th
->self
) {
457 rb_raise(rb_eRuntimeError
, "continuation called across threads");
459 if (cont
->saved_thread
.trap_tag
!= th
->trap_tag
) {
460 rb_raise(rb_eRuntimeError
, "continuation called across trap");
/* If captured inside a fiber, that same fiber must be current and
 * still alive. */
462 if (cont
->saved_thread
.fiber
) {
464 GetContPtr(cont
->saved_thread
.fiber
, fcont
);
466 if (th
->fiber
!= cont
->saved_thread
.fiber
) {
467 rb_raise(rb_eRuntimeError
, "continuation called across fiber");
471 rb_raise(rb_eRuntimeError
, "continuation called dead fiber");
/* This becomes the value callcc returns on re-entry. */
475 cont
->value
= make_passing_arg(argc
, argv
);
477 cont_restore_0(cont
, &contval
);
478 return Qnil
; /* unreachable */
/* VM stack size (in VALUEs) given to each new fiber. */
485 #define FIBER_VM_STACK_SIZE (4 * 1024)
/* Allocate a context of the given class and tag it as a fiber.
 * (Trailing lines of this function are missing from the extraction.) */
487 static rb_context_t
*
488 fiber_alloc(VALUE klass
)
490 rb_context_t
*cont
= cont_new(klass
);
492 cont
->type
= FIBER_CONTEXT
;
/* Build a new Fiber object around proc: allocate a fresh heap VM
 * stack, hand-initialize a bottom control frame on the saved thread,
 * give the fiber its own local storage table, and seed its jmpbuf
 * from the thread's root jmpbuf so the first switch lands in
 * rb_fiber_start.  (Several interior lines are missing from this
 * extraction.) */
499 fiber_new(VALUE klass
, VALUE proc
)
501 rb_context_t
*cont
= fiber_alloc(klass
);
502 VALUE contval
= cont
->self
;
503 rb_thread_t
*th
= &cont
->saved_thread
;
/* Fresh VM stack for the fiber, independent of the creator's. */
509 th
->stack_size
= FIBER_VM_STACK_SIZE
;
510 th
->stack
= ALLOC_N(VALUE
, th
->stack_size
);
/* Control frame pointer starts at the top (stack grows toward it). */
512 th
->cfp
= (void *)(th
->stack
+ th
->stack_size
);
515 th
->cfp
->sp
= th
->stack
+ 1;
517 th
->cfp
->lfp
= th
->stack
;
519 th
->cfp
->dfp
= th
->stack
;
520 th
->cfp
->self
= Qnil
;
524 th
->cfp
->block_iseq
= 0;
/* Each fiber gets its own fiber-local variable table (freed in
 * cont_free). */
526 th
->local_storage
= st_init_numtable();
/* The proc to run on first resume (see rb_fiber_start). */
528 th
->first_proc
= proc
;
530 MEMCPY(&cont
->jmpbuf
, &th
->root_jmpbuf
, rb_jmpbuf_t
, 1);
/* C API: create a Fiber that runs the given C function (wrapped in a
 * Proc via rb_proc_new) with obj as its argument. */
536 rb_fiber_new(VALUE (*func
)(ANYARGS
), VALUE obj
)
538 return fiber_new(rb_cFiber
, rb_proc_new(func
, obj
));
/* Fiber.new: create a fiber running the given block. */
542 rb_fiber_s_new(VALUE self
)
544 return fiber_new(self
, rb_block_proc());
/* Body of return_fiber (the function header is missing from this
 * extraction): pick the fiber that Fiber.yield / termination should
 * switch back to -- the resuming fiber recorded in cont->prev, or the
 * root fiber when there is none; yielding from the root fiber itself
 * is an error. */
551 VALUE curr
= rb_fiber_current();
552 GetContPtr(curr
, cont
);
/* No recorded resumer (fiber was entered via transfer, or is root). */
554 if (cont
->prev
== Qnil
) {
555 rb_thread_t
*th
= GET_THREAD();
557 if (th
->root_fiber
!= curr
) {
558 return th
->root_fiber
;
561 rb_raise(rb_eFiberError
, "can't yield from root fiber");
/* Normal case: return to the fiber that resumed us.  (The lines that
 * clear cont->prev and return prev are missing from the extraction.) */
565 VALUE prev
= cont
->prev
;
/* Forward declaration; definition appears further down. */
571 VALUE
rb_fiber_transfer(VALUE fib
, int argc
, VALUE
*argv
);
/* Finish a fiber: mark it dead and hand its final value to whichever
 * fiber return_fiber() selects.  Does not return to the caller. */
574 rb_fiber_terminate(rb_context_t
*cont
)
576 VALUE value
= cont
->value
;
577 cont
->alive
= Qfalse
;
578 rb_fiber_transfer(return_fiber(), 1, &value
);
/* Entry point executed the first time a fiber is switched to (the
 * function header and several interior lines are missing from this
 * extraction).  Runs the fiber's stored proc under EXEC_TAG; on a
 * non-local exit it propagates the error to the parent thread, then
 * terminates the fiber.  Control never reaches past
 * rb_fiber_terminate. */
584 rb_thread_t
*th
= GET_THREAD();
590 GetContPtr(th
->fiber
, cont
);
592 if ((state
= EXEC_TAG()) == 0) {
593 GetProcPtr(cont
->saved_thread
.first_proc
, proc
);
597 th
->local_lfp
= proc
->block
.lfp
;
598 th
->local_svar
= Qnil
;
/* The fiber's proc runs here; its result becomes the value delivered
 * back to the resumer. */
600 cont
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
, 1, &args
, 0);
/* Non-zero tag state: forward the raised error/throw to the thread
 * and request an interrupt check so it is re-raised in the resumer. */
606 th
->thrown_errinfo
= th
->errinfo
;
610 vm_make_jump_tag_but_local_jump(state
, th
->errinfo
);
612 RUBY_VM_SET_INTERRUPT(th
);
615 rb_fiber_terminate(cont
);
616 rb_bug("rb_fiber_start: unreachable");
/* Body of rb_fiber_current (header missing from this extraction):
 * return the VALUE of the currently running fiber, lazily creating
 * the root-fiber context for the thread on first use. */
622 rb_thread_t
*th
= GET_THREAD();
/* No fiber yet: wrap the main execution context as the root fiber. */
623 if (th
->fiber
== 0) {
625 rb_context_t
*cont
= fiber_alloc(rb_cFiber
);
626 cont
->type
= ROOT_FIBER_CONTEXT
;
627 th
->root_fiber
= th
->fiber
= cont
->self
;
/* Save the currently running fiber's state (creating the root-fiber
 * context on first switch), capture its machine stack, and setjmp so
 * control can come back here later.  The nonzero-setjmp branch runs
 * when this fiber is switched back to.  (The return paths are missing
 * from this extraction.) */
633 fiber_store(rb_context_t
*next_cont
)
635 rb_thread_t
*th
= GET_THREAD();
/* An existing current fiber: snapshot the live thread into it. */
639 GetContPtr(th
->fiber
, cont
);
640 cont
->saved_thread
= *th
;
643 /* create current fiber */
644 cont
= fiber_alloc(rb_cFiber
); /* no need to allocate vm stack */
645 cont
->type
= ROOT_FIBER_CONTEXT
;
646 th
->root_fiber
= th
->fiber
= cont
->self
;
649 cont_save_machine_stack(th
, cont
);
/* Re-entered here when some other fiber switches back to this one. */
651 if (ruby_setjmp(cont
->jmpbuf
)) {
653 GetContPtr(th
->fiber
, cont
);
/* Core fiber switch used by both resume and transfer.  Validates the
 * target (same thread, same trap level, still alive), records the
 * resumer in prev when is_resume is set, stores the passed arguments
 * as the value delivered to the target, saves the current fiber via
 * fiber_store, and restores the target's context.  (The final return
 * of cont->value is missing from this extraction.) */
662 fiber_switch(VALUE fib
, int argc
, VALUE
*argv
, int is_resume
)
666 rb_thread_t
*th
= GET_THREAD();
668 GetContPtr(fib
, cont
);
670 if (cont
->saved_thread
.self
!= th
->self
) {
671 rb_raise(rb_eFiberError
, "fiber called across threads");
673 else if (cont
->saved_thread
.trap_tag
!= th
->trap_tag
) {
674 rb_raise(rb_eFiberError
, "fiber called across trap");
676 else if (!cont
->alive
) {
677 rb_raise(rb_eFiberError
, "dead fiber called");
/* resume (not transfer): remember who to return to on Fiber.yield;
 * this is also what makes a double resume detectable. */
681 cont
->prev
= rb_fiber_current();
684 cont
->value
= make_passing_arg(argc
, argv
);
/* Qundef from fiber_store means this is the outbound leg: jump into
 * the target fiber's saved context.  Control comes back via setjmp. */
686 if ((value
= fiber_store(cont
)) == Qundef
) {
687 cont_restore_0(cont
, &value
);
688 rb_bug("rb_fiber_resume: unreachable");
/* Back in this fiber: service pending interrupts before returning. */
691 RUBY_VM_CHECK_INTS();
/* Switch to fib without recording a resumer (transfer semantics). */
697 rb_fiber_transfer(VALUE fib
, int argc
, VALUE
*argv
)
699 return fiber_switch(fib
, argc
, argv
, 0);
/* Resume fib, recording the current fiber as its return target.
 * A fiber that already has a pending resumer (prev set) cannot be
 * resumed again until it yields. */
703 rb_fiber_resume(VALUE fib
, int argc
, VALUE
*argv
)
706 GetContPtr(fib
, cont
);
708 if (cont
->prev
!= Qnil
) {
709 rb_raise(rb_eFiberError
, "double resume");
712 return fiber_switch(fib
, argc
, argv
, 1);
/* Yield from the current fiber back to its resumer (or root fiber),
 * passing argc/argv as the resume's return value. */
716 rb_fiber_yield(int argc
, VALUE
*argv
)
718 return rb_fiber_transfer(return_fiber(), argc
, argv
);
/* Fiber#alive?: report whether the fiber can still be resumed.  (The
 * return statement reading cont->alive is missing from this
 * extraction.) */
722 rb_fiber_alive_p(VALUE fib
)
725 GetContPtr(fib
, cont
);
/* Fiber#resume method wrapper. */
730 rb_fiber_m_resume(int argc
, VALUE
*argv
, VALUE fib
)
732 return rb_fiber_resume(fib
, argc
, argv
);
/* Fiber#transfer method wrapper. */
736 rb_fiber_m_transfer(int argc
, VALUE
*argv
, VALUE fib
)
738 return rb_fiber_transfer(fib
, argc
, argv
);
/* Fiber.yield singleton method wrapper. */
742 rb_fiber_s_yield(int argc
, VALUE
*argv
, VALUE klass
)
744 return rb_fiber_yield(argc
, argv
);
/* Fiber.current singleton method wrapper. */
748 rb_fiber_s_current(VALUE klass
)
750 return rb_fiber_current();
/* Body of the Fiber initializer (the Init_ function header is missing
 * from this extraction): define the Fiber class, its error class, and
 * the core resume/yield API.  Allocation is undefined so fibers can
 * only be made via Fiber.new with a block. */
756 rb_cFiber
= rb_define_class("Fiber", rb_cObject
);
757 rb_undef_alloc_func(rb_cFiber
);
758 rb_eFiberError
= rb_define_class("FiberError", rb_eStandardError
);
759 rb_define_singleton_method(rb_cFiber
, "new", rb_fiber_s_new
, 0);
760 rb_define_singleton_method(rb_cFiber
, "yield", rb_fiber_s_yield
, -1);
761 rb_define_method(rb_cFiber
, "resume", rb_fiber_m_resume
, -1);
/* Register the Continuation class, its call/[] methods, and the global
 * Kernel#callcc.  Both alloc and new are undefined: continuations can
 * only be created by callcc. */
765 Init_Continuation_body(void)
767 rb_cContinuation
= rb_define_class("Continuation", rb_cObject
);
768 rb_undef_alloc_func(rb_cContinuation
);
769 rb_undef_method(CLASS_OF(rb_cContinuation
), "new");
770 rb_define_method(rb_cContinuation
, "call", rb_cont_call
, -1);
771 rb_define_method(rb_cContinuation
, "[]", rb_cont_call
, -1);
772 rb_define_global_function("callcc", rb_callcc
, 0);
/* Register the coroutine-style extensions on Fiber: transfer, alive?,
 * and Fiber.current.  (The closing lines of this function fall outside
 * the visible extraction.) */
776 Init_Fiber_as_Coroutine(void)
778 rb_define_method(rb_cFiber
, "transfer", rb_fiber_m_transfer
, -1);
779 rb_define_method(rb_cFiber
, "alive?", rb_fiber_alive_p
, 0);
780 rb_define_singleton_method(rb_cFiber
, "current", rb_fiber_s_current
, 0);