1 /**********************************************************************
6 created at: Thu May 23 09:03:43 2007
8 Copyright (C) 2007 Koichi Sasada
10 **********************************************************************/
12 #include "ruby/ruby.h"
15 #include "eval_intern.h"
/*
 * Kind of saved execution context held by an rb_context_t.
 * NOTE(review): the FIBER_CONTEXT enumerator line was lost in extraction;
 * it is restored here as = 1 (implied by ROOT_FIBER_CONTEXT = 2 and by the
 * uses of FIBER_CONTEXT in cont_free/fiber_alloc) — TODO confirm upstream.
 */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};
23 typedef struct rb_context_struct
{
28 VALUE
*machine_stack_src
;
30 VALUE
*machine_register_stack
;
31 VALUE
*machine_register_stack_src
;
32 int machine_register_stack_size
;
34 rb_thread_t saved_thread
;
36 int machine_stack_size
;
39 enum context_type type
;
42 static VALUE rb_cContinuation
;
43 static VALUE rb_cFiber
;
44 static VALUE rb_eFiberError
;
46 #define GetContPtr(obj, ptr) \
47 Data_Get_Struct(obj, rb_context_t, ptr)
49 NOINLINE(static VALUE
cont_capture(volatile int *stat
));
51 void rb_thread_mark(rb_thread_t
*th
);
56 RUBY_MARK_ENTER("cont");
58 rb_context_t
*cont
= ptr
;
59 rb_gc_mark(cont
->value
);
60 rb_gc_mark(cont
->prev
);
61 rb_thread_mark(&cont
->saved_thread
);
64 rb_gc_mark_locations(cont
->vm_stack
,
65 cont
->vm_stack
+ cont
->saved_thread
.stack_size
);
68 if (cont
->machine_stack
) {
69 rb_gc_mark_locations(cont
->machine_stack
,
70 cont
->machine_stack
+ cont
->machine_stack_size
);
73 if (cont
->machine_register_stack
) {
74 rb_gc_mark_locations(cont
->machine_register_stack
,
75 cont
->machine_register_stack
+ cont
->machine_register_stack_size
);
79 RUBY_MARK_LEAVE("cont");
85 RUBY_FREE_ENTER("cont");
87 rb_context_t
*cont
= ptr
;
88 RUBY_FREE_UNLESS_NULL(cont
->saved_thread
.stack
);
89 RUBY_FREE_UNLESS_NULL(cont
->machine_stack
);
91 RUBY_FREE_UNLESS_NULL(cont
->machine_register_stack
);
93 RUBY_FREE_UNLESS_NULL(cont
->vm_stack
);
95 if (cont
->type
== FIBER_CONTEXT
) {
96 st_free_table(cont
->saved_thread
.local_storage
);
101 RUBY_FREE_LEAVE("cont");
105 cont_save_machine_stack(rb_thread_t
*th
, rb_context_t
*cont
)
108 rb_thread_t
*sth
= &cont
->saved_thread
;
110 SET_MACHINE_STACK_END(&th
->machine_stack_end
);
112 th
->machine_register_stack_end
= rb_ia64_bsp();
115 if (th
->machine_stack_start
> th
->machine_stack_end
) {
116 size
= cont
->machine_stack_size
= th
->machine_stack_start
- th
->machine_stack_end
;
117 cont
->machine_stack_src
= th
->machine_stack_end
;
120 size
= cont
->machine_stack_size
= th
->machine_stack_end
- th
->machine_stack_start
;
121 cont
->machine_stack_src
= th
->machine_stack_start
;
124 if (cont
->machine_stack
) {
125 REALLOC_N(cont
->machine_stack
, VALUE
, size
);
128 cont
->machine_stack
= ALLOC_N(VALUE
, size
);
131 FLUSH_REGISTER_WINDOWS
;
132 MEMCPY(cont
->machine_stack
, cont
->machine_stack_src
, VALUE
, size
);
136 size
= cont
->machine_register_stack_size
= th
->machine_register_stack_end
- th
->machine_register_stack_start
;
137 cont
->machine_register_stack_src
= th
->machine_register_stack_start
;
138 if (cont
->machine_register_stack
) {
139 REALLOC_N(cont
->machine_register_stack
, VALUE
, size
);
142 cont
->machine_register_stack
= ALLOC_N(VALUE
, size
);
145 MEMCPY(cont
->machine_register_stack
, cont
->machine_register_stack_src
, VALUE
, size
);
148 sth
->machine_stack_start
= sth
->machine_stack_end
= 0;
150 sth
->machine_register_stack_start
= sth
->machine_register_stack_end
= 0;
154 static rb_context_t
*
155 cont_new(VALUE klass
)
158 volatile VALUE contval
;
159 rb_thread_t
*th
= GET_THREAD();
161 contval
= Data_Make_Struct(klass
, rb_context_t
, cont_mark
, cont_free
, cont
);
163 cont
->self
= contval
;
166 /* save thread context */
167 cont
->saved_thread
= *th
;
172 void vm_stack_to_heap(rb_thread_t
*th
);
175 cont_capture(volatile int *stat
)
178 rb_thread_t
*th
= GET_THREAD(), *sth
;
179 volatile VALUE contval
;
181 vm_stack_to_heap(th
);
182 cont
= cont_new(rb_cContinuation
);
183 contval
= cont
->self
;
184 sth
= &cont
->saved_thread
;
186 cont
->vm_stack
= ALLOC_N(VALUE
, th
->stack_size
);
187 MEMCPY(cont
->vm_stack
, th
->stack
, VALUE
, th
->stack_size
);
190 cont_save_machine_stack(th
, cont
);
192 if (ruby_setjmp(cont
->jmpbuf
)) {
206 NORETURN(static void cont_restore_1(rb_context_t
*));
209 cont_restore_1(rb_context_t
*cont
)
211 rb_thread_t
*th
= GET_THREAD(), *sth
= &cont
->saved_thread
;
213 /* restore thread context */
214 if (cont
->type
== CONTINUATION_CONTEXT
) {
218 th
->fiber
= sth
->fiber
;
219 fib
= th
->fiber
? th
->fiber
: th
->root_fiber
;
223 GetContPtr(fib
, fcont
);
224 th
->stack_size
= fcont
->saved_thread
.stack_size
;
225 th
->stack
= fcont
->saved_thread
.stack
;
227 MEMCPY(th
->stack
, cont
->vm_stack
, VALUE
, sth
->stack_size
);
231 th
->stack
= sth
->stack
;
232 th
->stack_size
= sth
->stack_size
;
233 th
->local_storage
= sth
->local_storage
;
234 th
->fiber
= cont
->self
;
238 th
->safe_level
= sth
->safe_level
;
239 th
->raised_flag
= sth
->raised_flag
;
240 th
->state
= sth
->state
;
241 th
->status
= sth
->status
;
243 th
->trap_tag
= sth
->trap_tag
;
244 th
->errinfo
= sth
->errinfo
;
245 th
->first_proc
= sth
->first_proc
;
247 /* restore machine stack */
250 /* workaround for x64 SEH */
253 ((_JUMP_BUFFER
*)(&cont
->jmpbuf
))->Frame
=
254 ((_JUMP_BUFFER
*)(&buf
))->Frame
;
257 if (cont
->machine_stack_src
) {
258 FLUSH_REGISTER_WINDOWS
;
259 MEMCPY(cont
->machine_stack_src
, cont
->machine_stack
,
260 VALUE
, cont
->machine_stack_size
);
264 if (cont
->machine_register_stack_src
) {
265 MEMCPY(cont
->machine_register_stack_src
, cont
->machine_register_stack
,
266 VALUE
, cont
->machine_register_stack_size
);
270 ruby_longjmp(cont
->jmpbuf
, 1);
273 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t
*, VALUE
*)));
276 #define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
277 #define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
278 static volatile int C(a
), C(b
), C(c
), C(d
), C(e
);
279 static volatile int C(f
), C(g
), C(h
), C(i
), C(j
);
280 static volatile int C(k
), C(l
), C(m
), C(n
), C(o
);
281 static volatile int C(p
), C(q
), C(r
), C(s
), C(t
);
282 int rb_dummy_false
= 0;
283 NORETURN(NOINLINE(static void register_stack_extend(rb_context_t
*, VALUE
*)));
285 register_stack_extend(rb_context_t
*cont
, VALUE
*curr_bsp
)
287 if (rb_dummy_false
) {
288 /* use registers as much as possible */
289 E(a
) = E(b
) = E(c
) = E(d
) = E(e
) =
290 E(f
) = E(g
) = E(h
) = E(i
) = E(j
) =
291 E(k
) = E(l
) = E(m
) = E(n
) = E(o
) =
292 E(p
) = E(q
) = E(r
) = E(s
) = E(t
) = 0;
293 E(a
) = E(b
) = E(c
) = E(d
) = E(e
) =
294 E(f
) = E(g
) = E(h
) = E(i
) = E(j
) =
295 E(k
) = E(l
) = E(m
) = E(n
) = E(o
) =
296 E(p
) = E(q
) = E(r
) = E(s
) = E(t
) = 0;
298 if (curr_bsp
< cont
->machine_register_stack_src
+cont
->machine_register_stack_size
) {
299 register_stack_extend(cont
, (VALUE
*)rb_ia64_bsp());
301 cont_restore_1(cont
);
308 cont_restore_0(rb_context_t
*cont
, VALUE
*addr_in_prev_frame
)
310 if (cont
->machine_stack_src
) {
311 #define STACK_PAD_SIZE 1024
312 VALUE space
[STACK_PAD_SIZE
];
314 #if STACK_GROW_DIRECTION < 0 /* downward */
315 if (addr_in_prev_frame
> cont
->machine_stack_src
) {
316 cont_restore_0(cont
, &space
[0]);
318 #elif STACK_GROW_DIRECTION > 0 /* upward */
319 if (addr_in_prev_frame
< cont
->machine_stack_src
+ cont
->machine_stack_size
) {
320 cont_restore_0(cont
, &space
[STACK_PAD_SIZE
-1]);
323 if (addr_in_prev_frame
> &space
[0]) {
324 /* Stack grows downward */
325 if (addr_in_prev_frame
> cont
->machine_stack_src
) {
326 cont_restore_0(cont
, &space
[0]);
330 /* Stack grows upward */
331 if (addr_in_prev_frame
< cont
->machine_stack_src
+ cont
->machine_stack_size
) {
332 cont_restore_0(cont
, &space
[STACK_PAD_SIZE
-1]);
338 register_stack_extend(cont
, (VALUE
*)rb_ia64_bsp());
340 cont_restore_1(cont
);
/*
 *  Document-class: Continuation
 *
 *  Continuation objects are generated by
 *  <code>Kernel#callcc</code>. They hold a return address and execution
 *  context, allowing a nonlocal return to the end of the
 *  <code>callcc</code> block from anywhere within a program.
 *  Continuations are somewhat analogous to a structured version of C's
 *  <code>setjmp/longjmp</code> (although they contain more state, so
 *  you might consider them closer to threads).
 *
 *  For instance:
 *
 *     arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
 *     callcc {|cc| $cc = cc }
 *     puts(message = arr.shift)
 *     $cc.call unless message =~ /Max/
 *
 *  This (somewhat contrived) example allows the inner loop to abandon
 *  processing early:
 *
 *     callcc {|cont|
 *       for i in 0..4
 *         for j in i*5...(i+1)*5
 *           cont.call() if j == 17
 *         end
 *       end
 *     }
 */

/*
 *  call-seq:
 *     callcc {|cont| block }   =>  obj
 *
 *  Generates a <code>Continuation</code> object, which it passes to the
 *  associated block. Performing a <em>cont</em><code>.call</code> will
 *  cause the <code>callcc</code> to return (as will falling through the
 *  end of the block). The value returned by the <code>callcc</code> is
 *  the value of the block, or the value passed to
 *  <em>cont</em><code>.call</code>. See class <code>Continuation</code>
 *  for more details. Also see <code>Kernel::throw</code> for
 *  an alternative mechanism for unwinding a call stack.
 */
406 rb_callcc(VALUE self
)
409 volatile VALUE val
= cont_capture(&called
);
415 return rb_yield(val
);
420 make_passing_arg(int argc
, VALUE
*argv
)
428 return rb_ary_new4(argc
, argv
);
/*
 *  call-seq:
 *     cont.call(args, ...)
 *     cont[args, ...]
 *
 *  Invokes the continuation. The program continues from the end of the
 *  <code>callcc</code> block. If no arguments are given, the original
 *  <code>callcc</code> returns <code>nil</code>. If one argument is
 *  given, <code>callcc</code> returns it. Otherwise, an array
 *  containing <i>args</i> is returned.
 *
 *     callcc {|cont|  cont.call }           #=> nil
 *     callcc {|cont|  cont.call 1 }         #=> 1
 *     callcc {|cont|  cont.call 1, 2, 3 }   #=> [1, 2, 3]
 */
449 rb_cont_call(int argc
, VALUE
*argv
, VALUE contval
)
452 rb_thread_t
*th
= GET_THREAD();
453 GetContPtr(contval
, cont
);
455 if (cont
->saved_thread
.self
!= th
->self
) {
456 rb_raise(rb_eRuntimeError
, "continuation called across threads");
458 if (cont
->saved_thread
.trap_tag
!= th
->trap_tag
) {
459 rb_raise(rb_eRuntimeError
, "continuation called across trap");
461 if (cont
->saved_thread
.fiber
) {
463 GetContPtr(cont
->saved_thread
.fiber
, fcont
);
465 if (th
->fiber
!= cont
->saved_thread
.fiber
) {
466 rb_raise(rb_eRuntimeError
, "continuation called across fiber");
470 rb_raise(rb_eRuntimeError
, "continuation called dead fiber");
474 cont
->value
= make_passing_arg(argc
, argv
);
476 cont_restore_0(cont
, &contval
);
477 return Qnil
; /* unreachable */
484 #define FIBER_VM_STACK_SIZE (4 * 1024)
486 static rb_context_t
*
487 fiber_alloc(VALUE klass
)
489 rb_context_t
*cont
= cont_new(klass
);
491 cont
->type
= FIBER_CONTEXT
;
498 fiber_new(VALUE klass
, VALUE proc
)
500 rb_context_t
*cont
= fiber_alloc(klass
);
501 VALUE contval
= cont
->self
;
502 rb_thread_t
*th
= &cont
->saved_thread
;
508 th
->stack_size
= FIBER_VM_STACK_SIZE
;
509 th
->stack
= ALLOC_N(VALUE
, th
->stack_size
);
511 th
->cfp
= (void *)(th
->stack
+ th
->stack_size
);
514 th
->cfp
->sp
= th
->stack
+ 1;
516 th
->cfp
->lfp
= th
->stack
;
518 th
->cfp
->dfp
= th
->stack
;
519 th
->cfp
->self
= Qnil
;
523 th
->cfp
->block_iseq
= 0;
525 th
->local_storage
= st_init_numtable();
527 th
->first_proc
= proc
;
529 MEMCPY(&cont
->jmpbuf
, &th
->root_jmpbuf
, rb_jmpbuf_t
, 1);
535 rb_fiber_new(VALUE (*func
)(ANYARGS
), VALUE obj
)
537 return fiber_new(rb_cFiber
, rb_proc_new(func
, obj
));
541 rb_fiber_s_new(VALUE self
)
543 return fiber_new(self
, rb_block_proc());
550 VALUE curr
= rb_fiber_current();
551 GetContPtr(curr
, cont
);
553 if (cont
->prev
== Qnil
) {
554 rb_thread_t
*th
= GET_THREAD();
556 if (th
->root_fiber
!= curr
) {
557 return th
->root_fiber
;
560 rb_raise(rb_eFiberError
, "can't yield from root fiber");
564 VALUE prev
= cont
->prev
;
570 VALUE
rb_fiber_transfer(VALUE fib
, int argc
, VALUE
*argv
);
573 rb_fiber_terminate(rb_context_t
*cont
)
575 VALUE value
= cont
->value
;
576 cont
->alive
= Qfalse
;
577 rb_fiber_transfer(return_fiber(), 1, &value
);
583 rb_thread_t
*th
= GET_THREAD();
589 GetContPtr(th
->fiber
, cont
);
591 if ((state
= EXEC_TAG()) == 0) {
592 GetProcPtr(cont
->saved_thread
.first_proc
, proc
);
596 th
->local_lfp
= proc
->block
.lfp
;
597 th
->local_svar
= Qnil
;
599 cont
->value
= vm_invoke_proc(th
, proc
, proc
->block
.self
, 1, &args
, 0);
605 th
->thrown_errinfo
= th
->errinfo
;
609 vm_make_jump_tag_but_local_jump(state
, th
->errinfo
);
611 RUBY_VM_SET_INTERRUPT(th
);
614 rb_fiber_terminate(cont
);
615 rb_bug("rb_fiber_start: unreachable");
621 rb_thread_t
*th
= GET_THREAD();
622 if (th
->fiber
== 0) {
624 rb_context_t
*cont
= fiber_alloc(rb_cFiber
);
625 cont
->type
= ROOT_FIBER_CONTEXT
;
626 th
->root_fiber
= th
->fiber
= cont
->self
;
632 fiber_store(rb_context_t
*next_cont
)
634 rb_thread_t
*th
= GET_THREAD();
638 GetContPtr(th
->fiber
, cont
);
639 cont
->saved_thread
= *th
;
642 /* create current fiber */
643 cont
= fiber_alloc(rb_cFiber
); /* no need to allocate vm stack */
644 cont
->type
= ROOT_FIBER_CONTEXT
;
645 th
->root_fiber
= th
->fiber
= cont
->self
;
648 cont_save_machine_stack(th
, cont
);
650 if (ruby_setjmp(cont
->jmpbuf
)) {
652 GetContPtr(th
->fiber
, cont
);
661 fiber_switch(VALUE fib
, int argc
, VALUE
*argv
, int is_resume
)
665 rb_thread_t
*th
= GET_THREAD();
667 GetContPtr(fib
, cont
);
669 if (cont
->saved_thread
.self
!= th
->self
) {
670 rb_raise(rb_eFiberError
, "fiber called across threads");
672 else if (cont
->saved_thread
.trap_tag
!= th
->trap_tag
) {
673 rb_raise(rb_eFiberError
, "fiber called across trap");
675 else if (!cont
->alive
) {
676 rb_raise(rb_eFiberError
, "dead fiber called");
680 cont
->prev
= rb_fiber_current();
683 cont
->value
= make_passing_arg(argc
, argv
);
685 if ((value
= fiber_store(cont
)) == Qundef
) {
686 cont_restore_0(cont
, &value
);
687 rb_bug("rb_fiber_resume: unreachable");
690 RUBY_VM_CHECK_INTS();
696 rb_fiber_transfer(VALUE fib
, int argc
, VALUE
*argv
)
698 return fiber_switch(fib
, argc
, argv
, 0);
702 rb_fiber_resume(VALUE fib
, int argc
, VALUE
*argv
)
705 GetContPtr(fib
, cont
);
707 if (cont
->prev
!= Qnil
) {
708 rb_raise(rb_eFiberError
, "double resume");
711 return fiber_switch(fib
, argc
, argv
, 1);
715 rb_fiber_yield(int argc
, VALUE
*argv
)
717 return rb_fiber_transfer(return_fiber(), argc
, argv
);
721 rb_fiber_alive_p(VALUE fib
)
724 GetContPtr(fib
, cont
);
729 rb_fiber_m_resume(int argc
, VALUE
*argv
, VALUE fib
)
731 return rb_fiber_resume(fib
, argc
, argv
);
735 rb_fiber_m_transfer(int argc
, VALUE
*argv
, VALUE fib
)
737 return rb_fiber_transfer(fib
, argc
, argv
);
741 rb_fiber_s_yield(int argc
, VALUE
*argv
, VALUE klass
)
743 return rb_fiber_yield(argc
, argv
);
747 rb_fiber_s_current(VALUE klass
)
749 return rb_fiber_current();
755 rb_cFiber
= rb_define_class("Fiber", rb_cObject
);
756 rb_undef_alloc_func(rb_cFiber
);
757 rb_eFiberError
= rb_define_class("FiberError", rb_eStandardError
);
758 rb_define_singleton_method(rb_cFiber
, "new", rb_fiber_s_new
, 0);
759 rb_define_singleton_method(rb_cFiber
, "yield", rb_fiber_s_yield
, -1);
760 rb_define_method(rb_cFiber
, "resume", rb_fiber_m_resume
, -1);
764 Init_Continuation_body(void)
766 rb_cContinuation
= rb_define_class("Continuation", rb_cObject
);
767 rb_undef_alloc_func(rb_cContinuation
);
768 rb_undef_method(CLASS_OF(rb_cContinuation
), "new");
769 rb_define_method(rb_cContinuation
, "call", rb_cont_call
, -1);
770 rb_define_method(rb_cContinuation
, "[]", rb_cont_call
, -1);
771 rb_define_global_function("callcc", rb_callcc
, 0);
775 Init_Fiber_as_Coroutine(void)
777 rb_define_method(rb_cFiber
, "transfer", rb_fiber_m_transfer
, -1);
778 rb_define_method(rb_cFiber
, "alive?", rb_fiber_alive_p
, 0);
779 rb_define_singleton_method(rb_cFiber
, "current", rb_fiber_s_current
, 0);