1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
11 #include "ruby/ruby.h"
12 #include "ruby/node.h"
14 #include "ruby/encoding.h"
17 #include "insnhelper.h"
18 #include "vm_insnhelper.c"
27 VALUE ruby_vm_global_state_version
= 1;
28 rb_thread_t
*ruby_current_thread
= 0;
29 rb_vm_t
*ruby_current_vm
= 0;
31 void vm_analysis_operand(int insn
, int n
, VALUE op
);
32 void vm_analysis_register(int reg
, int isset
);
33 void vm_analysis_insn(int insn
);
35 static NODE
*lfp_set_special_cref(VALUE
*lfp
, NODE
* cref
);
38 static VALUE finish_insn_seq
[1] = { BIN(finish_SC_ax_ax
) };
39 #elif OPT_CALL_THREADED_CODE
40 static VALUE
const finish_insn_seq
[1] = { 0 };
42 static VALUE finish_insn_seq
[1] = { BIN(finish
) };
/*
 * Invalidate VM-wide cached state by bumping the global state version.
 * NOTE(review): INC_VM_STATE_VERSION is defined elsewhere; presumably it
 * increments ruby_vm_global_state_version (declared above) — confirm.
 */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
51 /* control stack frame */
54 rb_vm_set_finish_env(rb_thread_t
*th
)
56 vm_push_frame(th
, 0, FRAME_MAGIC_FINISH
,
57 Qnil
, th
->cfp
->lfp
[0], 0,
59 th
->cfp
->pc
= (VALUE
*)&finish_insn_seq
[0];
64 rb_vm_set_top_stack(rb_thread_t
*th
, VALUE iseqval
)
67 GetISeqPtr(iseqval
, iseq
);
69 if (iseq
->type
!= ISEQ_TYPE_TOP
) {
70 rb_raise(rb_eTypeError
, "Not a toplevel InstructionSequence");
74 rb_vm_set_finish_env(th
);
76 vm_push_frame(th
, iseq
, FRAME_MAGIC_TOP
,
77 th
->top_self
, 0, iseq
->iseq_encoded
,
78 th
->cfp
->sp
, 0, iseq
->local_size
);
82 rb_vm_set_eval_stack(rb_thread_t
*th
, VALUE iseqval
)
85 rb_block_t
*block
= th
->base_block
;
86 GetISeqPtr(iseqval
, iseq
);
89 rb_vm_set_finish_env(th
);
90 vm_push_frame(th
, iseq
, FRAME_MAGIC_EVAL
, block
->self
,
91 GC_GUARDED_PTR(block
->dfp
), iseq
->iseq_encoded
,
92 th
->cfp
->sp
, block
->lfp
, iseq
->local_size
);
101 RUBY_FREE_ENTER("env");
104 RUBY_FREE_UNLESS_NULL(env
->env
);
107 RUBY_FREE_LEAVE("env");
114 RUBY_MARK_ENTER("env");
118 /* TODO: should mark more restricted range */
119 RUBY_GC_INFO("env->env\n");
120 rb_gc_mark_locations(env
->env
, env
->env
+ env
->env_size
);
123 RUBY_GC_INFO("env->prev_envval\n");
124 RUBY_MARK_UNLESS_NULL(env
->prev_envval
);
125 RUBY_MARK_UNLESS_NULL(env
->block
.proc
);
127 if (env
->block
.iseq
) {
128 if (BUILTIN_TYPE(env
->block
.iseq
) == T_NODE
) {
129 RUBY_MARK_UNLESS_NULL((VALUE
)env
->block
.iseq
);
132 RUBY_MARK_UNLESS_NULL(env
->block
.iseq
->self
);
136 RUBY_MARK_LEAVE("env");
144 obj
= Data_Make_Struct(rb_cEnv
, rb_env_t
, env_mark
, env_free
, env
);
146 env
->prev_envval
= 0;
151 static VALUE
check_env_value(VALUE envval
);
154 check_env(rb_env_t
*env
)
157 printf("envptr: %p\n", &env
->block
.dfp
[0]);
158 printf("orphan: %p\n", (void *)env
->block
.dfp
[1]);
159 printf("inheap: %p\n", (void *)env
->block
.dfp
[2]);
160 printf("envval: %10p ", (void *)env
->block
.dfp
[3]);
161 dp(env
->block
.dfp
[3]);
162 printf("penvv : %10p ", (void *)env
->block
.dfp
[4]);
163 dp(env
->block
.dfp
[4]);
164 printf("lfp: %10p\n", env
->block
.lfp
);
165 printf("dfp: %10p\n", env
->block
.dfp
);
166 if (env
->block
.dfp
[4]) {
168 check_env_value(env
->block
.dfp
[4]);
175 check_env_value(VALUE envval
)
178 GetEnvPtr(envval
, env
);
180 if (check_env(env
)) {
183 rb_bug("invalid env");
184 return Qnil
; /* unreachable */
188 vm_make_env_each(rb_thread_t
*th
, rb_control_frame_t
*cfp
,
189 VALUE
*envptr
, VALUE
*endptr
)
191 VALUE envval
, penvval
= 0;
196 if (ENV_IN_HEAP_P(th
, envptr
)) {
197 return ENV_VAL(envptr
);
200 if (envptr
!= endptr
) {
201 VALUE
*penvptr
= GC_GUARDED_PTR_REF(*envptr
);
202 rb_control_frame_t
*pcfp
= cfp
;
204 if (ENV_IN_HEAP_P(th
, penvptr
)) {
205 penvval
= ENV_VAL(penvptr
);
208 while (pcfp
->dfp
!= penvptr
) {
210 if (pcfp
->dfp
== 0) {
212 rb_bug("invalid dfp");
215 penvval
= vm_make_env_each(th
, pcfp
, penvptr
, endptr
);
216 cfp
->lfp
= pcfp
->lfp
;
217 *envptr
= GC_GUARDED_PTR(pcfp
->dfp
);
222 envval
= env_alloc();
223 GetEnvPtr(envval
, env
);
225 if (!RUBY_VM_NORMAL_ISEQ_P(cfp
->iseq
)) {
229 local_size
= cfp
->iseq
->local_size
;
232 env
->env_size
= local_size
+ 1 + 2;
233 env
->local_size
= local_size
;
234 env
->env
= ALLOC_N(VALUE
, env
->env_size
);
235 env
->prev_envval
= penvval
;
237 for (i
= 0; i
<= local_size
; i
++) {
238 env
->env
[i
] = envptr
[-local_size
+ i
];
240 fprintf(stderr
, "%2d ", &envptr
[-local_size
+ i
] - th
->stack
); dp(env
->env
[i
]);
241 if (RUBY_VM_NORMAL_ISEQ_P(cfp
->iseq
)) {
242 /* clear value stack for GC */
243 envptr
[-local_size
+ i
] = 0;
248 *envptr
= envval
; /* GC mark */
249 nenvptr
= &env
->env
[i
- 1];
250 nenvptr
[1] = envval
; /* frame self */
251 nenvptr
[2] = penvval
; /* frame prev env object */
253 /* reset lfp/dfp in cfp */
255 if (envptr
== endptr
) {
260 env
->block
.self
= cfp
->self
;
261 env
->block
.lfp
= cfp
->lfp
;
262 env
->block
.dfp
= cfp
->dfp
;
263 env
->block
.iseq
= cfp
->iseq
;
266 (!(cfp
->lfp
[-1] == Qnil
||
267 BUILTIN_TYPE(cfp
->lfp
[-1]) == T_VALUES
))) {
268 rb_bug("invalid svar");
271 if (!RUBY_VM_NORMAL_ISEQ_P(cfp
->iseq
)) {
279 collect_local_variables_in_env(rb_env_t
*env
, VALUE ary
)
282 if (env
->block
.lfp
== env
->block
.dfp
) {
285 for (i
= 0; i
< env
->block
.iseq
->local_table_size
; i
++) {
286 ID lid
= env
->block
.iseq
->local_table
[i
];
288 rb_ary_push(ary
, ID2SYM(lid
));
291 if (env
->prev_envval
) {
292 GetEnvPtr(env
->prev_envval
, env
);
293 collect_local_variables_in_env(env
, ary
);
299 vm_collect_local_variables_in_heap(rb_thread_t
*th
, VALUE
*dfp
, VALUE ary
)
301 if (ENV_IN_HEAP_P(th
, dfp
)) {
303 GetEnvPtr(ENV_VAL(dfp
), env
);
304 collect_local_variables_in_env(env
, ary
);
313 vm_make_env_object(rb_thread_t
*th
, rb_control_frame_t
*cfp
)
317 if (VM_FRAME_FLAG(cfp
->flag
) == FRAME_MAGIC_FINISH
) {
318 /* for method_missing */
319 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
322 envval
= vm_make_env_each(th
, cfp
, cfp
->dfp
, cfp
->lfp
);
325 check_env_value(envval
);
332 vm_stack_to_heap(rb_thread_t
*th
)
334 rb_control_frame_t
*cfp
= th
->cfp
;
335 while ((cfp
= vm_get_ruby_level_cfp(th
, cfp
)) != 0) {
336 vm_make_env_object(th
, cfp
);
337 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
344 vm_make_proc_from_block(rb_thread_t
*th
, rb_control_frame_t
*cfp
,
348 rb_control_frame_t
*bcfp
;
349 VALUE
*bdfp
; /* to gc mark */
355 bcfp
= RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block
);
357 block
->proc
= procval
= vm_make_proc(th
, bcfp
, block
);
362 vm_make_proc(rb_thread_t
*th
,
363 rb_control_frame_t
*cfp
, rb_block_t
*block
)
365 VALUE procval
, envval
, blockprocval
= 0;
368 if (GC_GUARDED_PTR_REF(cfp
->lfp
[0])) {
369 if (!RUBY_VM_CLASS_SPECIAL_P(cfp
->lfp
[0])) {
372 blockprocval
= vm_make_proc_from_block(
373 th
, cfp
, (rb_block_t
*)GC_GUARDED_PTR_REF(*cfp
->lfp
));
375 GetProcPtr(blockprocval
, p
);
376 *cfp
->lfp
= GC_GUARDED_PTR(&p
->block
);
379 envval
= vm_make_env_object(th
, cfp
);
382 check_env_value(envval
);
384 procval
= rb_proc_alloc(rb_cProc
);
385 GetProcPtr(procval
, proc
);
386 proc
->blockprocval
= blockprocval
;
387 proc
->block
.self
= block
->self
;
388 proc
->block
.lfp
= block
->lfp
;
389 proc
->block
.dfp
= block
->dfp
;
390 proc
->block
.iseq
= block
->iseq
;
391 proc
->block
.proc
= procval
;
392 proc
->envval
= envval
;
393 proc
->safe_level
= th
->safe_level
;
394 proc
->special_cref_stack
= lfp_get_special_cref(block
->lfp
);
397 if (th
->stack
< block
->dfp
&& block
->dfp
< th
->stack
+ th
->stack_size
) {
398 rb_bug("invalid ptr: block->dfp");
400 if (th
->stack
< block
->lfp
&& block
->lfp
< th
->stack
+ th
->stack_size
) {
401 rb_bug("invalid ptr: block->lfp");
408 /* C -> Ruby: method */
411 vm_call0(rb_thread_t
*th
, VALUE klass
, VALUE recv
,
412 VALUE id
, ID oid
, int argc
, const VALUE
*argv
,
413 NODE
* body
, int nosuper
)
416 rb_block_t
*blockptr
= 0;
418 if (0) printf("id: %s, nd: %s, argc: %d, passed: %p\n",
419 rb_id2name(id
), ruby_node_name(nd_type(body
)),
420 argc
, th
->passed_block
);
422 if (th
->passed_block
) {
423 blockptr
= th
->passed_block
;
424 th
->passed_block
= 0;
426 switch (nd_type(body
)) {
427 case RUBY_VM_METHOD_NODE
:{
428 rb_control_frame_t
*reg_cfp
;
429 VALUE iseqval
= (VALUE
)body
->nd_body
;
432 rb_vm_set_finish_env(th
);
435 CHECK_STACK_OVERFLOW(reg_cfp
, argc
+ 1);
437 *reg_cfp
->sp
++ = recv
;
438 for (i
= 0; i
< argc
; i
++) {
439 *reg_cfp
->sp
++ = argv
[i
];
442 vm_setup_method(th
, reg_cfp
, argc
, blockptr
, 0, iseqval
, recv
, klass
);
443 val
= vm_eval_body(th
);
447 EXEC_EVENT_HOOK(th
, RUBY_EVENT_C_CALL
, recv
, id
, klass
);
449 rb_control_frame_t
*reg_cfp
= th
->cfp
;
450 rb_control_frame_t
*cfp
=
451 vm_push_frame(th
, 0, FRAME_MAGIC_CFUNC
,
452 recv
, (VALUE
)blockptr
, 0, reg_cfp
->sp
, 0, 1);
455 cfp
->method_class
= klass
;
457 val
= call_cfunc(body
->nd_cfnc
, recv
, body
->nd_argc
, argc
, argv
);
459 if (reg_cfp
!= th
->cfp
+ 1) {
462 rb_bug("cfp consistency error - call0");
467 EXEC_EVENT_HOOK(th
, RUBY_EVENT_C_RETURN
, recv
, id
, klass
);
472 rb_raise(rb_eArgError
, "wrong number of arguments (%d for 1)", argc
);
474 val
= rb_ivar_set(recv
, body
->nd_vid
, argv
[0]);
479 rb_raise(rb_eArgError
, "wrong number of arguments (%d for 0)",
482 val
= rb_attr_get(recv
, body
->nd_vid
);
486 val
= vm_call_bmethod(th
, id
, body
->nd_cval
,
487 recv
, klass
, argc
, (VALUE
*)argv
, blockptr
);
491 rb_bug("unsupported: vm_call0(%s)", ruby_node_name(nd_type(body
)));
493 RUBY_VM_CHECK_INTS();
498 vm_call_super(rb_thread_t
*th
, int argc
, const VALUE
*argv
)
500 VALUE recv
= th
->cfp
->self
;
504 rb_control_frame_t
*cfp
= th
->cfp
;
507 klass
= cfp
->method_class
;
508 klass
= RCLASS_SUPER(klass
);
511 klass
= vm_search_normal_superclass(cfp
->method_class
, recv
);
517 rb_bug("vm_call_super: should not be reached");
520 body
= rb_method_node(klass
, id
); /* this returns NODE_METHOD */
523 body
= body
->nd_body
;
529 rb_bug("vm_call_super: not found");
532 return vm_call0(th
, klass
, recv
, id
, id
, argc
, argv
, body
, CALL_SUPER
);
536 rb_call_super(int argc
, const VALUE
*argv
)
539 return vm_call_super(GET_THREAD(), argc
, argv
);
542 /* C -> Ruby: block */
545 invoke_block(rb_thread_t
*th
, rb_block_t
*block
, VALUE self
,
546 int argc
, VALUE
*argv
, rb_block_t
*blockptr
)
549 if (BUILTIN_TYPE(block
->iseq
) != T_NODE
) {
550 rb_iseq_t
*iseq
= block
->iseq
;
551 rb_control_frame_t
*cfp
= th
->cfp
;
553 const int arg_size
= iseq
->arg_size
;
554 const int type
= block_proc_is_lambda(block
->proc
) ? FRAME_MAGIC_LAMBDA
: FRAME_MAGIC_BLOCK
;
556 rb_vm_set_finish_env(th
);
558 CHECK_STACK_OVERFLOW(cfp
, argc
+ iseq
->stack_max
);
560 for (i
=0; i
<argc
; i
++) {
561 cfp
->sp
[i
] = argv
[i
];
564 opt_pc
= vm_yield_setup_args(th
, iseq
, argc
, cfp
->sp
, blockptr
,
565 type
== FRAME_MAGIC_LAMBDA
);
567 vm_push_frame(th
, iseq
, type
,
568 self
, GC_GUARDED_PTR(block
->dfp
),
569 iseq
->iseq_encoded
+ opt_pc
, cfp
->sp
+ arg_size
, block
->lfp
,
570 iseq
->local_size
- arg_size
);
572 val
= vm_eval_body(th
);
575 val
= vm_yield_with_cfunc(th
, block
, self
, argc
, argv
);
581 vm_yield(rb_thread_t
*th
, int argc
, VALUE
*argv
)
583 rb_block_t
*block
= GC_GUARDED_PTR_REF(th
->cfp
->lfp
[0]);
586 vm_localjump_error("no block given", Qnil
, 0);
589 return invoke_block(th
, block
, block
->self
, argc
, argv
, 0);
593 vm_invoke_proc(rb_thread_t
*th
, rb_proc_t
*proc
,
594 VALUE self
, int argc
, VALUE
*argv
, rb_block_t
*blockptr
)
598 volatile int stored_safe
= th
->safe_level
;
599 volatile NODE
*stored_special_cref_stack
=
600 lfp_set_special_cref(proc
->block
.lfp
, proc
->special_cref_stack
);
601 rb_control_frame_t
* volatile cfp
= th
->cfp
;
604 if ((state
= EXEC_TAG()) == 0) {
605 th
->safe_level
= proc
->safe_level
;
606 val
= invoke_block(th
, &proc
->block
, self
, argc
, argv
, blockptr
);
610 if (!proc
->is_from_method
) {
611 th
->safe_level
= stored_safe
;
614 lfp_set_special_cref(proc
->block
.lfp
, (NODE
*)stored_special_cref_stack
);
617 if (state
== TAG_RETURN
&& proc
->is_lambda
) {
618 VALUE err
= th
->errinfo
;
619 VALUE
*escape_dfp
= GET_THROWOBJ_CATCH_POINT(err
);
620 VALUE
*cdfp
= proc
->block
.dfp
;
622 if (escape_dfp
== cdfp
) {
626 val
= GET_THROWOBJ_VAL(err
);
637 /* special variable */
640 vm_cfp_svar_get(rb_thread_t
*th
, rb_control_frame_t
*cfp
, VALUE key
)
642 while (cfp
->pc
== 0) {
645 return lfp_svar_get(th
, cfp
->lfp
, key
);
649 vm_cfp_svar_set(rb_thread_t
*th
, rb_control_frame_t
*cfp
, VALUE key
, VALUE val
)
651 while (cfp
->pc
== 0) {
654 lfp_svar_set(th
, cfp
->lfp
, key
, val
);
658 vm_svar_get(VALUE key
)
660 rb_thread_t
*th
= GET_THREAD();
661 return vm_cfp_svar_get(th
, th
->cfp
, key
);
665 vm_svar_set(VALUE key
, VALUE val
)
667 rb_thread_t
*th
= GET_THREAD();
668 vm_cfp_svar_set(th
, th
->cfp
, key
, val
);
674 return vm_svar_get(1);
678 rb_backref_set(VALUE val
)
684 rb_lastline_get(void)
686 return vm_svar_get(0);
690 rb_lastline_set(VALUE val
)
698 vm_get_sourceline(rb_control_frame_t
*cfp
)
701 rb_iseq_t
*iseq
= cfp
->iseq
;
703 if (RUBY_VM_NORMAL_ISEQ_P(iseq
)) {
705 int pos
= cfp
->pc
- cfp
->iseq
->iseq_encoded
;
707 for (i
= 0; i
< iseq
->insn_info_size
; i
++) {
708 if (iseq
->insn_info_table
[i
].position
== pos
) {
709 line_no
= iseq
->insn_info_table
[i
- 1].line_no
;
713 line_no
= iseq
->insn_info_table
[i
- 1].line_no
;
720 vm_backtrace_each(rb_thread_t
*th
,
721 rb_control_frame_t
*limit_cfp
,
722 rb_control_frame_t
*cfp
,
723 char *file
, int line_no
, VALUE ary
)
727 while (cfp
> limit_cfp
) {
729 if (cfp
->iseq
!= 0) {
731 rb_iseq_t
*iseq
= cfp
->iseq
;
733 line_no
= vm_get_sourceline(cfp
);
734 file
= RSTRING_PTR(iseq
->filename
);
735 str
= rb_sprintf("%s:%d:in `%s'",
736 file
, line_no
, RSTRING_PTR(iseq
->name
));
737 rb_ary_push(ary
, str
);
740 else if (RUBYVM_CFUNC_FRAME_P(cfp
)) {
741 str
= rb_sprintf("%s:%d:in `%s'",
743 rb_id2name(cfp
->method_id
));
744 rb_ary_push(ary
, str
);
746 cfp
= RUBY_VM_NEXT_CONTROL_FRAME(cfp
);
748 return rb_ary_reverse(ary
);
752 vm_backtrace(rb_thread_t
*th
, int lev
)
755 rb_control_frame_t
*cfp
= th
->cfp
;
756 rb_control_frame_t
*top_of_cfp
= (void *)(th
->stack
+ th
->stack_size
);
766 if (cfp
>= top_of_cfp
) {
773 ary
= vm_backtrace_each(th
, RUBY_VM_NEXT_CONTROL_FRAME(cfp
),
774 top_of_cfp
, "", 0, ary
);
783 rb_thread_t
*th
= GET_THREAD();
784 rb_control_frame_t
*cfp
= th
->cfp
;
785 while ((void *)(cfp
+ 1) < (void *)(th
->stack
+ th
->stack_size
)) {
786 /* printf("cfp: %p\n", cfp->type); */
787 if (cfp
->lfp
&& cfp
->lfp
[-1] != Qnil
&&
788 TYPE(cfp
->lfp
[-1]) != T_VALUES
) {
789 /* dp(cfp->lfp[-1]); */
790 rb_bug("!!!invalid svar!!!");
797 lfp_set_special_cref(VALUE
*lfp
, NODE
* cref
)
799 struct RValues
*values
= (void *) lfp
[-1];
806 if (cref
== 0 && ((VALUE
)values
== Qnil
|| values
->basic
.klass
== 0)) {
810 old_cref
= (NODE
*)lfp_svar_get(GET_THREAD(), lfp
, 2);
811 lfp_svar_set(GET_THREAD(), lfp
, 2, (VALUE
)cref
);
817 vm_set_special_cref(rb_thread_t
*th
, VALUE
*lfp
, NODE
* cref_stack
)
819 return lfp_set_special_cref(lfp
, cref_stack
);
824 debug_cref(NODE
*cref
)
828 printf("%ld\n", cref
->nd_visi
);
829 cref
= cref
->nd_next
;
835 vm_get_cref(rb_thread_t
*th
, rb_iseq_t
*iseq
, rb_control_frame_t
*cfp
)
837 return get_cref(iseq
, cfp
->lfp
);
841 vm_cref_push(rb_thread_t
*th
, VALUE klass
, int noex
)
843 NODE
*cref
= NEW_BLOCK(klass
);
844 rb_control_frame_t
*cfp
= vm_get_ruby_level_cfp(th
, th
->cfp
);
847 cref
->nd_next
= get_cref(cfp
->iseq
, cfp
->lfp
);
848 cref
->nd_visi
= noex
;
853 vm_get_cbase(rb_thread_t
*th
)
855 rb_control_frame_t
*cfp
= vm_get_ruby_level_cfp(th
, th
->cfp
);
856 NODE
*cref
= get_cref(cfp
->iseq
, cfp
->lfp
);
857 VALUE klass
= Qundef
;
860 if ((klass
= cref
->nd_clss
) != 0) {
863 cref
= cref
->nd_next
;
871 make_localjump_error(const char *mesg
, VALUE value
, int reason
)
873 extern VALUE rb_eLocalJumpError
;
874 VALUE exc
= rb_exc_new2(rb_eLocalJumpError
, mesg
);
879 id
= rb_intern("break");
882 id
= rb_intern("redo");
885 id
= rb_intern("retry");
888 id
= rb_intern("next");
891 id
= rb_intern("return");
894 id
= rb_intern("noreason");
897 rb_iv_set(exc
, "@exit_value", value
);
898 rb_iv_set(exc
, "@reason", ID2SYM(id
));
903 vm_localjump_error(const char *mesg
, VALUE value
, int reason
)
905 VALUE exc
= make_localjump_error(mesg
, value
, reason
);
910 vm_make_jump_tag_but_local_jump(int state
, VALUE val
)
915 val
= GET_THREAD()->tag
->retval
;
920 result
= make_localjump_error("unexpected return", val
, state
);
923 result
= make_localjump_error("unexpected break", val
, state
);
926 result
= make_localjump_error("unexpected next", val
, state
);
929 result
= make_localjump_error("unexpected redo", Qnil
, state
);
932 result
= make_localjump_error("retry outside of rescue clause", Qnil
, state
);
941 vm_jump_tag_but_local_jump(int state
, VALUE val
)
943 VALUE exc
= vm_make_jump_tag_but_local_jump(state
, val
);
950 NORETURN(static void vm_iter_break(rb_thread_t
*th
));
953 vm_iter_break(rb_thread_t
*th
)
955 rb_control_frame_t
*cfp
= th
->cfp
;
956 VALUE
*dfp
= GC_GUARDED_PTR_REF(*cfp
->dfp
);
958 th
->state
= TAG_BREAK
;
959 th
->errinfo
= (VALUE
)NEW_THROW_OBJECT(Qnil
, (VALUE
)dfp
, TAG_BREAK
);
960 TH_JUMP_TAG(th
, TAG_BREAK
);
966 vm_iter_break(GET_THREAD());
969 /* optimization: redefine management */
971 VALUE ruby_vm_redefined_flag
= 0;
972 static st_table
*vm_opt_method_table
= 0;
975 rb_vm_check_redefinition_opt_method(NODE
*node
)
979 if (st_lookup(vm_opt_method_table
, (st_data_t
)node
, &bop
)) {
980 ruby_vm_redefined_flag
|= bop
;
985 add_opt_method(VALUE klass
, ID mid
, VALUE bop
)
988 if (st_lookup(RCLASS_M_TBL(klass
), mid
, (void *)&node
) &&
989 nd_type(node
->nd_body
->nd_body
) == NODE_CFUNC
) {
990 st_insert(vm_opt_method_table
, (st_data_t
)node
, (st_data_t
)bop
);
993 rb_bug("undefined optimized method: %s", rb_id2name(mid
));
998 vm_init_redefined_flag(void)
1003 vm_opt_method_table
= st_init_numtable();
1005 #define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_)
1006 #define C(k) add_opt_method(rb_c##k, mid, bop)
1007 OP(PLUS
, PLUS
), (C(Fixnum
), C(Float
), C(String
), C(Array
));
1008 OP(MINUS
, MINUS
), (C(Fixnum
));
1009 OP(MULT
, MULT
), (C(Fixnum
), C(Float
));
1010 OP(DIV
, DIV
), (C(Fixnum
), C(Float
));
1011 OP(MOD
, MOD
), (C(Fixnum
), C(Float
));
1012 OP(Eq
, EQ
), (C(Fixnum
), C(Float
), C(String
));
1013 OP(LT
, LT
), (C(Fixnum
));
1014 OP(LE
, LE
), (C(Fixnum
));
1015 OP(LTLT
, LTLT
), (C(String
), C(Array
));
1016 OP(AREF
, AREF
), (C(Array
), C(Hash
));
1017 OP(ASET
, ASET
), (C(Array
), C(Hash
));
1018 OP(Length
, LENGTH
), (C(Array
), C(String
), C(Hash
));
1019 OP(Succ
, SUCC
), (C(Fixnum
), C(String
), C(Time
));
1020 OP(GT
, GT
), (C(Fixnum
));
1021 OP(GE
, GE
), (C(Fixnum
));
1026 /* evaluator body */
1028 #include "vm_evalbody.c"
1033 cfunc finish F1 F2 C1
1034 rb_funcall finish F1 F2 C1
1036 VM finish F1 F2 C1 F3
1038 F1 - F3 : pushed by VM
1039 C1 : pushed by send insn (CFUNC)
1041 struct CONTROL_FRAME {
1042 VALUE *pc; // cfp[0]
1043 VALUE *sp; // cfp[1]
1044 VALUE *bp; // cfp[2]
1045 rb_iseq_t *iseq; // cfp[3]
1046 VALUE flag; // cfp[4]
1047 VALUE self; // cfp[5]
1048 VALUE *lfp; // cfp[6]
1049 VALUE *dfp; // cfp[7]
1050 rb_iseq_t * block_iseq; // cfp[8]
1051 VALUE proc; // cfp[9] always 0
1058 rb_iseq_t *block_iseq;
1066 struct METHOD_CONTROL_FRAME {
1067 struct CONTROL_FRAME;
1070 struct METHOD_FRAME {
1077 VALUE special; // lfp [1]
1078 struct block_object *block_ptr | 0x01; // lfp [0]
1081 struct BLOCK_CONTROL_FRAME {
1085 struct BLOCK_FRAME {
1092 VALUE *(prev_ptr | 0x01); // DFP[0]
1095 struct CLASS_CONTROL_FRAME {
1099 struct CLASS_FRAME {
1103 VALUE prev_dfp; // for frame jump
1106 struct C_METHOD_CONTROL_FRAME {
1108 VALUE *sp; // stack pointer
1109 VALUE *bp; // base pointer (used in exception)
1110 rb_iseq_t *iseq; // cmi
1111 VALUE magic; // C_METHOD_FRAME
1114 VALUE *dfp; // == lfp
1115 rb_iseq_t * block_iseq; //
1116 VALUE proc; // always 0
1119 struct C_BLOCK_CONTROL_FRAME {
1120 VALUE *pc; // point only "finish" insn
1122 rb_iseq_t *iseq; // ?
1123 VALUE magic; // C_METHOD_FRAME
1124 VALUE self; // needed?
1127 rb_iseq_t * block_iseq; // 0
1130 struct C_METHDO_FRAME{
1138 vm_eval_body(rb_thread_t
*th
)
1145 if ((state
= EXEC_TAG()) == 0) {
1147 result
= vm_eval(th
, initial
);
1148 if ((state
= th
->state
) != 0) {
1151 goto exception_handler
;
1156 struct iseq_catch_table_entry
*entry
;
1157 unsigned long epc
, cont_pc
, cont_sp
;
1158 VALUE catch_iseqval
;
1159 rb_control_frame_t
*cfp
;
1160 VALUE
*escape_dfp
= NULL
;
1165 if (state
== TAG_RAISE
) {
1166 rb_ivar_set(err
, idThrowState
, INT2FIX(state
));
1170 cont_pc
= cont_sp
= catch_iseqval
= 0;
1172 while (th
->cfp
->pc
== 0 || th
->cfp
->iseq
== 0) {
1177 epc
= cfp
->pc
- cfp
->iseq
->iseq_encoded
;
1179 if (state
== TAG_BREAK
|| state
== TAG_RETURN
) {
1180 escape_dfp
= GET_THROWOBJ_CATCH_POINT(err
);
1182 if (cfp
->dfp
== escape_dfp
) {
1183 if (state
== TAG_RETURN
) {
1184 if ((cfp
+ 1)->pc
!= &finish_insn_seq
[0]) {
1185 SET_THROWOBJ_CATCH_POINT(err
, (VALUE
)(cfp
+ 1)->dfp
);
1186 SET_THROWOBJ_STATE(err
, state
= TAG_BREAK
);
1189 result
= GET_THROWOBJ_VAL(err
);
1198 #if OPT_STACK_CACHING
1199 initial
= (GET_THROWOBJ_VAL(err
));
1201 *th
->cfp
->sp
++ = (GET_THROWOBJ_VAL(err
));
1209 if (state
== TAG_RAISE
) {
1210 for (i
= 0; i
< cfp
->iseq
->catch_table_size
; i
++) {
1211 entry
= &cfp
->iseq
->catch_table
[i
];
1212 if (entry
->start
< epc
&& entry
->end
>= epc
) {
1214 if (entry
->type
== CATCH_TYPE_RESCUE
||
1215 entry
->type
== CATCH_TYPE_ENSURE
) {
1216 catch_iseqval
= entry
->iseq
;
1217 cont_pc
= entry
->cont
;
1218 cont_sp
= entry
->sp
;
1224 else if (state
== TAG_RETRY
) {
1225 for (i
= 0; i
< cfp
->iseq
->catch_table_size
; i
++) {
1226 entry
= &cfp
->iseq
->catch_table
[i
];
1227 if (entry
->start
< epc
&& entry
->end
>= epc
) {
1229 if (entry
->type
== CATCH_TYPE_ENSURE
) {
1230 catch_iseqval
= entry
->iseq
;
1231 cont_pc
= entry
->cont
;
1232 cont_sp
= entry
->sp
;
1235 else if (entry
->type
== CATCH_TYPE_RETRY
) {
1237 escape_dfp
= GET_THROWOBJ_CATCH_POINT(err
);
1238 if (cfp
->dfp
== escape_dfp
) {
1239 cfp
->pc
= cfp
->iseq
->iseq_encoded
+ entry
->cont
;
1247 else if (state
== TAG_BREAK
&& ((VALUE
)escape_dfp
& ~0x03) == 0) {
1248 type
= CATCH_TYPE_BREAK
;
1250 search_restart_point
:
1251 for (i
= 0; i
< cfp
->iseq
->catch_table_size
; i
++) {
1252 entry
= &cfp
->iseq
->catch_table
[i
];
1254 if (entry
->start
< epc
&& entry
->end
>= epc
) {
1255 if (entry
->type
== CATCH_TYPE_ENSURE
) {
1256 catch_iseqval
= entry
->iseq
;
1257 cont_pc
= entry
->cont
;
1258 cont_sp
= entry
->sp
;
1261 else if (entry
->type
== type
) {
1262 cfp
->pc
= cfp
->iseq
->iseq_encoded
+ entry
->cont
;
1263 cfp
->sp
= cfp
->bp
+ entry
->sp
;
1265 if (!(state
== TAG_REDO
) &&
1266 !(state
== TAG_NEXT
&& !escape_dfp
) &&
1267 !(state
== TAG_BREAK
&& !escape_dfp
)) {
1268 #if OPT_STACK_CACHING
1269 initial
= (GET_THROWOBJ_VAL(err
));
1271 *th
->cfp
->sp
++ = (GET_THROWOBJ_VAL(err
));
1280 else if (state
== TAG_REDO
) {
1281 type
= CATCH_TYPE_REDO
;
1282 escape_dfp
= GET_THROWOBJ_CATCH_POINT(err
);
1283 goto search_restart_point
;
1285 else if (state
== TAG_NEXT
) {
1286 type
= CATCH_TYPE_NEXT
;
1287 escape_dfp
= GET_THROWOBJ_CATCH_POINT(err
);
1288 goto search_restart_point
;
1291 for (i
= 0; i
< cfp
->iseq
->catch_table_size
; i
++) {
1292 entry
= &cfp
->iseq
->catch_table
[i
];
1293 if (entry
->start
< epc
&& entry
->end
>= epc
) {
1295 if (entry
->type
== CATCH_TYPE_ENSURE
) {
1296 catch_iseqval
= entry
->iseq
;
1297 cont_pc
= entry
->cont
;
1298 cont_sp
= entry
->sp
;
1305 if (catch_iseqval
!= 0) {
1306 /* found catch table */
1307 rb_iseq_t
*catch_iseq
;
1309 /* enter catch scope */
1310 GetISeqPtr(catch_iseqval
, catch_iseq
);
1311 cfp
->sp
= cfp
->bp
+ cont_sp
;
1312 cfp
->pc
= cfp
->iseq
->iseq_encoded
+ cont_pc
;
1314 /* push block frame */
1316 vm_push_frame(th
, catch_iseq
, FRAME_MAGIC_BLOCK
,
1317 cfp
->self
, (VALUE
)cfp
->dfp
, catch_iseq
->iseq_encoded
,
1318 cfp
->sp
+ 1, cfp
->lfp
, catch_iseq
->local_size
- 1);
1326 if (th
->cfp
->pc
!= &finish_insn_seq
[0]) {
1327 goto exception_handler
;
1345 rb_iseq_eval(VALUE iseqval
)
1347 rb_thread_t
*th
= GET_THREAD();
1351 rb_vm_set_top_stack(th
, iseqval
);
1353 if (!rb_const_defined(rb_cObject
, rb_intern("TOPLEVEL_BINDING"))) {
1354 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
1356 val
= vm_eval_body(th
);
1357 tmp
= iseqval
; /* prohibit tail call optimization */
1362 rb_thread_method_id_and_class(rb_thread_t
*th
, ID
*idp
, VALUE
*klassp
)
1364 rb_control_frame_t
*cfp
= th
->cfp
;
1365 rb_iseq_t
*iseq
= cfp
->iseq
;
1367 if (idp
) *idp
= cfp
->method_id
;
1368 if (klassp
) *klassp
= cfp
->method_class
;
1372 if (RUBY_VM_IFUNC_P(iseq
)) {
1373 if (idp
) *idp
= rb_intern("<ifunc>");
1374 if (klassp
) *klassp
= 0;
1377 if (iseq
->defined_method_id
) {
1378 if (idp
) *idp
= iseq
->defined_method_id
;
1379 if (klassp
) *klassp
= iseq
->klass
;
1382 if (iseq
->local_iseq
== iseq
) {
1385 iseq
= iseq
->parent_iseq
;
1391 rb_frame_method_id_and_class(ID
*idp
, VALUE
*klassp
)
1393 return rb_thread_method_id_and_class(GET_THREAD(), idp
, klassp
);
1397 rb_thread_current_status(rb_thread_t
*th
)
1399 rb_control_frame_t
*cfp
= th
->cfp
;
1402 if (cfp
->iseq
!= 0) {
1404 rb_iseq_t
*iseq
= cfp
->iseq
;
1405 int line_no
= vm_get_sourceline(cfp
);
1406 char *file
= RSTRING_PTR(iseq
->filename
);
1407 str
= rb_sprintf("%s:%d:in `%s'",
1408 file
, line_no
, RSTRING_PTR(iseq
->name
));
1411 else if (cfp
->method_id
) {
1412 str
= rb_sprintf("`%s#%s' (cfunc)",
1413 RSTRING_PTR(rb_class_name(cfp
->method_class
)),
1414 rb_id2name(cfp
->method_id
));
1421 rb_vm_call_cfunc(VALUE recv
, VALUE (*func
)(VALUE
), VALUE arg
, rb_block_t
*blockptr
, VALUE filename
)
1423 rb_thread_t
*th
= GET_THREAD();
1424 rb_control_frame_t
*reg_cfp
= th
->cfp
;
1425 volatile VALUE iseqval
= rb_iseq_new(0, filename
, filename
, 0, ISEQ_TYPE_TOP
);
1428 vm_push_frame(th
, DATA_PTR(iseqval
), FRAME_MAGIC_TOP
,
1429 recv
, (VALUE
)blockptr
, 0, reg_cfp
->sp
, 0, 1);
1437 rb_vm_cfunc_funcall_p(rb_control_frame_t
*cfp
)
1439 if (vm_cfunc_flags(cfp
) & (VM_CALL_FCALL_BIT
| VM_CALL_VCALL_BIT
))
1449 RUBY_FREE_ENTER("vm");
1451 rb_vm_t
*vmobj
= ptr
;
1453 st_free_table(vmobj
->living_threads
);
1454 vmobj
->living_threads
= 0;
1455 /* TODO: MultiVM Instance */
1456 /* VM object should not be cleaned by GC */
1457 /* ruby_xfree(ptr); */
1458 /* ruby_current_vm = 0; */
1460 RUBY_FREE_LEAVE("vm");
1464 vm_mark_each_thread_func(st_data_t key
, st_data_t value
, st_data_t dummy
)
1466 VALUE thval
= (VALUE
)key
;
1472 mark_event_hooks(rb_event_hook_t
*hook
)
1475 rb_gc_mark(hook
->data
);
1481 rb_vm_mark(void *ptr
)
1483 RUBY_MARK_ENTER("vm");
1484 RUBY_GC_INFO("-------------------------------------------------\n");
1487 if (vm
->living_threads
) {
1488 st_foreach(vm
->living_threads
, vm_mark_each_thread_func
, 0);
1490 RUBY_MARK_UNLESS_NULL(vm
->thgroup_default
);
1491 RUBY_MARK_UNLESS_NULL(vm
->mark_object_ary
);
1492 RUBY_MARK_UNLESS_NULL(vm
->last_status
);
1493 RUBY_MARK_UNLESS_NULL(vm
->load_path
);
1494 RUBY_MARK_UNLESS_NULL(vm
->loaded_features
);
1495 RUBY_MARK_UNLESS_NULL(vm
->top_self
);
1497 if (vm
->loading_table
) {
1498 rb_mark_tbl(vm
->loading_table
);
1501 mark_event_hooks(vm
->event_hooks
);
1504 RUBY_MARK_LEAVE("vm");
1508 vm_init2(rb_vm_t
*vm
)
1510 MEMZERO(vm
, rb_vm_t
, 1);
1515 #define USE_THREAD_DATA_RECYCLE 1
1517 #if USE_THREAD_DATA_RECYCLE
1518 #define RECYCLE_MAX 64
1519 VALUE
*thread_recycle_stack_slot
[RECYCLE_MAX
];
1520 int thread_recycle_stack_count
= 0;
1523 thread_recycle_stack(int size
)
1525 if (thread_recycle_stack_count
) {
1526 return thread_recycle_stack_slot
[--thread_recycle_stack_count
];
1529 return ALLOC_N(VALUE
, size
);
1534 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
1538 rb_thread_recycle_stack_release(VALUE
*stack
)
1540 #if USE_THREAD_DATA_RECYCLE
1541 if (thread_recycle_stack_count
< RECYCLE_MAX
) {
1542 thread_recycle_stack_slot
[thread_recycle_stack_count
++] = stack
;
1552 static rb_thread_t
*
1553 thread_recycle_struct(void)
1555 void *p
= ALLOC_N(rb_thread_t
, 1);
1556 memset(p
, 0, sizeof(rb_thread_t
));
1561 thread_free(void *ptr
)
1564 RUBY_FREE_ENTER("thread");
1569 if (!th
->root_fiber
) {
1570 RUBY_FREE_UNLESS_NULL(th
->stack
);
1573 if (th
->local_storage
) {
1574 st_free_table(th
->local_storage
);
1579 VALUE
*ptr
= th
->value_cache_ptr
;
1582 RBASIC(v
)->flags
= 0;
1583 RBASIC(v
)->klass
= 0;
1589 if (th
->vm
->main_thread
== th
) {
1590 RUBY_GC_INFO("main thread\n");
1596 RUBY_FREE_LEAVE("thread");
1599 void rb_gc_mark_machine_stack(rb_thread_t
*th
);
1602 rb_thread_mark(void *ptr
)
1604 rb_thread_t
*th
= NULL
;
1605 RUBY_MARK_ENTER("thread");
1609 VALUE
*p
= th
->stack
;
1610 VALUE
*sp
= th
->cfp
->sp
;
1611 rb_control_frame_t
*cfp
= th
->cfp
;
1612 rb_control_frame_t
*limit_cfp
= (void *)(th
->stack
+ th
->stack_size
);
1617 rb_gc_mark_locations(p
, p
+ th
->mark_stack_len
);
1619 while (cfp
!= limit_cfp
) {
1620 rb_gc_mark(cfp
->proc
);
1621 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1625 /* mark ruby objects */
1626 RUBY_MARK_UNLESS_NULL(th
->first_proc
);
1627 if (th
->first_proc
) RUBY_MARK_UNLESS_NULL(th
->first_args
);
1629 RUBY_MARK_UNLESS_NULL(th
->thgroup
);
1630 RUBY_MARK_UNLESS_NULL(th
->value
);
1631 RUBY_MARK_UNLESS_NULL(th
->errinfo
);
1632 RUBY_MARK_UNLESS_NULL(th
->thrown_errinfo
);
1633 RUBY_MARK_UNLESS_NULL(th
->local_svar
);
1634 RUBY_MARK_UNLESS_NULL(th
->top_self
);
1635 RUBY_MARK_UNLESS_NULL(th
->top_wrapper
);
1636 RUBY_MARK_UNLESS_NULL(th
->fiber
);
1637 RUBY_MARK_UNLESS_NULL(th
->root_fiber
);
1639 rb_mark_tbl(th
->local_storage
);
1641 if (GET_THREAD() != th
&& th
->machine_stack_start
&& th
->machine_stack_end
) {
1642 rb_gc_mark_machine_stack(th
);
1643 rb_gc_mark_locations((VALUE
*)&th
->machine_regs
,
1644 (VALUE
*)(&th
->machine_regs
) +
1645 sizeof(th
->machine_regs
) / sizeof(VALUE
));
1648 mark_event_hooks(th
->event_hooks
);
1651 RUBY_MARK_UNLESS_NULL(th
->stat_insn_usage
);
1652 RUBY_MARK_LEAVE("thread");
1656 thread_alloc(VALUE klass
)
1659 #ifdef USE_THREAD_RECYCLE
1660 rb_thread_t
*th
= thread_recycle_struct();
1661 obj
= Data_Wrap_Struct(klass
, rb_thread_mark
, thread_free
, th
);
1664 obj
= Data_Make_Struct(klass
, rb_thread_t
,
1665 rb_thread_mark
, thread_free
, th
);
1671 th_init2(rb_thread_t
*th
)
1673 /* allocate thread stack */
1674 th
->stack_size
= RUBY_VM_THREAD_STACK_SIZE
;
1675 th
->stack
= thread_recycle_stack(th
->stack_size
);
1677 th
->cfp
= (void *)(th
->stack
+ th
->stack_size
);
1679 vm_push_frame(th
, 0, FRAME_MAGIC_TOP
, Qnil
, 0, 0,
1682 th
->status
= THREAD_RUNNABLE
;
1686 th
->value_cache_ptr
= &th
->value_cache
[0];
1691 th_init(rb_thread_t
*th
)
1697 ruby_thread_init(VALUE self
)
1700 rb_vm_t
*vm
= GET_THREAD()->vm
;
1701 GetThreadPtr(self
, th
);
1707 th
->top_wrapper
= 0;
1708 th
->top_self
= rb_vm_top_self();
1713 rb_thread_alloc(VALUE klass
)
1715 VALUE self
= thread_alloc(klass
);
1716 ruby_thread_init(self
);
1720 VALUE
insns_name_array(void);
1721 extern VALUE
*rb_gc_stack_start
;
1722 extern size_t rb_gc_stack_maxsize
;
1724 extern VALUE
*rb_gc_register_stack_start
;
1737 VALUE ary
= rb_ary_new();
1739 #include <execinfo.h>
1740 #define MAX_NATIVE_TRACE 1024
1741 static void *trace
[MAX_NATIVE_TRACE
];
1742 int n
= backtrace(trace
, MAX_NATIVE_TRACE
);
1743 char **syms
= backtrace_symbols(trace
, n
);
1750 for (i
=0; i
<n
; i
++) {
1751 rb_ary_push(ary
, rb_str_new2(syms
[i
]));
1764 rb_cVM
= rb_define_class("VM", rb_cObject
);
1765 rb_undef_alloc_func(rb_cVM
);
1768 rb_cEnv
= rb_define_class_under(rb_cVM
, "Env", rb_cObject
);
1769 rb_undef_alloc_func(rb_cEnv
);
1772 rb_cThread
= rb_define_class("Thread", rb_cObject
);
1773 rb_undef_alloc_func(rb_cThread
);
1774 rb_define_method(rb_cThread
, "initialize", ruby_thread_init
, 0);
1776 /* ::VM::USAGE_ANALYSIS_* */
1777 rb_define_const(rb_cVM
, "USAGE_ANALYSIS_INSN", rb_hash_new());
1778 rb_define_const(rb_cVM
, "USAGE_ANALYSIS_REGS", rb_hash_new());
1779 rb_define_const(rb_cVM
, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
1780 rb_define_const(rb_cVM
, "OPTS", opts
= rb_ary_new());
1782 #if OPT_DIRECT_THREADED_CODE
1783 rb_ary_push(opts
, rb_str_new2("direct threaded code"));
1784 #elif OPT_TOKEN_THREADED_CODE
1785 rb_ary_push(opts
, rb_str_new2("token threaded code"));
1786 #elif OPT_CALL_THREADED_CODE
1787 rb_ary_push(opts
, rb_str_new2("call threaded code"));
1790 #if OPT_BASIC_OPERATIONS
1791 rb_ary_push(opts
, rb_str_new2("optimize basic operation"));
1794 #if OPT_STACK_CACHING
1795 rb_ary_push(opts
, rb_str_new2("stack caching"));
1797 #if OPT_OPERANDS_UNIFICATION
1798 rb_ary_push(opts
, rb_str_new2("operands unification]"));
1800 #if OPT_INSTRUCTIONS_UNIFICATION
1801 rb_ary_push(opts
, rb_str_new2("instructions unification"));
1803 #if OPT_INLINE_METHOD_CACHE
1804 rb_ary_push(opts
, rb_str_new2("inline method cache"));
1806 #if OPT_BLOCKINLINING
1807 rb_ary_push(opts
, rb_str_new2("block inlining"));
1810 /* ::VM::InsnNameArray */
1811 rb_define_const(rb_cVM
, "INSTRUCTION_NAMES", insns_name_array());
1813 /* debug functions ::VM::SDR(), ::VM::NSDR() */
1814 /* rb_define_singleton_method(rb_cVM, "SDR", sdr, 0); */
1815 /* rb_define_singleton_method(rb_cVM, "NSDR", nsdr, 0); */
1817 /* VM bootstrap: phase 2 */
1819 rb_vm_t
*vm
= ruby_current_vm
;
1820 rb_thread_t
*th
= GET_THREAD();
1821 VALUE filename
= rb_str_new2("<dummy toplevel>");
1822 volatile VALUE iseqval
= rb_iseq_new(0, filename
, filename
, 0, ISEQ_TYPE_TOP
);
1823 volatile VALUE th_self
;
1826 /* create vm object */
1827 vm
->self
= Data_Wrap_Struct(rb_cVM
, rb_vm_mark
, vm_free
, vm
);
1829 /* create main thread */
1830 th_self
= th
->self
= Data_Wrap_Struct(rb_cThread
, rb_thread_mark
,
1832 vm
->main_thread
= th
;
1833 vm
->running_thread
= th
;
1835 th
->top_wrapper
= 0;
1836 th
->top_self
= rb_vm_top_self();
1837 rb_thread_set_current(th
);
1839 vm
->living_threads
= st_init_numtable();
1840 st_insert(vm
->living_threads
, th_self
, (st_data_t
) th
->thread_id
);
1842 rb_register_mark_object(iseqval
);
1843 GetISeqPtr(iseqval
, iseq
);
1844 th
->cfp
->iseq
= iseq
;
1845 th
->cfp
->pc
= iseq
->iseq_encoded
;
1847 vm_init_redefined_flag();
1850 struct rb_objspace
*rb_objspace_alloc(void);
1855 /* VM bootstrap: phase 1 */
1856 rb_vm_t
*vm
= malloc(sizeof(*vm
));
1857 rb_thread_t
*th
= malloc(sizeof(*th
));
1858 MEMZERO(th
, rb_thread_t
, 1);
1860 rb_thread_set_current_raw(th
);
1863 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
1864 vm
->objspace
= rb_objspace_alloc();
1866 ruby_current_vm
= vm
;
1870 th
->machine_stack_start
= rb_gc_stack_start
;
1871 th
->machine_stack_maxsize
= rb_gc_stack_maxsize
;
1873 th
->machine_register_stack_start
= rb_gc_register_stack_start
;
1874 th
->machine_stack_maxsize
/= 2;
1875 th
->machine_register_stack_maxsize
= th
->machine_stack_maxsize
;
1882 main_to_s(VALUE obj
)
1884 return rb_str_new2("main");
1890 return GET_VM()->top_self
;
1896 rb_vm_t
*vm
= GET_VM();
1898 vm
->top_self
= rb_obj_alloc(rb_cObject
);
1899 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s
, 0);