/**********************************************************************
  Copyright (C) 2004-2007 Koichi Sasada
**********************************************************************/

#include "ruby/ruby.h"
#include "ruby/node.h"
#include "ruby/encoding.h"

#include "insnhelper.h"
#include "vm_insnhelper.c"
VALUE rb_mRubyVMFrozenCore;

VALUE ruby_vm_global_state_version = 1;
rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;
void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);
#if OPT_STACK_CACHING
static VALUE finish_insn_seq[1] = { BIN(finish_SC_ax_ax) };
#elif OPT_CALL_THREADED_CODE
static VALUE const finish_insn_seq[1] = { 0 };
#else
static VALUE finish_insn_seq[1] = { BIN(finish) };
#endif
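/* finish_insn_seq is a one-instruction sentinel program: a frame whose pc
 * points at &finish_insn_seq[0] acts as a return barrier, telling the
 * dispatch loop to stop and hand the result back to the calling C code. */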
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
/* control stack frame */
static void
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
}
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);
}
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }
}
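/* The cref (lexical class/visibility scope) rides in the frame itself:
 * dfp[-1] is reserved for it, which is how an eval can run under the
 * scope of the binding it was handed. */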
rb_control_frame_t *
vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* TODO: should mark more restricted range */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
static VALUE
env_alloc(void)
{
    VALUE obj;
    rb_env_t *env;
    obj = Data_Make_Struct(rb_cEnv, rb_env_t, env_mark, env_free, env);
    env->env = 0;
    env->prev_envval = 0;
    env->block.iseq = 0;
    return obj;
}
static VALUE check_env_value(VALUE envval);
static int
check_env(rb_env_t * const env)
{
    printf("envptr: %p\n", &env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp:    %10p\n", env->block.lfp);
    printf("dfp:    %10p\n", env->block.dfp);
    if (env->block.dfp[4]) {
        check_env_value(env->block.dfp[4]);
    }
    return 1;
}
static VALUE
check_env_value(VALUE envval)
{
    rb_env_t *env;
    GetEnvPtr(envval, env);

    if (check_env(env)) {
        return envval;
    }
    rb_bug("invalid env");
    return Qnil;                /* unreachable */
}
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* allocate env */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    *envptr = envval;           /* GC mark */
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;        /* frame self */
    nenvptr[2] = penvval;       /* frame prev env object */

    /* reset lfp/dfp in cfp */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* as Binding */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        env->block.iseq = 0;
    }
    return envval;
}
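/* vm_make_env_each walks a chain of frame-local environments and copies
 * each one off the machine stack into a heap-allocated rb_env_t, patching
 * lfp/dfp so the frame now points at the heap copy.  This is what lets a
 * Ruby closure outlive its defining frame, e.g.:
 *
 *   def counter
 *     n = 0
 *     lambda { n += 1 }   # `n` must survive counter's frame
 *   end
 */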
static int
collect_local_variables_in_env(rb_env_t * const env, const VALUE ary)
{
    int i;
    for (i = 0; i < env->block.iseq->local_table_size; i++) {
        ID lid = env->block.iseq->local_table[i];
        if (lid) {
            rb_ary_push(ary, ID2SYM(lid));
        }
    }
    if (env->prev_envval) {
        rb_env_t *prevenv;
        GetEnvPtr(env->prev_envval, prevenv);
        collect_local_variables_in_env(prevenv, ary);
    }
    return 0;
}
int
vm_collect_local_variables_in_heap(rb_thread_t * const th,
                                   VALUE * const dfp, const VALUE ary)
{
    if (ENV_IN_HEAP_P(th, dfp)) {
        rb_env_t *env;
        GetEnvPtr(ENV_VAL(dfp), env);
        collect_local_variables_in_env(env, ary);
        return 1;
    }
    return 0;
}
VALUE
vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* for method_missing */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
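/* Bindings and procs ultimately come through here: the env chain from
 * cfp->dfp up to cfp->lfp is forced into the heap and the resulting env
 * object is what the binding or proc keeps alive. */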
void
vm_stack_to_heap(rb_thread_t * const th)
{
    rb_control_frame_t *cfp = th->cfp;
    while ((cfp = vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
        vm_make_env_object(th, cfp);
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
static VALUE
vm_make_proc_from_block(rb_thread_t *th, rb_control_frame_t *cfp,
                        rb_block_t *block)
{
    VALUE procval;
    rb_control_frame_t *bcfp;
    VALUE *bdfp;                /* to gc mark */

    if (block->proc) {
        return block->proc;
    }

    bcfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);
    bdfp = bcfp->dfp;
    block->proc = procval = vm_make_proc(th, bcfp, block);
    return procval;
}
VALUE
vm_make_proc(rb_thread_t *th,
             rb_control_frame_t *cfp, const rb_block_t *block)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        if (!RUBY_VM_CLASS_SPECIAL_P(cfp->lfp[0])) {
            rb_proc_t *p;

            blockprocval = vm_make_proc_from_block(
                th, cfp, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

            GetProcPtr(blockprocval, p);
            *cfp->lfp = GC_GUARDED_PTR(&p->block);
        }
    }
    envval = vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(rb_cProc);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
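/* Note the ownership knot tied above: the proc copies its block, the
 * block's proc slot points back at the proc, and envval keeps the
 * heap-migrated locals alive for as long as the proc itself is alive. */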
/* C -> Ruby: block */

static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp = th->cfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        vm_push_frame(th, iseq, type,
                      self, GC_GUARDED_PTR(block->dfp),
                      iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                      iseq->local_size - arg_size);

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_eval_body(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
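/* This is the funnel for every C-to-Ruby block call: an iseq-backed block
 * gets a fresh LAMBDA/BLOCK frame plus a finish frame so vm_eval_body can
 * return here, while a T_NODE block (an ifunc, e.g. from rb_iterate) is
 * dispatched straight to its C function. */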
static inline const rb_block_t *
check_block(rb_thread_t *th)
{
    const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);

    if (blockptr == 0) {
        vm_localjump_error("no block given", Qnil, 0);
    }

    return blockptr;
}
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
               int argc, const VALUE *argv, rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;
    rb_control_frame_t * volatile cfp = th->cfp;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        if (state == TAG_RETURN && proc->is_lambda) {
            VALUE err = th->errinfo;
            VALUE *escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (escape_dfp == cfp->dfp) {
                state = 0;
                th->state = 0;
                th->errinfo = Qnil;
                th->cfp = cfp;
                val = GET_THROWOBJ_VAL(err);
            }
        }
        if (state) {
            JUMP_TAG(state);
        }
    }
    return val;
}
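/* A `return` inside a lambda is caught right here: when the throw object's
 * catch point is this invocation's dfp, the pending TAG_RETURN is cancelled
 * and its value becomes the call's result instead of propagating further. */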
/* special variable */

static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
            return 0;
        }
    }
    return cfp;
}
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}

void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}

VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}

void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
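/* Special variables such as $_ (key 0) and $~ (key 1) are per-frame, not
 * truly global: they live in the local frame's svar slot, which is why the
 * accessors above walk back to the nearest normal frame first. */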
int
vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
        int i;
        int pos = cfp->pc - cfp->iseq->iseq_encoded;

        for (i = 0; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
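/* The pc-to-line mapping is resolved by scanning insn_info_table for the
 * entry at the current pc offset and taking the line of the entry before
 * it; a linear scan is acceptable since this only runs for backtraces and
 * error reporting, not on the hot path. */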
static VALUE
vm_backtrace_each(rb_thread_t *th,
                  const rb_control_frame_t *limit_cfp, const rb_control_frame_t *cfp,
                  const char * file, int line_no, VALUE ary)
{
    VALUE str;

    while (cfp > limit_cfp) {
        str = 0;
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                rb_iseq_t *iseq = cfp->iseq;

                line_no = vm_get_sourceline(cfp);
                file = RSTRING_PTR(iseq->filename);
                str = rb_sprintf("%s:%d:in `%s'",
                                 file, line_no, RSTRING_PTR(iseq->name));
                rb_ary_push(ary, str);
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no,
                             rb_id2name(cfp->method_id));
            rb_ary_push(ary, str);
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return rb_ary_reverse(ary);
}
VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = rb_ary_new();
    const rb_control_frame_t *cfp = th->cfp;
    const rb_control_frame_t *top_of_cfp = (void *)(th->stack + th->stack_size);
    top_of_cfp -= 2;

    while (lev-- >= 0) {
        cfp++;
        if (cfp >= top_of_cfp) {
            return Qnil;
        }
    }

    ary = vm_backtrace_each(th, RUBY_VM_NEXT_CONTROL_FRAME(cfp),
                            top_of_cfp, "", 0, ary);
    return ary;
}
const char *
rb_sourcefile(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp) {
        return RSTRING_PTR(cfp->iseq->filename);
    }
    return 0;
}
int
rb_sourceline(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp) {
        return vm_get_sourceline(cfp);
    }
    return 0;
}
NODE *
vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
static void
debug_cref(NODE *cref)
{
    while (cref) {
        printf("%ld\n", cref->nd_visi);
        cref = cref->nd_next;
    }
}
NODE *
vm_cref_push(rb_thread_t *th, VALUE klass, int noex)
{
    rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
    NODE *cref = NEW_BLOCK(klass);

    cref->nd_visi = noex;

    if (cfp) {
        cref->nd_next = vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
    }

    return cref;
}
static VALUE
vm_get_cbase(const rb_iseq_t *iseq, const VALUE *lfp, const VALUE *dfp)
{
    NODE *cref = vm_get_cref(iseq, lfp, dfp);
    VALUE klass = Qundef;

    while (cref) {
        if ((klass = cref->nd_clss) != 0) {
            break;
        }
        cref = cref->nd_next;
    }

    return klass;
}
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);

    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
void
vm_localjump_error(const char *mesg, VALUE value, int reason)
{
    VALUE exc = make_localjump_error(mesg, value, reason);
    rb_exc_raise(exc);
}
VALUE
vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef)
        val = GET_THREAD()->tag->retval;
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
void
vm_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE exc = vm_make_jump_tag_but_local_jump(state, val);
    if (!NIL_P(exc)) {
        rb_exc_raise(exc);
    }
    JUMP_TAG(state);
}
NORETURN(static void vm_iter_break(rb_thread_t *th));

static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}

void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
/* optimization: redefine management */

VALUE ruby_vm_redefined_flag = 0;
static st_table *vm_opt_method_table = 0;
void
rb_vm_check_redefinition_opt_method(const NODE *node)
{
    VALUE bop;

    if (st_lookup(vm_opt_method_table, (st_data_t)node, &bop)) {
        ruby_vm_redefined_flag |= bop;
    }
}
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    NODE *node;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&node) &&
        nd_type(node->nd_body->nd_body) == NODE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)node, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_)
#define C(k) add_opt_method(rb_c##k, mid, bop)
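    /* Each OP/C pair below expands to plain assignments and calls; e.g.
     *   OP(PLUS, PLUS), (C(Fixnum), C(Float));
     * becomes
     *   (mid = idPLUS, bop = BOP_PLUS),
     *   (add_opt_method(rb_cFixnum, mid, bop),
     *    add_opt_method(rb_cFloat, mid, bop));
     * i.e. it records which C-implemented methods guard each basic
     * operation, so redefining one flips ruby_vm_redefined_flag. */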
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time)));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
#include "vm_evalbody.c"
/*
  cfunc finish F1 F2 C1
    rb_funcall finish F1 F2 C1
          VM finish F1 F2 C1 F3

  F1 - F3 : pushed by VM
  C1 : pushed by send insn (CFUNC)

  struct CONTROL_FRAME {
    VALUE *pc;                  // cfp[0], program counter
    VALUE *sp;                  // cfp[1], stack pointer
    VALUE *bp;                  // cfp[2], base pointer
    rb_iseq_t *iseq;            // cfp[3], iseq
    VALUE flag;                 // cfp[4], magic
    VALUE self;                 // cfp[5], self
    VALUE *lfp;                 // cfp[6], local frame pointer
    VALUE *dfp;                 // cfp[7], dynamic frame pointer
    rb_iseq_t * block_iseq;     // cfp[8], block iseq
    VALUE proc;                 // cfp[9], always 0
  };

  struct BLOCK {
    VALUE self;
    VALUE *lfp;
    VALUE *dfp;
    rb_iseq_t *block_iseq;
    VALUE proc;
  };

  struct METHOD_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct METHOD_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE special;                         // lfp [1]
    struct block_object *block_ptr | 0x01; // lfp [0]
  };

  struct BLOCK_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct BLOCK_FRAME {
    VALUE arg0;
    ...
    VALUE argM;
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE *(prev_ptr | 0x01);              // DFP[0]
  };

  struct CLASS_CONTROL_FRAME {
    rb_control_frame_t frame;
  };

  struct CLASS_FRAME {
    VALUE param0;
    ...
    VALUE paramN;
    VALUE cref;
    VALUE prev_dfp;             // for frame jump
  };

  struct C_METHOD_CONTROL_FRAME {
    VALUE *pc;                  // 0
    VALUE *sp;                  // stack pointer
    VALUE *bp;                  // base pointer (used in exception)
    rb_iseq_t *iseq;            // cmi
    VALUE magic;                // C_METHOD_FRAME
    VALUE self;                 // ?
    VALUE *lfp;                 // lfp
    VALUE *dfp;                 // == lfp
    rb_iseq_t * block_iseq;     //
    VALUE proc;                 // always 0
  };

  struct C_BLOCK_CONTROL_FRAME {
    VALUE *pc;                  // point only "finish" insn
    VALUE *sp;                  // sp
    rb_iseq_t *iseq;            // ?
    VALUE magic;                // C_METHOD_FRAME
    VALUE self;                 // needed?
    VALUE *lfp;                 // lfp
    VALUE *dfp;                 // lfp
    rb_iseq_t * block_iseq;     // 0
  };
 */
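/* Reading the diagram above: control frames grow downward from the end of
 * th->stack while the value stack grows upward from the start, so the
 * "previous" frame is cfp + 1 and overflow checks compare against
 * th->stack + th->stack_size.  A minimal sketch of walking Ruby-level
 * frames, assuming only the layout documented here:
 *
 *   rb_control_frame_t *cfp = th->cfp;
 *   while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
 *       if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq))
 *           printf("%s\n", RSTRING_PTR(cfp->iseq->name));
 *       cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
 *   }
 */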
static VALUE
vm_eval_body(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_eval(th, initial);
        if ((state = th->state) != 0) {
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        VALUE type;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;

        err = th->errinfo;

        if (state == TAG_RAISE) {
            if (OBJ_FROZEN(err)) rb_exc_raise(err);
            rb_ivar_set(err, idThrowState, INT2FIX(state));
        }

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->dfp == escape_dfp) {
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        result = GET_THROWOBJ_VAL(err);
                        th->errinfo = Qnil;
                        th->cfp += 2;
                        goto finish_vme;
                    }
                    /* fall through to the catch-table search */
                }
                else {
                    /* TAG_BREAK */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        if (state != TAG_REDO) {
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* found catch table */
            rb_iseq_t *catch_iseq;

            /* enter catch scope */
            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push block frame */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* no handler in this frame: pop it and retry in the caller */
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (th->cfp->pc != &finish_insn_seq[0]) {
                goto exception_handler;
            }
            else {
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
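/* Exception dispatch in a nutshell: unwind to the nearest frame with a pc
 * and an iseq, translate the pc to an offset, then scan that iseq's catch
 * table for an entry covering the offset whose type matches the thrown tag
 * (rescue/ensure for raise; break/redo/next/retry for their tags).  A hit
 * re-enters the main loop inside a pushed handler frame; a miss pops the
 * frame and repeats, until a finish frame rethrows to the enclosing C code. */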
VALUE
rb_iseq_eval(VALUE iseqval)
{
    rb_thread_t *th = GET_THREAD();
    VALUE val;
    volatile VALUE tmp;

    vm_set_top_stack(th, iseqval);

    if (!rb_const_defined(rb_cObject, rb_intern("TOPLEVEL_BINDING"))) {
        rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
    }
    val = vm_eval_body(th);
    tmp = iseqval;              /* prohibit tail call optimization */
    return val;
}
int
rb_thread_method_id_and_class(rb_thread_t *th,
                              ID *idp, VALUE *klassp)
{
    rb_control_frame_t *cfp = th->cfp;
    rb_iseq_t *iseq = cfp->iseq;
    if (!iseq) {
        if (idp) *idp = cfp->method_id;
        if (klassp) *klassp = cfp->method_class;
        return 1;
    }
    while (iseq) {
        if (RUBY_VM_IFUNC_P(iseq)) {
            if (idp) CONST_ID(*idp, "<ifunc>");
            if (klassp) *klassp = 0;
            return 1;
        }
        if (iseq->defined_method_id) {
            if (idp) *idp = iseq->defined_method_id;
            if (klassp) *klassp = iseq->klass;
            return 1;
        }
        if (iseq->local_iseq == iseq) {
            break;
        }
        iseq = iseq->parent_iseq;
    }
    return 0;
}
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}
VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
        if (cfp->pc != 0) {
            rb_iseq_t *iseq = cfp->iseq;
            int line_no = vm_get_sourceline(cfp);
            char *file = RSTRING_PTR(iseq->filename);
            str = rb_sprintf("%s:%d:in `%s'",
                             file, line_no, RSTRING_PTR(iseq->name));
        }
    }
    else if (cfp->method_id) {
        str = rb_sprintf("`%s#%s' (cfunc)",
                         RSTRING_PTR(rb_class_name(cfp->method_class)),
                         rb_id2name(cfp->method_id));
    }

    return str;
}
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);
    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
1353 RUBY_FREE_ENTER("vm");
1355 rb_vm_t
*vmobj
= ptr
;
1357 st_free_table(vmobj
->living_threads
);
1358 vmobj
->living_threads
= 0;
1359 /* TODO: MultiVM Instance */
1360 /* VM object should not be cleaned by GC */
1361 /* ruby_xfree(ptr); */
1362 /* ruby_current_vm = 0; */
1364 RUBY_FREE_LEAVE("vm");
static int
vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
{
    VALUE thval = (VALUE)key;
    rb_gc_mark(thval);
    return ST_CONTINUE;
}
static void
mark_event_hooks(rb_event_hook_t *hook)
{
    while (hook) {
        rb_gc_mark(hook->data);
        hook = hook->next;
    }
}
void
rb_vm_mark(void *ptr)
{
    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        mark_event_hooks(vm->event_hooks);
    }

    RUBY_MARK_LEAVE("vm");
}
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
}
#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
#define RECYCLE_MAX 64
VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
int thread_recycle_stack_count = 0;

static VALUE *
thread_recycle_stack(int size)
{
    if (thread_recycle_stack_count) {
        return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
    else {
        return ALLOC_N(VALUE, size);
    }
}

#else
#define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
#endif
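/* Thread stacks are a fixed, fairly large allocation, so up to RECYCLE_MAX
 * of them are kept on a free list instead of going back to the allocator;
 * rb_thread_recycle_stack_release below is the matching push side. */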
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
#ifdef USE_THREAD_RECYCLE
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
#endif
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
        th = ptr;

        if (!th->root_fiber) {
            RUBY_FREE_UNLESS_NULL(th->stack);
        }

        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_free: locking_mutex must be NULL (%p:%ld)", th, th->locking_mutex);
        }
        if (th->keeping_mutexes != NULL) {
            rb_bug("thread_free: keeping_mutexes must be NULL (%p:%ld)", th, th->locking_mutex);
        }

        if (th->local_storage) {
            st_free_table(th->local_storage);
        }

#if USE_VALUE_CACHE
        {
            VALUE *ptr = th->value_cache_ptr;
            while (*ptr) {
                VALUE v = *ptr;
                RBASIC(v)->flags = 0;
                RBASIC(v)->klass = 0;
                ptr++;
            }
        }
#endif

        if (th->vm->main_thread == th) {
            RUBY_GC_INFO("main thread\n");
        }
        else {
            ruby_xfree(ptr);
        }
    }
    RUBY_FREE_LEAVE("thread");
}
void rb_gc_mark_machine_stack(rb_thread_t *th);
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            while (p < sp) {
                rb_gc_mark(*p++);
            }
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            while (cfp != limit_cfp) {
                rb_gc_mark(cfp->proc);
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
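/* Both stacks get marked: the VM value stack exactly (everything below sp,
 * plus mark_stack_len slack), and, for threads other than the running one,
 * the machine stack and saved registers conservatively, since VALUEs may
 * be live only in C locals there. */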
static VALUE
thread_alloc(VALUE klass)
{
    VALUE obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = Data_Wrap_Struct(klass, rb_thread_mark, thread_free, th);
#else
    rb_thread_t *th;
    obj = Data_Make_Struct(klass, rb_thread_t, rb_thread_mark, thread_free, th);
#endif
    return obj;
}
static void
th_init2(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;

#if USE_VALUE_CACHE
    th->value_cache_ptr = &th->value_cache[0];
#endif
}
static void
th_init(rb_thread_t *th, VALUE self)
{
    th_init2(th, self);
}
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
VALUE
rb_thread_alloc(VALUE klass)
{
    VALUE self = thread_alloc(klass);
    ruby_thread_init(self);
    return self;
}
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    NODE *newbody;
    VALUE klass = cref->nd_clss;
    int noex = cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        if (OBJ_FROZEN(obj)) {
            rb_error_frozen("object");
        }

        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* dup */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->klass = klass;
    miseq->defined_method_id = id;
    newbody = NEW_NODE(RUBY_VM_METHOD_NODE, 0, miseq->self, 0);
    rb_add_method(klass, id, newbody, noex);

    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, newbody, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, vm_cref());
    });
    return Qnil;
}
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, vm_cref());
    });
    return Qnil;
}
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        INC_VM_STATE_VERSION();
    });
    return Qnil;
}
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        VALUE proc;
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = vm_get_ruby_level_next_cfp(th, th->cfp);
        extern void rb_call_end_proc(VALUE data);

        GetISeqPtr(iseqval, blockiseq);

        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = vm_make_proc(th, cfp, blockptr);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
VALUE insns_name_array(void);
extern VALUE *rb_gc_stack_start;
extern size_t rb_gc_stack_maxsize;
#ifdef __ia64
extern VALUE *rb_gc_register_stack_start;
#endif
/* debug functions */

/* :nodoc: */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}

/* :nodoc: */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* OK */
#endif
    return ary;
}
void
Init_VM(void)
{
    VALUE opts;
    VALUE klass;
    VALUE fcore;

    /* ::VM */
    rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
    rb_undef_alloc_func(rb_cRubyVM);

    /* ::VM::FrozenCore */
    fcore = rb_module_new();
    RBASIC(fcore)->flags = T_ICLASS;
    klass = rb_singleton_class(fcore);
    rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
    rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
    rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
    rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
    rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
    rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
    rb_obj_freeze(fcore);
    rb_mRubyVMFrozenCore = fcore;

    /* ::VM::Env */
    rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
    rb_undef_alloc_func(rb_cEnv);

    /* ::Thread */
    rb_cThread = rb_define_class("Thread", rb_cObject);
    rb_undef_alloc_func(rb_cThread);

    /* ::VM::USAGE_ANALYSIS_* */
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
    rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
    rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());

#if OPT_DIRECT_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("direct threaded code"));
#elif OPT_TOKEN_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("token threaded code"));
#elif OPT_CALL_THREADED_CODE
    rb_ary_push(opts, rb_str_new2("call threaded code"));
#endif

#if OPT_BASIC_OPERATIONS
    rb_ary_push(opts, rb_str_new2("optimize basic operation"));
#endif

#if OPT_STACK_CACHING
    rb_ary_push(opts, rb_str_new2("stack caching"));
#endif
#if OPT_OPERANDS_UNIFICATION
    rb_ary_push(opts, rb_str_new2("operands unification"));
#endif
#if OPT_INSTRUCTIONS_UNIFICATION
    rb_ary_push(opts, rb_str_new2("instructions unification"));
#endif
#if OPT_INLINE_METHOD_CACHE
    rb_ary_push(opts, rb_str_new2("inline method cache"));
#endif
#if OPT_BLOCKINLINING
    rb_ary_push(opts, rb_str_new2("block inlining"));
#endif

    /* ::VM::InsnNameArray */
    rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", insns_name_array());

    /* debug functions ::VM::SDR(), ::VM::NSDR() */
#if VMDEBUG
    rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
    rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
#endif
    /* VM bootstrap: phase 2 */
    {
        rb_vm_t *vm = ruby_current_vm;
        rb_thread_t *th = GET_THREAD();
        VALUE filename = rb_str_new2("<dummy toplevel>");
        volatile VALUE iseqval = rb_iseq_new(0, filename, filename, 0, ISEQ_TYPE_TOP);
        volatile VALUE th_self;
        rb_iseq_t *iseq;

        /* create vm object */
        vm->self = Data_Wrap_Struct(rb_cRubyVM, rb_vm_mark, vm_free, vm);

        /* create main thread */
        th_self = th->self = Data_Wrap_Struct(rb_cThread, rb_thread_mark, thread_free, th);
        vm->main_thread = th;
        vm->running_thread = th;
        th->vm = vm;
        th->top_wrapper = 0;
        th->top_self = rb_vm_top_self();
        rb_thread_set_current(th);

        vm->living_threads = st_init_numtable();
        st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);

        rb_register_mark_object(iseqval);
        GetISeqPtr(iseqval, iseq);
        th->cfp->iseq = iseq;
        th->cfp->pc = iseq->iseq_encoded;
    }
    vm_init_redefined_flag();
}
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
struct rb_objspace *rb_objspace_alloc(void);
#endif

void ruby_thread_init_stack(rb_thread_t *th);

void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    th_init2(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
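/* Bootstrap is split in two because the core classes do not exist yet when
 * the VM and main-thread structs are first needed: Init_BareVM wires up raw
 * malloc'd structs, and Init_VM's "phase 2" block later wraps them in real
 * RubyVM/Thread objects once those classes have been defined. */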
/* top self */

static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}

VALUE
rb_vm_top_self(void)
{
    return GET_VM()->top_self;
}
void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
}
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}

VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}

VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}

VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}