1 /**********************************************************************
7 Copyright (C) 2004-2007 Koichi Sasada
9 **********************************************************************/
11 #define vm_exec rb_vm_exec
13 #include "eval_intern.h"
16 #include "internal/compile.h"
17 #include "internal/cont.h"
18 #include "internal/error.h"
19 #include "internal/eval.h"
20 #include "internal/inits.h"
21 #include "internal/object.h"
22 #include "internal/parse.h"
23 #include "internal/proc.h"
24 #include "internal/re.h"
25 #include "internal/symbol.h"
26 #include "internal/thread.h"
27 #include "internal/vm.h"
28 #include "internal/sanitizers.h"
35 #include "vm_callinfo.h"
38 #include "vm_insnhelper.h"
39 #include "ractor_core.h"
47 #include "probes.dmyh"
49 #include "probes_helper.h"
51 VALUE
rb_str_concat_literals(size_t, const VALUE
*);
53 /* :FIXME: This #ifdef is because we build pch in case of mswin and
54 * not in case of other situations. That distinction might change in
55 * a future. We would better make it detectable in something better
56 * than just _MSC_VER. */
62 VALUE
vm_exec(rb_execution_context_t
*, bool);
64 PUREFUNC(static inline const VALUE
*VM_EP_LEP(const VALUE
*));
65 static inline const VALUE
*
66 VM_EP_LEP(const VALUE
*ep
)
68 while (!VM_ENV_LOCAL_P(ep
)) {
69 ep
= VM_ENV_PREV_EP(ep
);
74 static inline const rb_control_frame_t
*
75 rb_vm_search_cf_from_ep(const rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
, const VALUE
* const ep
)
81 const rb_control_frame_t
* const eocfp
= RUBY_VM_END_CONTROL_FRAME(ec
); /* end of control frame pointer */
87 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
95 rb_vm_ep_local_ep(const VALUE
*ep
)
100 PUREFUNC(static inline const VALUE
*VM_CF_LEP(const rb_control_frame_t
* const cfp
));
101 static inline const VALUE
*
102 VM_CF_LEP(const rb_control_frame_t
* const cfp
)
104 return VM_EP_LEP(cfp
->ep
);
107 static inline const VALUE
*
108 VM_CF_PREV_EP(const rb_control_frame_t
* const cfp
)
110 return VM_ENV_PREV_EP(cfp
->ep
);
113 PUREFUNC(static inline VALUE
VM_CF_BLOCK_HANDLER(const rb_control_frame_t
* const cfp
));
115 VM_CF_BLOCK_HANDLER(const rb_control_frame_t
* const cfp
)
117 const VALUE
*ep
= VM_CF_LEP(cfp
);
118 return VM_ENV_BLOCK_HANDLER(ep
);
122 rb_vm_cframe_keyword_p(const rb_control_frame_t
*cfp
)
124 return VM_FRAME_CFRAME_KW_P(cfp
);
128 rb_vm_frame_block_handler(const rb_control_frame_t
*cfp
)
130 return VM_CF_BLOCK_HANDLER(cfp
);
133 #if VM_CHECK_MODE > 0
135 VM_CFP_IN_HEAP_P(const rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
)
137 const VALUE
*start
= ec
->vm_stack
;
138 const VALUE
*end
= (VALUE
*)ec
->vm_stack
+ ec
->vm_stack_size
;
139 VM_ASSERT(start
!= NULL
);
141 if (start
<= (VALUE
*)cfp
&& (VALUE
*)cfp
< end
) {
150 VM_EP_IN_HEAP_P(const rb_execution_context_t
*ec
, const VALUE
*ep
)
152 const VALUE
*start
= ec
->vm_stack
;
153 const VALUE
*end
= (VALUE
*)ec
->cfp
;
154 VM_ASSERT(start
!= NULL
);
156 if (start
<= ep
&& ep
< end
) {
165 vm_ep_in_heap_p_(const rb_execution_context_t
*ec
, const VALUE
*ep
)
167 if (VM_EP_IN_HEAP_P(ec
, ep
)) {
168 VALUE envval
= ep
[VM_ENV_DATA_INDEX_ENV
]; /* VM_ENV_ENVVAL(ep); */
170 if (envval
!= Qundef
) {
171 const rb_env_t
*env
= (const rb_env_t
*)envval
;
173 VM_ASSERT(vm_assert_env(envval
));
174 VM_ASSERT(VM_ENV_FLAGS(ep
, VM_ENV_FLAG_ESCAPED
));
175 VM_ASSERT(env
->ep
== ep
);
185 rb_vm_ep_in_heap_p(const VALUE
*ep
)
187 const rb_execution_context_t
*ec
= GET_EC();
188 if (ec
->vm_stack
== NULL
) return TRUE
;
189 return vm_ep_in_heap_p_(ec
, ep
);
193 static struct rb_captured_block
*
194 VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t
*cfp
)
196 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp
));
197 return (struct rb_captured_block
*)&cfp
->self
;
200 static rb_control_frame_t
*
201 VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block
*captured
)
203 rb_control_frame_t
*cfp
= ((rb_control_frame_t
*)((VALUE
*)(captured
) - 3));
204 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp
));
205 VM_ASSERT(sizeof(rb_control_frame_t
)/sizeof(VALUE
) == 8 + VM_DEBUG_BP_CHECK
? 1 : 0);
210 VM_BH_FROM_CFP_P(VALUE block_handler
, const rb_control_frame_t
*cfp
)
212 const struct rb_captured_block
*captured
= VM_CFP_TO_CAPTURED_BLOCK(cfp
);
213 return VM_TAGGED_PTR_REF(block_handler
, 0x03) == captured
;
217 vm_passed_block_handler(rb_execution_context_t
*ec
)
219 VALUE block_handler
= ec
->passed_block_handler
;
220 ec
->passed_block_handler
= VM_BLOCK_HANDLER_NONE
;
221 vm_block_handler_verify(block_handler
);
222 return block_handler
;
226 vm_cref_new0(VALUE klass
, rb_method_visibility_t visi
, int module_func
, rb_cref_t
*prev_cref
, int pushed_by_eval
, int use_prev_prev
, int singleton
)
228 VALUE refinements
= Qnil
;
229 int omod_shared
= FALSE
;
234 rb_scope_visibility_t visi
;
238 scope_visi
.visi
.method_visi
= visi
;
239 scope_visi
.visi
.module_func
= module_func
;
242 if (prev_cref
!= NULL
&& prev_cref
!= (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
243 refinements
= CREF_REFINEMENTS(prev_cref
);
245 if (!NIL_P(refinements
)) {
247 CREF_OMOD_SHARED_SET(prev_cref
);
251 VM_ASSERT(singleton
|| klass
);
253 cref
= (rb_cref_t
*)rb_imemo_new(imemo_cref
, klass
, (VALUE
)(use_prev_prev
? CREF_NEXT(prev_cref
) : prev_cref
), scope_visi
.value
, refinements
);
255 if (pushed_by_eval
) CREF_PUSHED_BY_EVAL_SET(cref
);
256 if (omod_shared
) CREF_OMOD_SHARED_SET(cref
);
257 if (singleton
) CREF_SINGLETON_SET(cref
);
263 vm_cref_new(VALUE klass
, rb_method_visibility_t visi
, int module_func
, rb_cref_t
*prev_cref
, int pushed_by_eval
, int singleton
)
265 return vm_cref_new0(klass
, visi
, module_func
, prev_cref
, pushed_by_eval
, FALSE
, singleton
);
269 vm_cref_new_use_prev(VALUE klass
, rb_method_visibility_t visi
, int module_func
, rb_cref_t
*prev_cref
, int pushed_by_eval
)
271 return vm_cref_new0(klass
, visi
, module_func
, prev_cref
, pushed_by_eval
, TRUE
, FALSE
);
275 ref_delete_symkey(VALUE key
, VALUE value
, VALUE unused
)
277 return SYMBOL_P(key
) ? ST_DELETE
: ST_CONTINUE
;
281 vm_cref_dup(const rb_cref_t
*cref
)
283 const rb_scope_visibility_t
*visi
= CREF_SCOPE_VISI(cref
);
284 rb_cref_t
*next_cref
= CREF_NEXT(cref
), *new_cref
;
285 int pushed_by_eval
= CREF_PUSHED_BY_EVAL(cref
);
286 int singleton
= CREF_SINGLETON(cref
);
288 new_cref
= vm_cref_new(cref
->klass_or_self
, visi
->method_visi
, visi
->module_func
, next_cref
, pushed_by_eval
, singleton
);
290 if (!NIL_P(CREF_REFINEMENTS(cref
))) {
291 VALUE ref
= rb_hash_dup(CREF_REFINEMENTS(cref
));
292 rb_hash_foreach(ref
, ref_delete_symkey
, Qnil
);
293 CREF_REFINEMENTS_SET(new_cref
, ref
);
294 CREF_OMOD_SHARED_UNSET(new_cref
);
302 rb_vm_cref_dup_without_refinements(const rb_cref_t
*cref
)
304 const rb_scope_visibility_t
*visi
= CREF_SCOPE_VISI(cref
);
305 rb_cref_t
*next_cref
= CREF_NEXT(cref
), *new_cref
;
306 int pushed_by_eval
= CREF_PUSHED_BY_EVAL(cref
);
307 int singleton
= CREF_SINGLETON(cref
);
309 new_cref
= vm_cref_new(cref
->klass_or_self
, visi
->method_visi
, visi
->module_func
, next_cref
, pushed_by_eval
, singleton
);
311 if (!NIL_P(CREF_REFINEMENTS(cref
))) {
312 CREF_REFINEMENTS_SET(new_cref
, Qnil
);
313 CREF_OMOD_SHARED_UNSET(new_cref
);
320 vm_cref_new_toplevel(rb_execution_context_t
*ec
)
322 rb_cref_t
*cref
= vm_cref_new(rb_cObject
, METHOD_VISI_PRIVATE
/* toplevel visibility is private */, FALSE
, NULL
, FALSE
, FALSE
);
323 VALUE top_wrapper
= rb_ec_thread_ptr(ec
)->top_wrapper
;
326 cref
= vm_cref_new(top_wrapper
, METHOD_VISI_PRIVATE
, FALSE
, cref
, FALSE
, FALSE
);
333 rb_vm_cref_new_toplevel(void)
335 return vm_cref_new_toplevel(GET_EC());
339 vm_cref_dump(const char *mesg
, const rb_cref_t
*cref
)
341 ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg
, (void *)cref
);
344 ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref
))));
345 cref
= CREF_NEXT(cref
);
350 rb_vm_block_ep_update(VALUE obj
, const struct rb_block
*dst
, const VALUE
*ep
)
352 *((const VALUE
**)&dst
->as
.captured
.ep
) = ep
;
353 RB_OBJ_WRITTEN(obj
, Qundef
, VM_ENV_ENVVAL(ep
));
357 vm_bind_update_env(VALUE bindval
, rb_binding_t
*bind
, VALUE envval
)
359 const rb_env_t
*env
= (rb_env_t
*)envval
;
360 RB_OBJ_WRITE(bindval
, &bind
->block
.as
.captured
.code
.iseq
, env
->iseq
);
361 rb_vm_block_ep_update(bindval
, &bind
->block
, env
->ep
);
364 #if VM_COLLECT_USAGE_DETAILS
365 static void vm_collect_usage_operand(int insn
, int n
, VALUE op
);
366 static void vm_collect_usage_insn(int insn
);
367 static void vm_collect_usage_register(int reg
, int isset
);
370 static VALUE
vm_make_env_object(const rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
);
371 extern VALUE
rb_vm_invoke_bmethod(rb_execution_context_t
*ec
, rb_proc_t
*proc
, VALUE self
,
372 int argc
, const VALUE
*argv
, int kw_splat
, VALUE block_handler
,
373 const rb_callable_method_entry_t
*me
);
374 static VALUE
vm_invoke_proc(rb_execution_context_t
*ec
, rb_proc_t
*proc
, VALUE self
, int argc
, const VALUE
*argv
, int kw_splat
, VALUE block_handler
);
376 #include "vm_insnhelper.c"
382 #include "vm_method.c"
383 #endif /* #ifndef MJIT_HEADER */
390 rb_next_class_serial(void)
392 rb_serial_t class_serial
= NEXT_CLASS_SERIAL();
398 VALUE rb_mRubyVMFrozenCore
;
399 VALUE rb_block_param_proxy
;
401 #define ruby_vm_redefined_flag GET_VM()->redefined_flag
402 VALUE ruby_vm_const_missing_count
= 0;
403 rb_vm_t
*ruby_current_vm_ptr
= NULL
;
404 rb_ractor_t
*ruby_single_main_ractor
;
405 bool ruby_vm_keep_script_lines
;
407 #ifdef RB_THREAD_LOCAL_SPECIFIER
408 RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t
*ruby_current_ec
;
411 rb_execution_context_t
*
414 return ruby_current_ec
;
417 rb_current_ec_set(rb_execution_context_t
*ec
)
419 ruby_current_ec
= ec
;
424 native_tls_key_t ruby_current_ec_key
;
427 rb_event_flag_t ruby_vm_event_flags
;
428 rb_event_flag_t ruby_vm_event_enabled_global_flags
;
429 unsigned int ruby_vm_event_local_num
;
431 rb_serial_t ruby_vm_global_constant_state
= 1;
432 rb_serial_t ruby_vm_class_serial
= 1;
433 rb_serial_t ruby_vm_global_cvar_state
= 1;
435 static const struct rb_callcache vm_empty_cc
= {
436 .flags
= T_IMEMO
| (imemo_callcache
<< FL_USHIFT
) | VM_CALLCACHE_UNMARKABLE
,
439 .call_
= vm_call_general
,
445 static const struct rb_callcache vm_empty_cc_for_super
= {
446 .flags
= T_IMEMO
| (imemo_callcache
<< FL_USHIFT
) | VM_CALLCACHE_UNMARKABLE
,
449 .call_
= vm_call_super_method
,
455 static void thread_free(void *ptr
);
458 rb_vm_inc_const_missing_count(void)
460 ruby_vm_const_missing_count
+=1;
463 MJIT_FUNC_EXPORTED
int
464 rb_dtrace_setup(rb_execution_context_t
*ec
, VALUE klass
, ID id
,
465 struct ruby_dtrace_method_hook_args
*args
)
467 enum ruby_value_type type
;
469 if (!ec
) ec
= GET_EC();
470 if (!rb_ec_frame_method_id_and_class(ec
, &id
, 0, &klass
) || !klass
)
473 if (RB_TYPE_P(klass
, T_ICLASS
)) {
474 klass
= RBASIC(klass
)->klass
;
476 else if (FL_TEST(klass
, FL_SINGLETON
)) {
477 klass
= rb_attr_get(klass
, id__attached__
);
478 if (NIL_P(klass
)) return FALSE
;
480 type
= BUILTIN_TYPE(klass
);
481 if (type
== T_CLASS
|| type
== T_ICLASS
|| type
== T_MODULE
) {
482 VALUE name
= rb_class_path(klass
);
483 const char *classname
, *filename
;
484 const char *methodname
= rb_id2name(id
);
485 if (methodname
&& (filename
= rb_source_location_cstr(&args
->line_no
)) != 0) {
486 if (NIL_P(name
) || !(classname
= StringValuePtr(name
)))
487 classname
= "<unknown>";
488 args
->classname
= classname
;
489 args
->methodname
= methodname
;
490 args
->filename
= filename
;
501 * RubyVM.stat -> Hash
502 * RubyVM.stat(hsh) -> hsh
503 * RubyVM.stat(Symbol) -> Numeric
505 * Returns a Hash containing implementation-dependent counters inside the VM.
507 * This hash includes information about method/constant cache serials:
510 * :global_constant_state=>481,
511 * :class_serial=>9029
514 * The contents of the hash are implementation specific and may be changed in
517 * This method is only expected to work on C Ruby.
521 vm_stat(int argc
, VALUE
*argv
, VALUE self
)
523 static VALUE sym_global_constant_state
, sym_class_serial
, sym_global_cvar_state
;
525 VALUE hash
= Qnil
, key
= Qnil
;
527 if (rb_check_arity(argc
, 0, 1) == 1) {
531 else if (RB_TYPE_P(arg
, T_HASH
))
534 rb_raise(rb_eTypeError
, "non-hash or symbol given");
537 hash
= rb_hash_new();
540 if (sym_global_constant_state
== 0) {
541 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
542 S(global_constant_state
);
544 S(global_cvar_state
);
548 #define SET(name, attr) \
549 if (key == sym_##name) \
550 return SERIALT2NUM(attr); \
551 else if (hash != Qnil) \
552 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
554 SET(global_constant_state
, ruby_vm_global_constant_state
);
555 SET(class_serial
, ruby_vm_class_serial
);
556 SET(global_cvar_state
, ruby_vm_global_cvar_state
);
559 if (!NIL_P(key
)) { /* matched key should return above */
560 rb_raise(rb_eArgError
, "unknown key: %"PRIsVALUE
, rb_sym2str(key
));
566 /* control stack frame */
569 vm_set_top_stack(rb_execution_context_t
*ec
, const rb_iseq_t
*iseq
)
571 if (iseq
->body
->type
!= ISEQ_TYPE_TOP
) {
572 rb_raise(rb_eTypeError
, "Not a toplevel InstructionSequence");
576 vm_push_frame(ec
, iseq
, VM_FRAME_MAGIC_TOP
| VM_ENV_FLAG_LOCAL
| VM_FRAME_FLAG_FINISH
, rb_ec_thread_ptr(ec
)->top_self
,
577 VM_BLOCK_HANDLER_NONE
,
578 (VALUE
)vm_cref_new_toplevel(ec
), /* cref or me */
579 iseq
->body
->iseq_encoded
, ec
->cfp
->sp
,
580 iseq
->body
->local_table_size
, iseq
->body
->stack_max
);
584 vm_set_eval_stack(rb_execution_context_t
*ec
, const rb_iseq_t
*iseq
, const rb_cref_t
*cref
, const struct rb_block
*base_block
)
586 vm_push_frame(ec
, iseq
, VM_FRAME_MAGIC_EVAL
| VM_FRAME_FLAG_FINISH
,
587 vm_block_self(base_block
), VM_GUARDED_PREV_EP(vm_block_ep(base_block
)),
588 (VALUE
)cref
, /* cref or me */
589 iseq
->body
->iseq_encoded
,
590 ec
->cfp
->sp
, iseq
->body
->local_table_size
,
591 iseq
->body
->stack_max
);
595 vm_set_main_stack(rb_execution_context_t
*ec
, const rb_iseq_t
*iseq
)
597 VALUE toplevel_binding
= rb_const_get(rb_cObject
, rb_intern("TOPLEVEL_BINDING"));
600 GetBindingPtr(toplevel_binding
, bind
);
601 RUBY_ASSERT_MESG(bind
, "TOPLEVEL_BINDING is not built");
603 vm_set_eval_stack(ec
, iseq
, 0, &bind
->block
);
606 if (iseq
->body
->local_table_size
> 0) {
607 vm_bind_update_env(toplevel_binding
, bind
, vm_make_env_object(ec
, ec
->cfp
));
612 rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
)
614 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec
, cfp
)) {
616 return (rb_control_frame_t
*)cfp
;
618 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
623 MJIT_FUNC_EXPORTED rb_control_frame_t
*
624 rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
)
626 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec
, cfp
)) {
627 if (VM_FRAME_RUBYFRAME_P(cfp
)) {
628 return (rb_control_frame_t
*)cfp
;
630 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
635 #endif /* #ifndef MJIT_HEADER */
637 static rb_control_frame_t
*
638 vm_get_ruby_level_caller_cfp(const rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
)
640 if (VM_FRAME_RUBYFRAME_P(cfp
)) {
641 return (rb_control_frame_t
*)cfp
;
644 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
646 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec
, cfp
)) {
647 if (VM_FRAME_RUBYFRAME_P(cfp
)) {
648 return (rb_control_frame_t
*)cfp
;
651 if (VM_ENV_FLAGS(cfp
->ep
, VM_FRAME_FLAG_PASSED
) == FALSE
) {
654 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
660 rb_vm_pop_cfunc_frame(void)
662 rb_execution_context_t
*ec
= GET_EC();
663 rb_control_frame_t
*cfp
= ec
->cfp
;
664 const rb_callable_method_entry_t
*me
= rb_vm_frame_method_entry(cfp
);
666 EXEC_EVENT_HOOK(ec
, RUBY_EVENT_C_RETURN
, cfp
->self
, me
->def
->original_id
, me
->called_id
, me
->owner
, Qnil
);
667 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec
, me
->owner
, me
->def
->original_id
);
668 vm_pop_frame(ec
, cfp
, cfp
->ep
);
674 rb_vm_rewind_cfp(rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
)
676 /* check skipped frame */
677 while (ec
->cfp
!= cfp
) {
679 printf("skipped frame: %s\n", vm_frametype_name(ec
->cfp
));
681 if (VM_FRAME_TYPE(ec
->cfp
) != VM_FRAME_MAGIC_CFUNC
) {
684 else { /* unlikely path */
685 rb_vm_pop_cfunc_frame();
693 ruby_vm_at_exit(void (*func
)(rb_vm_t
*))
695 rb_vm_t
*vm
= GET_VM();
696 rb_at_exit_list
*nl
= ALLOC(rb_at_exit_list
);
698 nl
->next
= vm
->at_exit
;
703 ruby_vm_run_at_exit_hooks(rb_vm_t
*vm
)
705 rb_at_exit_list
*l
= vm
->at_exit
;
708 rb_at_exit_list
* t
= l
->next
;
709 rb_vm_at_exit_func
*func
= l
->func
;
718 static VALUE
check_env_value(const rb_env_t
*env
);
721 check_env(const rb_env_t
*env
)
723 fputs("---\n", stderr
);
724 ruby_debug_printf("envptr: %p\n", (void *)&env
->ep
[0]);
725 ruby_debug_printf("envval: %10p ", (void *)env
->ep
[1]);
727 ruby_debug_printf("ep: %10p\n", (void *)env
->ep
);
728 if (rb_vm_env_prev_env(env
)) {
729 fputs(">>\n", stderr
);
730 check_env_value(rb_vm_env_prev_env(env
));
731 fputs("<<\n", stderr
);
737 check_env_value(const rb_env_t
*env
)
739 if (check_env(env
)) {
742 rb_bug("invalid env");
743 return Qnil
; /* unreachable */
747 vm_block_handler_escape(const rb_execution_context_t
*ec
, VALUE block_handler
)
749 switch (vm_block_handler_type(block_handler
)) {
750 case block_handler_type_ifunc
:
751 case block_handler_type_iseq
:
752 return rb_vm_make_proc(ec
, VM_BH_TO_CAPT_BLOCK(block_handler
), rb_cProc
);
754 case block_handler_type_symbol
:
755 case block_handler_type_proc
:
756 return block_handler
;
758 VM_UNREACHABLE(vm_block_handler_escape
);
763 vm_make_env_each(const rb_execution_context_t
* const ec
, rb_control_frame_t
*const cfp
)
765 const VALUE
* const ep
= cfp
->ep
;
767 const rb_iseq_t
*env_iseq
;
768 VALUE
*env_body
, *env_ep
;
769 int local_size
, env_size
;
771 if (VM_ENV_ESCAPED_P(ep
)) {
772 return VM_ENV_ENVVAL(ep
);
775 if (!VM_ENV_LOCAL_P(ep
)) {
776 const VALUE
*prev_ep
= VM_ENV_PREV_EP(ep
);
777 if (!VM_ENV_ESCAPED_P(prev_ep
)) {
778 rb_control_frame_t
*prev_cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
780 while (prev_cfp
->ep
!= prev_ep
) {
781 prev_cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp
);
782 VM_ASSERT(prev_cfp
->ep
!= NULL
);
785 vm_make_env_each(ec
, prev_cfp
);
786 VM_FORCE_WRITE_SPECIAL_CONST(&ep
[VM_ENV_DATA_INDEX_SPECVAL
], VM_GUARDED_PREV_EP(prev_cfp
->ep
));
790 VALUE block_handler
= VM_ENV_BLOCK_HANDLER(ep
);
792 if (block_handler
!= VM_BLOCK_HANDLER_NONE
) {
793 VALUE blockprocval
= vm_block_handler_escape(ec
, block_handler
);
794 VM_STACK_ENV_WRITE(ep
, VM_ENV_DATA_INDEX_SPECVAL
, blockprocval
);
798 if (!VM_FRAME_RUBYFRAME_P(cfp
)) {
799 local_size
= VM_ENV_DATA_SIZE
;
802 local_size
= cfp
->iseq
->body
->local_table_size
+ VM_ENV_DATA_SIZE
;
806 * # local variables on a stack frame (N == local_size)
807 * [lvar1, lvar2, ..., lvarN, SPECVAL]
811 * # moved local variables
812 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
817 env_size
= local_size
+
819 env_body
= ALLOC_N(VALUE
, env_size
);
820 MEMCPY(env_body
, ep
- (local_size
- 1 /* specval */), VALUE
, local_size
);
823 for (i
= 0; i
< local_size
; i
++) {
824 if (VM_FRAME_RUBYFRAME_P(cfp
)) {
825 /* clear value stack for GC */
826 ep
[-local_size
+ i
] = 0;
831 env_iseq
= VM_FRAME_RUBYFRAME_P(cfp
) ? cfp
->iseq
: NULL
;
832 env_ep
= &env_body
[local_size
- 1 /* specval */];
834 env
= vm_env_new(env_ep
, env_body
, env_size
, env_iseq
);
837 VM_ENV_FLAGS_SET(env_ep
, VM_ENV_FLAG_ESCAPED
| VM_ENV_FLAG_WB_REQUIRED
);
838 VM_STACK_ENV_WRITE(ep
, 0, (VALUE
)env
); /* GC mark */
843 vm_make_env_object(const rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
)
845 VALUE envval
= vm_make_env_each(ec
, cfp
);
848 check_env_value((const rb_env_t
*)envval
);
855 rb_vm_stack_to_heap(rb_execution_context_t
*ec
)
857 rb_control_frame_t
*cfp
= ec
->cfp
;
858 while ((cfp
= rb_vm_get_binding_creatable_next_cfp(ec
, cfp
)) != 0) {
859 vm_make_env_object(ec
, cfp
);
860 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
865 rb_vm_env_prev_env(const rb_env_t
*env
)
867 const VALUE
*ep
= env
->ep
;
869 if (VM_ENV_LOCAL_P(ep
)) {
873 const VALUE
*prev_ep
= VM_ENV_PREV_EP(ep
);
874 return VM_ENV_ENVVAL_PTR(prev_ep
);
879 collect_local_variables_in_iseq(const rb_iseq_t
*iseq
, const struct local_var_list
*vars
)
883 for (i
= 0; i
< iseq
->body
->local_table_size
; i
++) {
884 local_var_list_add(vars
, iseq
->body
->local_table
[i
]);
890 collect_local_variables_in_env(const rb_env_t
*env
, const struct local_var_list
*vars
)
893 if (VM_ENV_FLAGS(env
->ep
, VM_ENV_FLAG_ISOLATED
)) break;
894 collect_local_variables_in_iseq(env
->iseq
, vars
);
895 } while ((env
= rb_vm_env_prev_env(env
)) != NULL
);
899 vm_collect_local_variables_in_heap(const VALUE
*ep
, const struct local_var_list
*vars
)
901 if (VM_ENV_ESCAPED_P(ep
)) {
902 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep
), vars
);
911 rb_vm_env_local_variables(const rb_env_t
*env
)
913 struct local_var_list vars
;
914 local_var_list_init(&vars
);
915 collect_local_variables_in_env(env
, &vars
);
916 return local_var_list_finish(&vars
);
920 rb_iseq_local_variables(const rb_iseq_t
*iseq
)
922 struct local_var_list vars
;
923 local_var_list_init(&vars
);
924 while (collect_local_variables_in_iseq(iseq
, &vars
)) {
925 iseq
= iseq
->body
->parent_iseq
;
927 return local_var_list_finish(&vars
);
933 vm_proc_create_from_captured(VALUE klass
,
934 const struct rb_captured_block
*captured
,
935 enum rb_block_type block_type
,
936 int8_t is_from_method
, int8_t is_lambda
)
938 VALUE procval
= rb_proc_alloc(klass
);
939 rb_proc_t
*proc
= RTYPEDDATA_DATA(procval
);
941 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured
->ep
));
944 RB_OBJ_WRITE(procval
, &proc
->block
.as
.captured
.code
.val
, captured
->code
.val
);
945 RB_OBJ_WRITE(procval
, &proc
->block
.as
.captured
.self
, captured
->self
);
946 rb_vm_block_ep_update(procval
, &proc
->block
, captured
->ep
);
948 vm_block_type_set(&proc
->block
, block_type
);
949 proc
->is_from_method
= is_from_method
;
950 proc
->is_lambda
= is_lambda
;
956 rb_vm_block_copy(VALUE obj
, const struct rb_block
*dst
, const struct rb_block
*src
)
959 switch (vm_block_type(src
)) {
960 case block_type_iseq
:
961 case block_type_ifunc
:
962 RB_OBJ_WRITE(obj
, &dst
->as
.captured
.self
, src
->as
.captured
.self
);
963 RB_OBJ_WRITE(obj
, &dst
->as
.captured
.code
.val
, src
->as
.captured
.code
.val
);
964 rb_vm_block_ep_update(obj
, dst
, src
->as
.captured
.ep
);
966 case block_type_symbol
:
967 RB_OBJ_WRITE(obj
, &dst
->as
.symbol
, src
->as
.symbol
);
969 case block_type_proc
:
970 RB_OBJ_WRITE(obj
, &dst
->as
.proc
, src
->as
.proc
);
976 proc_create(VALUE klass
, const struct rb_block
*block
, int8_t is_from_method
, int8_t is_lambda
)
978 VALUE procval
= rb_proc_alloc(klass
);
979 rb_proc_t
*proc
= RTYPEDDATA_DATA(procval
);
981 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block
)));
982 rb_vm_block_copy(procval
, &proc
->block
, block
);
983 vm_block_type_set(&proc
->block
, block
->type
);
984 proc
->is_from_method
= is_from_method
;
985 proc
->is_lambda
= is_lambda
;
991 rb_proc_dup(VALUE self
)
996 GetProcPtr(self
, src
);
997 procval
= proc_create(rb_obj_class(self
), &src
->block
, src
->is_from_method
, src
->is_lambda
);
998 if (RB_OBJ_SHAREABLE_P(self
)) FL_SET_RAW(procval
, RUBY_FL_SHAREABLE
);
999 RB_GC_GUARD(self
); /* for: body = rb_proc_dup(body) */
1003 struct collect_outer_variable_name_data
{
1013 if (SIZEOF_VOIDP
> SIZEOF_LONG
)
1016 return ULONG2NUM(id
);
1022 if (SIZEOF_VOIDP
> SIZEOF_LONG
)
1023 return (ID
)NUM2ULL(num
);
1025 return (ID
)NUM2ULONG(num
);
1028 static enum rb_id_table_iterator_result
1029 collect_outer_variable_names(ID id
, VALUE val
, void *ptr
)
1031 struct collect_outer_variable_name_data
*data
= (struct collect_outer_variable_name_data
*)ptr
;
1033 if (id
== rb_intern("yield")) {
1038 if (data
->isolate
||
1039 val
== Qtrue
/* write */) {
1043 store
= &data
->read_only
;
1045 if (*store
== Qfalse
) *store
= rb_ary_new();
1046 rb_ary_push(*store
, ID2NUM(id
));
1048 return ID_TABLE_CONTINUE
;
1051 static const rb_env_t
*
1052 env_copy(const VALUE
*src_ep
, VALUE read_only_variables
)
1054 const rb_env_t
*src_env
= (rb_env_t
*)VM_ENV_ENVVAL(src_ep
);
1055 VM_ASSERT(src_env
->ep
== src_ep
);
1057 VALUE
*env_body
= ZALLOC_N(VALUE
, src_env
->env_size
); // fill with Qfalse
1058 VALUE
*ep
= &env_body
[src_env
->env_size
- 2];
1059 volatile VALUE prev_env
= Qnil
;
1061 if (read_only_variables
) {
1062 for (int i
=RARRAY_LENINT(read_only_variables
)-1; i
>=0; i
--) {
1063 ID id
= NUM2ID(RARRAY_AREF(read_only_variables
, i
));
1065 for (unsigned int j
=0; j
<src_env
->iseq
->body
->local_table_size
; j
++) {
1066 if (id
== src_env
->iseq
->body
->local_table
[j
]) {
1067 VALUE v
= src_env
->env
[j
];
1068 if (!rb_ractor_shareable_p(v
)) {
1069 VALUE name
= rb_id2str(id
);
1070 VALUE msg
= rb_sprintf("can not make shareable Proc because it can refer"
1071 " unshareable object %+" PRIsVALUE
" from ", v
);
1073 rb_str_catf(msg
, "variable `%" PRIsVALUE
"'", name
);
1075 rb_str_cat_cstr(msg
, "a hidden variable");
1076 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError
, msg
));
1079 rb_ary_delete_at(read_only_variables
, i
);
1086 ep
[VM_ENV_DATA_INDEX_ME_CREF
] = src_ep
[VM_ENV_DATA_INDEX_ME_CREF
];
1087 ep
[VM_ENV_DATA_INDEX_FLAGS
] = src_ep
[VM_ENV_DATA_INDEX_FLAGS
] | VM_ENV_FLAG_ISOLATED
;
1089 if (!VM_ENV_LOCAL_P(src_ep
)) {
1090 const VALUE
*prev_ep
= VM_ENV_PREV_EP(src_env
->ep
);
1091 const rb_env_t
*new_prev_env
= env_copy(prev_ep
, read_only_variables
);
1092 prev_env
= (VALUE
)new_prev_env
;
1093 ep
[VM_ENV_DATA_INDEX_SPECVAL
] = VM_GUARDED_PREV_EP(new_prev_env
->ep
);
1096 ep
[VM_ENV_DATA_INDEX_SPECVAL
] = VM_BLOCK_HANDLER_NONE
;
1099 const rb_env_t
*copied_env
= vm_env_new(ep
, env_body
, src_env
->env_size
, src_env
->iseq
);
1100 RB_GC_GUARD(prev_env
);
1105 proc_isolate_env(VALUE self
, rb_proc_t
*proc
, VALUE read_only_variables
)
1107 const struct rb_captured_block
*captured
= &proc
->block
.as
.captured
;
1108 const rb_env_t
*env
= env_copy(captured
->ep
, read_only_variables
);
1109 *((const VALUE
**)&proc
->block
.as
.captured
.ep
) = env
->ep
;
1110 RB_OBJ_WRITTEN(self
, Qundef
, env
);
1114 proc_shared_outer_variables(struct rb_id_table
*outer_variables
, bool isolate
, const char *message
)
1116 struct collect_outer_variable_name_data data
= {
1119 .read_only
= Qfalse
,
1122 rb_id_table_foreach(outer_variables
, collect_outer_variable_names
, (void *)&data
);
1124 if (data
.ary
!= Qfalse
) {
1125 VALUE str
= rb_sprintf("can not %s because it accesses outer variables", message
);
1126 VALUE ary
= data
.ary
;
1127 const char *sep
= " (";
1128 for (long i
= 0; i
< RARRAY_LEN(ary
); i
++) {
1129 VALUE name
= rb_id2str(NUM2ID(RARRAY_AREF(ary
, i
)));
1130 if (!name
) continue;
1131 rb_str_cat_cstr(str
, sep
);
1133 rb_str_append(str
, name
);
1135 if (*sep
== ',') rb_str_cat_cstr(str
, ")");
1136 rb_str_cat_cstr(str
, data
.yield
? " and uses `yield'." : ".");
1137 rb_exc_raise(rb_exc_new_str(rb_eArgError
, str
));
1139 else if (data
.yield
) {
1140 rb_raise(rb_eArgError
, "can not %s because it uses `yield'.", message
);
1143 return data
.read_only
;
1147 rb_proc_isolate_bang(VALUE self
)
1149 const rb_iseq_t
*iseq
= vm_proc_iseq(self
);
1152 rb_proc_t
*proc
= (rb_proc_t
*)RTYPEDDATA_DATA(self
);
1153 if (proc
->block
.type
!= block_type_iseq
) rb_raise(rb_eRuntimeError
, "not supported yet");
1155 if (iseq
->body
->outer_variables
) {
1156 proc_shared_outer_variables(iseq
->body
->outer_variables
, true, "isolate a Proc");
1159 proc_isolate_env(self
, proc
, Qfalse
);
1160 proc
->is_isolated
= TRUE
;
1163 FL_SET_RAW(self
, RUBY_FL_SHAREABLE
);
1168 rb_proc_isolate(VALUE self
)
1170 VALUE dst
= rb_proc_dup(self
);
1171 rb_proc_isolate_bang(dst
);
1176 rb_proc_ractor_make_shareable(VALUE self
)
1178 const rb_iseq_t
*iseq
= vm_proc_iseq(self
);
1181 rb_proc_t
*proc
= (rb_proc_t
*)RTYPEDDATA_DATA(self
);
1182 if (proc
->block
.type
!= block_type_iseq
) rb_raise(rb_eRuntimeError
, "not supported yet");
1184 if (!rb_ractor_shareable_p(vm_block_self(&proc
->block
))) {
1185 rb_raise(rb_eRactorIsolationError
,
1186 "Proc's self is not shareable: %" PRIsVALUE
,
1190 VALUE read_only_variables
= Qfalse
;
1192 if (iseq
->body
->outer_variables
) {
1193 read_only_variables
=
1194 proc_shared_outer_variables(iseq
->body
->outer_variables
, false, "make a Proc shareable");
1197 proc_isolate_env(self
, proc
, read_only_variables
);
1198 proc
->is_isolated
= TRUE
;
1201 FL_SET_RAW(self
, RUBY_FL_SHAREABLE
);
1205 MJIT_FUNC_EXPORTED VALUE
1206 rb_vm_make_proc_lambda(const rb_execution_context_t
*ec
, const struct rb_captured_block
*captured
, VALUE klass
, int8_t is_lambda
)
1210 if (!VM_ENV_ESCAPED_P(captured
->ep
)) {
1211 rb_control_frame_t
*cfp
= VM_CAPTURED_BLOCK_TO_CFP(captured
);
1212 vm_make_env_object(ec
, cfp
);
1214 VM_ASSERT(VM_EP_IN_HEAP_P(ec
, captured
->ep
));
1215 VM_ASSERT(imemo_type_p(captured
->code
.val
, imemo_iseq
) ||
1216 imemo_type_p(captured
->code
.val
, imemo_ifunc
));
1218 procval
= vm_proc_create_from_captured(klass
, captured
,
1219 imemo_type(captured
->code
.val
) == imemo_iseq
? block_type_iseq
: block_type_ifunc
, FALSE
, is_lambda
);
1226 rb_vm_make_binding(const rb_execution_context_t
*ec
, const rb_control_frame_t
*src_cfp
)
1228 rb_control_frame_t
*cfp
= rb_vm_get_binding_creatable_next_cfp(ec
, src_cfp
);
1229 rb_control_frame_t
*ruby_level_cfp
= rb_vm_get_ruby_level_next_cfp(ec
, src_cfp
);
1230 VALUE bindval
, envval
;
1233 if (cfp
== 0 || ruby_level_cfp
== 0) {
1234 rb_raise(rb_eRuntimeError
, "Can't create Binding Object on top of Fiber.");
1238 envval
= vm_make_env_object(ec
, cfp
);
1239 if (cfp
== ruby_level_cfp
) {
1242 cfp
= rb_vm_get_binding_creatable_next_cfp(ec
, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
));
1245 bindval
= rb_binding_alloc(rb_cBinding
);
1246 GetBindingPtr(bindval
, bind
);
1247 vm_bind_update_env(bindval
, bind
, envval
);
1248 RB_OBJ_WRITE(bindval
, &bind
->block
.as
.captured
.self
, cfp
->self
);
1249 RB_OBJ_WRITE(bindval
, &bind
->block
.as
.captured
.code
.iseq
, cfp
->iseq
);
1250 RB_OBJ_WRITE(bindval
, &bind
->pathobj
, ruby_level_cfp
->iseq
->body
->location
.pathobj
);
1251 bind
->first_lineno
= rb_vm_get_sourceline(ruby_level_cfp
);
1257 rb_binding_add_dynavars(VALUE bindval
, rb_binding_t
*bind
, int dyncount
, const ID
*dynvars
)
1259 VALUE envval
, pathobj
= bind
->pathobj
;
1260 VALUE path
= pathobj_path(pathobj
);
1261 VALUE realpath
= pathobj_realpath(pathobj
);
1262 const struct rb_block
*base_block
;
1263 const rb_env_t
*env
;
1264 rb_execution_context_t
*ec
= GET_EC();
1265 const rb_iseq_t
*base_iseq
, *iseq
;
1269 if (dyncount
< 0) return 0;
1271 base_block
= &bind
->block
;
1272 base_iseq
= vm_block_iseq(base_block
);
1275 rb_ast_id_table_t
*dyns
= ALLOCV(idtmp
, sizeof(rb_ast_id_table_t
) + dyncount
* sizeof(ID
));
1276 dyns
->size
= dyncount
;
1277 MEMCPY(dyns
->ids
, dynvars
, ID
, dyncount
);
1279 rb_node_init(&tmp_node
, NODE_SCOPE
, (VALUE
)dyns
, 0, 0);
1280 ast
.root
= &tmp_node
;
1281 ast
.compile_option
= 0;
1282 ast
.script_lines
= INT2FIX(-1);
1285 iseq
= rb_iseq_new(&ast
, base_iseq
->body
->location
.label
, path
, realpath
, base_iseq
, ISEQ_TYPE_EVAL
);
1288 VALUE tempstr
= rb_fstring_lit("<temp>");
1289 iseq
= rb_iseq_new_top(&ast
, tempstr
, tempstr
, tempstr
, NULL
);
1291 tmp_node
.nd_tbl
= 0; /* reset table */
1294 vm_set_eval_stack(ec
, iseq
, 0, base_block
);
1295 vm_bind_update_env(bindval
, bind
, envval
= vm_make_env_object(ec
, ec
->cfp
));
1296 rb_vm_pop_frame(ec
);
1298 env
= (const rb_env_t
*)envval
;
1302 /* C -> Ruby: block */
1305 invoke_block(rb_execution_context_t
*ec
, const rb_iseq_t
*iseq
, VALUE self
, const struct rb_captured_block
*captured
, const rb_cref_t
*cref
, VALUE type
, int opt_pc
)
1307 int arg_size
= iseq
->body
->param
.size
;
1309 vm_push_frame(ec
, iseq
, type
| VM_FRAME_FLAG_FINISH
, self
,
1310 VM_GUARDED_PREV_EP(captured
->ep
),
1311 (VALUE
)cref
, /* cref or method */
1312 iseq
->body
->iseq_encoded
+ opt_pc
,
1313 ec
->cfp
->sp
+ arg_size
,
1314 iseq
->body
->local_table_size
- arg_size
,
1315 iseq
->body
->stack_max
);
1316 return vm_exec(ec
, true);
1320 invoke_bmethod(rb_execution_context_t
*ec
, const rb_iseq_t
*iseq
, VALUE self
, const struct rb_captured_block
*captured
, const rb_callable_method_entry_t
*me
, VALUE type
, int opt_pc
)
1323 int arg_size
= iseq
->body
->param
.size
;
1326 VM_ASSERT(me
->def
->type
== VM_METHOD_TYPE_BMETHOD
);
1328 vm_push_frame(ec
, iseq
, type
| VM_FRAME_FLAG_BMETHOD
, self
,
1329 VM_GUARDED_PREV_EP(captured
->ep
),
1331 iseq
->body
->iseq_encoded
+ opt_pc
,
1332 ec
->cfp
->sp
+ arg_size
,
1333 iseq
->body
->local_table_size
- arg_size
,
1334 iseq
->body
->stack_max
);
1336 VM_ENV_FLAGS_SET(ec
->cfp
->ep
, VM_FRAME_FLAG_FINISH
);
1337 ret
= vm_exec(ec
, true);
1342 ALWAYS_INLINE(static VALUE
1343 invoke_iseq_block_from_c(rb_execution_context_t
*ec
, const struct rb_captured_block
*captured
,
1344 VALUE self
, int argc
, const VALUE
*argv
, int kw_splat
, VALUE passed_block_handler
,
1345 const rb_cref_t
*cref
, int is_lambda
, const rb_callable_method_entry_t
*me
));
1348 invoke_iseq_block_from_c(rb_execution_context_t
*ec
, const struct rb_captured_block
*captured
,
1349 VALUE self
, int argc
, const VALUE
*argv
, int kw_splat
, VALUE passed_block_handler
,
1350 const rb_cref_t
*cref
, int is_lambda
, const rb_callable_method_entry_t
*me
)
1352 const rb_iseq_t
*iseq
= rb_iseq_check(captured
->code
.iseq
);
1354 VALUE type
= VM_FRAME_MAGIC_BLOCK
| (is_lambda
? VM_FRAME_FLAG_LAMBDA
: 0);
1355 rb_control_frame_t
*cfp
= ec
->cfp
;
1356 VALUE
*sp
= cfp
->sp
;
1360 CHECK_VM_STACK_OVERFLOW(cfp
, argc
);
1361 vm_check_canary(ec
, sp
);
1362 cfp
->sp
= sp
+ argc
;
1363 for (i
=0; i
<argc
; i
++) {
1367 opt_pc
= vm_yield_setup_args(ec
, iseq
, argc
, sp
, kw_splat
, passed_block_handler
,
1368 (is_lambda
? arg_setup_method
: arg_setup_block
));
1372 return invoke_block(ec
, iseq
, self
, captured
, cref
, type
, opt_pc
);
1375 return invoke_bmethod(ec
, iseq
, self
, captured
, me
, type
, opt_pc
);
1380 invoke_block_from_c_bh(rb_execution_context_t
*ec
, VALUE block_handler
,
1381 int argc
, const VALUE
*argv
,
1382 int kw_splat
, VALUE passed_block_handler
, const rb_cref_t
*cref
,
1383 int is_lambda
, int force_blockarg
)
1386 switch (vm_block_handler_type(block_handler
)) {
1387 case block_handler_type_iseq
:
1389 const struct rb_captured_block
*captured
= VM_BH_TO_ISEQ_BLOCK(block_handler
);
1390 return invoke_iseq_block_from_c(ec
, captured
, captured
->self
,
1391 argc
, argv
, kw_splat
, passed_block_handler
,
1392 cref
, is_lambda
, NULL
);
1394 case block_handler_type_ifunc
:
1395 return vm_yield_with_cfunc(ec
, VM_BH_TO_IFUNC_BLOCK(block_handler
),
1396 VM_BH_TO_IFUNC_BLOCK(block_handler
)->self
,
1397 argc
, argv
, kw_splat
, passed_block_handler
, NULL
);
1398 case block_handler_type_symbol
:
1399 return vm_yield_with_symbol(ec
, VM_BH_TO_SYMBOL(block_handler
),
1400 argc
, argv
, kw_splat
, passed_block_handler
);
1401 case block_handler_type_proc
:
1402 if (force_blockarg
== FALSE
) {
1403 is_lambda
= block_proc_is_lambda(VM_BH_TO_PROC(block_handler
));
1405 block_handler
= vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler
));
1408 VM_UNREACHABLE(invoke_block_from_c_splattable
);
1413 check_block_handler(rb_execution_context_t
*ec
)
1415 VALUE block_handler
= VM_CF_BLOCK_HANDLER(ec
->cfp
);
1416 vm_block_handler_verify(block_handler
);
1417 if (UNLIKELY(block_handler
== VM_BLOCK_HANDLER_NONE
)) {
1418 rb_vm_localjump_error("no block given", Qnil
, 0);
1421 return block_handler
;
1425 vm_yield_with_cref(rb_execution_context_t
*ec
, int argc
, const VALUE
*argv
, int kw_splat
, const rb_cref_t
*cref
, int is_lambda
)
1427 return invoke_block_from_c_bh(ec
, check_block_handler(ec
),
1428 argc
, argv
, kw_splat
, VM_BLOCK_HANDLER_NONE
,
1429 cref
, is_lambda
, FALSE
);
1433 vm_yield(rb_execution_context_t
*ec
, int argc
, const VALUE
*argv
, int kw_splat
)
1435 return vm_yield_with_cref(ec
, argc
, argv
, kw_splat
, NULL
, FALSE
);
1439 vm_yield_with_block(rb_execution_context_t
*ec
, int argc
, const VALUE
*argv
, VALUE block_handler
, int kw_splat
)
1441 return invoke_block_from_c_bh(ec
, check_block_handler(ec
),
1442 argc
, argv
, kw_splat
, block_handler
,
1443 NULL
, FALSE
, FALSE
);
1447 vm_yield_force_blockarg(rb_execution_context_t
*ec
, VALUE args
)
1449 return invoke_block_from_c_bh(ec
, check_block_handler(ec
), 1, &args
,
1450 RB_NO_KEYWORDS
, VM_BLOCK_HANDLER_NONE
, NULL
, FALSE
, TRUE
);
1453 ALWAYS_INLINE(static VALUE
1454 invoke_block_from_c_proc(rb_execution_context_t
*ec
, const rb_proc_t
*proc
,
1455 VALUE self
, int argc
, const VALUE
*argv
,
1456 int kw_splat
, VALUE passed_block_handler
, int is_lambda
,
1457 const rb_callable_method_entry_t
*me
));
1460 invoke_block_from_c_proc(rb_execution_context_t
*ec
, const rb_proc_t
*proc
,
1461 VALUE self
, int argc
, const VALUE
*argv
,
1462 int kw_splat
, VALUE passed_block_handler
, int is_lambda
,
1463 const rb_callable_method_entry_t
*me
)
1465 const struct rb_block
*block
= &proc
->block
;
1468 switch (vm_block_type(block
)) {
1469 case block_type_iseq
:
1470 return invoke_iseq_block_from_c(ec
, &block
->as
.captured
, self
, argc
, argv
, kw_splat
, passed_block_handler
, NULL
, is_lambda
, me
);
1471 case block_type_ifunc
:
1472 if (kw_splat
== 1) {
1473 VALUE keyword_hash
= argv
[argc
-1];
1474 if (!RB_TYPE_P(keyword_hash
, T_HASH
)) {
1475 keyword_hash
= rb_to_hash_type(keyword_hash
);
1477 if (RHASH_EMPTY_P(keyword_hash
)) {
1481 ((VALUE
*)argv
)[argc
-1] = rb_hash_dup(keyword_hash
);
1484 return vm_yield_with_cfunc(ec
, &block
->as
.captured
, self
, argc
, argv
, kw_splat
, passed_block_handler
, me
);
1485 case block_type_symbol
:
1486 return vm_yield_with_symbol(ec
, block
->as
.symbol
, argc
, argv
, kw_splat
, passed_block_handler
);
1487 case block_type_proc
:
1488 is_lambda
= block_proc_is_lambda(block
->as
.proc
);
1489 block
= vm_proc_block(block
->as
.proc
);
1492 VM_UNREACHABLE(invoke_block_from_c_proc
);
1497 vm_invoke_proc(rb_execution_context_t
*ec
, rb_proc_t
*proc
, VALUE self
,
1498 int argc
, const VALUE
*argv
, int kw_splat
, VALUE passed_block_handler
)
1500 return invoke_block_from_c_proc(ec
, proc
, self
, argc
, argv
, kw_splat
, passed_block_handler
, proc
->is_lambda
, NULL
);
1503 MJIT_FUNC_EXPORTED VALUE
1504 rb_vm_invoke_bmethod(rb_execution_context_t
*ec
, rb_proc_t
*proc
, VALUE self
,
1505 int argc
, const VALUE
*argv
, int kw_splat
, VALUE block_handler
, const rb_callable_method_entry_t
*me
)
1507 return invoke_block_from_c_proc(ec
, proc
, self
, argc
, argv
, kw_splat
, block_handler
, TRUE
, me
);
1510 MJIT_FUNC_EXPORTED VALUE
1511 rb_vm_invoke_proc(rb_execution_context_t
*ec
, rb_proc_t
*proc
,
1512 int argc
, const VALUE
*argv
, int kw_splat
, VALUE passed_block_handler
)
1514 VALUE self
= vm_block_self(&proc
->block
);
1515 vm_block_handler_verify(passed_block_handler
);
1517 if (proc
->is_from_method
) {
1518 return rb_vm_invoke_bmethod(ec
, proc
, self
, argc
, argv
, kw_splat
, passed_block_handler
, NULL
);
1521 return vm_invoke_proc(ec
, proc
, self
, argc
, argv
, kw_splat
, passed_block_handler
);
1526 rb_vm_invoke_proc_with_self(rb_execution_context_t
*ec
, rb_proc_t
*proc
, VALUE self
,
1527 int argc
, const VALUE
*argv
, int kw_splat
, VALUE passed_block_handler
)
1529 vm_block_handler_verify(passed_block_handler
);
1531 if (proc
->is_from_method
) {
1532 return rb_vm_invoke_bmethod(ec
, proc
, self
, argc
, argv
, kw_splat
, passed_block_handler
, NULL
);
1535 return vm_invoke_proc(ec
, proc
, self
, argc
, argv
, kw_splat
, passed_block_handler
);
1539 /* special variable */
1541 static rb_control_frame_t
*
1542 vm_normal_frame(const rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
)
1544 while (cfp
->pc
== 0) {
1545 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1546 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec
, cfp
)) {
1554 vm_cfp_svar_get(const rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
, VALUE key
)
1556 cfp
= vm_normal_frame(ec
, cfp
);
1557 return lep_svar_get(ec
, cfp
? VM_CF_LEP(cfp
) : 0, key
);
1561 vm_cfp_svar_set(const rb_execution_context_t
*ec
, rb_control_frame_t
*cfp
, VALUE key
, const VALUE val
)
1563 cfp
= vm_normal_frame(ec
, cfp
);
1564 lep_svar_set(ec
, cfp
? VM_CF_LEP(cfp
) : 0, key
, val
);
1568 vm_svar_get(const rb_execution_context_t
*ec
, VALUE key
)
1570 return vm_cfp_svar_get(ec
, ec
->cfp
, key
);
1574 vm_svar_set(const rb_execution_context_t
*ec
, VALUE key
, VALUE val
)
1576 vm_cfp_svar_set(ec
, ec
->cfp
, key
, val
);
1580 rb_backref_get(void)
1582 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF
);
1586 rb_backref_set(VALUE val
)
1588 vm_svar_set(GET_EC(), VM_SVAR_BACKREF
, val
);
1592 rb_lastline_get(void)
1594 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE
);
1598 rb_lastline_set(VALUE val
)
1600 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE
, val
);
1608 const rb_execution_context_t
*ec
= GET_EC();
1609 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1612 return RSTRING_PTR(rb_iseq_path(cfp
->iseq
));
1622 const rb_execution_context_t
*ec
= GET_EC();
1623 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1626 return rb_vm_get_sourceline(cfp
);
1634 rb_source_location(int *pline
)
1636 const rb_execution_context_t
*ec
= GET_EC();
1637 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1639 if (cfp
&& VM_FRAME_RUBYFRAME_P(cfp
)) {
1640 if (pline
) *pline
= rb_vm_get_sourceline(cfp
);
1641 return rb_iseq_path(cfp
->iseq
);
1644 if (pline
) *pline
= 0;
1649 MJIT_FUNC_EXPORTED
const char *
1650 rb_source_location_cstr(int *pline
)
1652 VALUE path
= rb_source_location(pline
);
1653 if (NIL_P(path
)) return NULL
;
1654 return RSTRING_PTR(path
);
1660 const rb_execution_context_t
*ec
= GET_EC();
1661 return vm_ec_cref(ec
);
1665 rb_vm_cref_replace_with_duplicated_cref(void)
1667 const rb_execution_context_t
*ec
= GET_EC();
1668 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1669 rb_cref_t
*cref
= vm_cref_replace_with_duplicated_cref(cfp
->ep
);
1675 rb_vm_cref_in_context(VALUE self
, VALUE cbase
)
1677 const rb_execution_context_t
*ec
= GET_EC();
1678 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1679 const rb_cref_t
*cref
;
1680 if (!cfp
|| cfp
->self
!= self
) return NULL
;
1681 if (!vm_env_cref_by_cref(cfp
->ep
)) return NULL
;
1682 cref
= vm_get_cref(cfp
->ep
);
1683 if (CREF_CLASS(cref
) != cbase
) return NULL
;
1689 debug_cref(rb_cref_t
*cref
)
1692 dp(CREF_CLASS(cref
));
1693 printf("%ld\n", CREF_VISI(cref
));
1694 cref
= CREF_NEXT(cref
);
1702 const rb_execution_context_t
*ec
= GET_EC();
1703 const rb_control_frame_t
*cfp
= rb_vm_get_ruby_level_next_cfp(ec
, ec
->cfp
);
1706 rb_raise(rb_eRuntimeError
, "Can't call on top of Fiber or Thread");
1708 return vm_get_cbase(cfp
->ep
);
1714 make_localjump_error(const char *mesg
, VALUE value
, int reason
)
1716 extern VALUE rb_eLocalJumpError
;
1717 VALUE exc
= rb_exc_new2(rb_eLocalJumpError
, mesg
);
1722 CONST_ID(id
, "break");
1725 CONST_ID(id
, "redo");
1728 CONST_ID(id
, "retry");
1731 CONST_ID(id
, "next");
1734 CONST_ID(id
, "return");
1737 CONST_ID(id
, "noreason");
1740 rb_iv_set(exc
, "@exit_value", value
);
1741 rb_iv_set(exc
, "@reason", ID2SYM(id
));
1745 MJIT_FUNC_EXPORTED
void
1746 rb_vm_localjump_error(const char *mesg
, VALUE value
, int reason
)
1748 VALUE exc
= make_localjump_error(mesg
, value
, reason
);
1753 rb_vm_make_jump_tag_but_local_jump(int state
, VALUE val
)
1759 mesg
= "unexpected return";
1762 mesg
= "unexpected break";
1765 mesg
= "unexpected next";
1768 mesg
= "unexpected redo";
1772 mesg
= "retry outside of rescue clause";
1778 if (val
== Qundef
) {
1779 val
= GET_EC()->tag
->retval
;
1781 return make_localjump_error(mesg
, val
, state
);
1785 rb_vm_jump_tag_but_local_jump(int state
)
1787 VALUE exc
= rb_vm_make_jump_tag_but_local_jump(state
, Qundef
);
1788 if (!NIL_P(exc
)) rb_exc_raise(exc
);
1789 EC_JUMP_TAG(GET_EC(), state
);
1792 static rb_control_frame_t
*
1793 next_not_local_frame(rb_control_frame_t
*cfp
)
1795 while (VM_ENV_LOCAL_P(cfp
->ep
)) {
1796 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1801 NORETURN(static void vm_iter_break(rb_execution_context_t
*ec
, VALUE val
));
1804 vm_iter_break(rb_execution_context_t
*ec
, VALUE val
)
1806 rb_control_frame_t
*cfp
= next_not_local_frame(ec
->cfp
);
1807 const VALUE
*ep
= VM_CF_PREV_EP(cfp
);
1808 const rb_control_frame_t
*target_cfp
= rb_vm_search_cf_from_ep(ec
, cfp
, ep
);
1810 #if 0 /* raise LocalJumpError */
1812 rb_vm_localjump_error("unexpected break", val
, TAG_BREAK
);
1816 ec
->errinfo
= (VALUE
)THROW_DATA_NEW(val
, target_cfp
, TAG_BREAK
);
1817 EC_JUMP_TAG(ec
, TAG_BREAK
);
1823 vm_iter_break(GET_EC(), Qnil
);
1827 rb_iter_break_value(VALUE val
)
1829 vm_iter_break(GET_EC(), val
);
1832 /* optimization: redefine management */
1834 static st_table
*vm_opt_method_def_table
= 0;
1835 static st_table
*vm_opt_mid_table
= 0;
1838 vm_redefinition_check_flag(VALUE klass
)
1840 if (klass
== rb_cInteger
) return INTEGER_REDEFINED_OP_FLAG
;
1841 if (klass
== rb_cFloat
) return FLOAT_REDEFINED_OP_FLAG
;
1842 if (klass
== rb_cString
) return STRING_REDEFINED_OP_FLAG
;
1843 if (klass
== rb_cArray
) return ARRAY_REDEFINED_OP_FLAG
;
1844 if (klass
== rb_cHash
) return HASH_REDEFINED_OP_FLAG
;
1845 if (klass
== rb_cSymbol
) return SYMBOL_REDEFINED_OP_FLAG
;
1847 if (klass
== rb_cTime
) return TIME_REDEFINED_OP_FLAG
;
1849 if (klass
== rb_cRegexp
) return REGEXP_REDEFINED_OP_FLAG
;
1850 if (klass
== rb_cNilClass
) return NIL_REDEFINED_OP_FLAG
;
1851 if (klass
== rb_cTrueClass
) return TRUE_REDEFINED_OP_FLAG
;
1852 if (klass
== rb_cFalseClass
) return FALSE_REDEFINED_OP_FLAG
;
1853 if (klass
== rb_cProc
) return PROC_REDEFINED_OP_FLAG
;
1858 rb_vm_check_optimizable_mid(VALUE mid
)
1860 if (!vm_opt_mid_table
) {
1864 return st_lookup(vm_opt_mid_table
, mid
, NULL
);
1868 vm_redefinition_check_method_type(const rb_method_entry_t
*me
)
1870 if (me
->called_id
!= me
->def
->original_id
) {
1874 const rb_method_definition_t
*def
= me
->def
;
1875 switch (def
->type
) {
1876 case VM_METHOD_TYPE_CFUNC
:
1877 case VM_METHOD_TYPE_OPTIMIZED
:
1885 rb_vm_check_redefinition_opt_method(const rb_method_entry_t
*me
, VALUE klass
)
1888 if (RB_TYPE_P(klass
, T_ICLASS
) && FL_TEST(klass
, RICLASS_IS_ORIGIN
) &&
1889 RB_TYPE_P(RBASIC_CLASS(klass
), T_CLASS
)) {
1890 klass
= RBASIC_CLASS(klass
);
1892 if (vm_redefinition_check_method_type(me
)) {
1893 if (st_lookup(vm_opt_method_def_table
, (st_data_t
)me
->def
, &bop
)) {
1894 int flag
= vm_redefinition_check_flag(klass
);
1896 rb_yjit_bop_redefined(klass
, me
, (enum ruby_basic_operators
)bop
);
1897 ruby_vm_redefined_flag
[bop
] |= flag
;
1903 static enum rb_id_table_iterator_result
1904 check_redefined_method(ID mid
, VALUE value
, void *data
)
1906 VALUE klass
= (VALUE
)data
;
1907 const rb_method_entry_t
*me
= (rb_method_entry_t
*)value
;
1908 const rb_method_entry_t
*newme
= rb_method_entry(klass
, mid
);
1910 if (newme
!= me
) rb_vm_check_redefinition_opt_method(me
, me
->owner
);
1912 return ID_TABLE_CONTINUE
;
1916 rb_vm_check_redefinition_by_prepend(VALUE klass
)
1918 if (!vm_redefinition_check_flag(klass
)) return;
1919 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass
)), check_redefined_method
, (void *)klass
);
1923 add_opt_method(VALUE klass
, ID mid
, VALUE bop
)
1925 const rb_method_entry_t
*me
= rb_method_entry_at(klass
, mid
);
1927 if (me
&& vm_redefinition_check_method_type(me
)) {
1928 st_insert(vm_opt_method_def_table
, (st_data_t
)me
->def
, (st_data_t
)bop
);
1929 st_insert(vm_opt_mid_table
, (st_data_t
)mid
, (st_data_t
)Qtrue
);
1932 rb_bug("undefined optimized method: %s", rb_id2name(mid
));
1937 vm_init_redefined_flag(void)
1942 vm_opt_method_def_table
= st_init_numtable();
1943 vm_opt_mid_table
= st_init_numtable();
1945 #define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
1946 #define C(k) add_opt_method(rb_c##k, mid, bop)
1947 OP(PLUS
, PLUS
), (C(Integer
), C(Float
), C(String
), C(Array
));
1948 OP(MINUS
, MINUS
), (C(Integer
), C(Float
));
1949 OP(MULT
, MULT
), (C(Integer
), C(Float
));
1950 OP(DIV
, DIV
), (C(Integer
), C(Float
));
1951 OP(MOD
, MOD
), (C(Integer
), C(Float
));
1952 OP(Eq
, EQ
), (C(Integer
), C(Float
), C(String
), C(Symbol
));
1953 OP(Eqq
, EQQ
), (C(Integer
), C(Float
), C(Symbol
), C(String
),
1954 C(NilClass
), C(TrueClass
), C(FalseClass
));
1955 OP(LT
, LT
), (C(Integer
), C(Float
));
1956 OP(LE
, LE
), (C(Integer
), C(Float
));
1957 OP(GT
, GT
), (C(Integer
), C(Float
));
1958 OP(GE
, GE
), (C(Integer
), C(Float
));
1959 OP(LTLT
, LTLT
), (C(String
), C(Array
));
1960 OP(AREF
, AREF
), (C(Array
), C(Hash
), C(Integer
));
1961 OP(ASET
, ASET
), (C(Array
), C(Hash
));
1962 OP(Length
, LENGTH
), (C(Array
), C(String
), C(Hash
));
1963 OP(Size
, SIZE
), (C(Array
), C(String
), C(Hash
));
1964 OP(EmptyP
, EMPTY_P
), (C(Array
), C(String
), C(Hash
));
1965 OP(Succ
, SUCC
), (C(Integer
), C(String
));
1966 OP(EqTilde
, MATCH
), (C(Regexp
), C(String
));
1967 OP(Freeze
, FREEZE
), (C(String
));
1968 OP(UMinus
, UMINUS
), (C(String
));
1969 OP(Max
, MAX
), (C(Array
));
1970 OP(Min
, MIN
), (C(Array
));
1971 OP(Call
, CALL
), (C(Proc
));
1972 OP(And
, AND
), (C(Integer
));
1973 OP(Or
, OR
), (C(Integer
));
1974 OP(NilP
, NIL_P
), (C(NilClass
));
1979 /* for vm development */
1983 vm_frametype_name(const rb_control_frame_t
*cfp
)
1985 switch (VM_FRAME_TYPE(cfp
)) {
1986 case VM_FRAME_MAGIC_METHOD
: return "method";
1987 case VM_FRAME_MAGIC_BLOCK
: return "block";
1988 case VM_FRAME_MAGIC_CLASS
: return "class";
1989 case VM_FRAME_MAGIC_TOP
: return "top";
1990 case VM_FRAME_MAGIC_CFUNC
: return "cfunc";
1991 case VM_FRAME_MAGIC_IFUNC
: return "ifunc";
1992 case VM_FRAME_MAGIC_EVAL
: return "eval";
1993 case VM_FRAME_MAGIC_RESCUE
: return "rescue";
1995 rb_bug("unknown frame");
2001 frame_return_value(const struct vm_throw_data
*err
)
2003 if (THROW_DATA_P(err
) &&
2004 THROW_DATA_STATE(err
) == TAG_BREAK
&&
2005 THROW_DATA_CONSUMED_P(err
) == FALSE
) {
2006 return THROW_DATA_VAL(err
);
2016 frame_name(const rb_control_frame_t
*cfp
)
2018 unsigned long type
= VM_FRAME_TYPE(cfp
);
2019 #define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2036 // cfp_returning_with_value:
2037 // Whether cfp is the last frame in the unwinding process for a non-local return.
2039 hook_before_rewind(rb_execution_context_t
*ec
, const rb_control_frame_t
*cfp
,
2040 bool cfp_returning_with_value
, int state
, struct vm_throw_data
*err
)
2042 if (state
== TAG_RAISE
&& RBASIC(err
)->klass
== rb_eSysStackError
) {
2046 const rb_iseq_t
*iseq
= cfp
->iseq
;
2047 rb_hook_list_t
*local_hooks
= iseq
->aux
.exec
.local_hooks
;
2049 switch (VM_FRAME_TYPE(ec
->cfp
)) {
2050 case VM_FRAME_MAGIC_METHOD
:
2051 RUBY_DTRACE_METHOD_RETURN_HOOK(ec
, 0, 0);
2052 EXEC_EVENT_HOOK_AND_POP_FRAME(ec
, RUBY_EVENT_RETURN
, ec
->cfp
->self
, 0, 0, 0, frame_return_value(err
));
2054 if (UNLIKELY(local_hooks
&& local_hooks
->events
& RUBY_EVENT_RETURN
)) {
2055 rb_exec_event_hook_orig(ec
, local_hooks
, RUBY_EVENT_RETURN
,
2056 ec
->cfp
->self
, 0, 0, 0, frame_return_value(err
), TRUE
);
2059 THROW_DATA_CONSUMED_SET(err
);
2061 case VM_FRAME_MAGIC_BLOCK
:
2062 if (VM_FRAME_BMETHOD_P(ec
->cfp
)) {
2063 VALUE bmethod_return_value
= frame_return_value(err
);
2064 if (cfp_returning_with_value
) {
2065 // Non-local return terminating at a BMETHOD control frame.
2066 bmethod_return_value
= THROW_DATA_VAL(err
);
2070 EXEC_EVENT_HOOK(ec
, RUBY_EVENT_B_RETURN
, ec
->cfp
->self
, 0, 0, 0, bmethod_return_value
);
2071 if (UNLIKELY(local_hooks
&& local_hooks
->events
& RUBY_EVENT_B_RETURN
)) {
2072 rb_exec_event_hook_orig(ec
, local_hooks
, RUBY_EVENT_B_RETURN
,
2073 ec
->cfp
->self
, 0, 0, 0, bmethod_return_value
, FALSE
);
2076 const rb_callable_method_entry_t
*me
= rb_vm_frame_method_entry(ec
->cfp
);
2078 EXEC_EVENT_HOOK_AND_POP_FRAME(ec
, RUBY_EVENT_RETURN
, ec
->cfp
->self
,
2079 rb_vm_frame_method_entry(ec
->cfp
)->def
->original_id
,
2080 rb_vm_frame_method_entry(ec
->cfp
)->called_id
,
2081 rb_vm_frame_method_entry(ec
->cfp
)->owner
,
2082 bmethod_return_value
);
2084 VM_ASSERT(me
->def
->type
== VM_METHOD_TYPE_BMETHOD
);
2085 local_hooks
= me
->def
->body
.bmethod
.hooks
;
2087 if (UNLIKELY(local_hooks
&& local_hooks
->events
& RUBY_EVENT_RETURN
)) {
2088 rb_exec_event_hook_orig(ec
, local_hooks
, RUBY_EVENT_RETURN
, ec
->cfp
->self
,
2089 rb_vm_frame_method_entry(ec
->cfp
)->def
->original_id
,
2090 rb_vm_frame_method_entry(ec
->cfp
)->called_id
,
2091 rb_vm_frame_method_entry(ec
->cfp
)->owner
,
2092 bmethod_return_value
, TRUE
);
2094 THROW_DATA_CONSUMED_SET(err
);
2097 EXEC_EVENT_HOOK_AND_POP_FRAME(ec
, RUBY_EVENT_B_RETURN
, ec
->cfp
->self
, 0, 0, 0, frame_return_value(err
));
2098 if (UNLIKELY(local_hooks
&& local_hooks
->events
& RUBY_EVENT_B_RETURN
)) {
2099 rb_exec_event_hook_orig(ec
, local_hooks
, RUBY_EVENT_B_RETURN
,
2100 ec
->cfp
->self
, 0, 0, 0, frame_return_value(err
), TRUE
);
2102 THROW_DATA_CONSUMED_SET(err
);
2105 case VM_FRAME_MAGIC_CLASS
:
2106 EXEC_EVENT_HOOK_AND_POP_FRAME(ec
, RUBY_EVENT_END
, ec
->cfp
->self
, 0, 0, 0, Qnil
);
2112 /* evaluator body */
2117 cfunc finish F1 F2 C1
2118 rb_funcall finish F1 F2 C1
2120 VM finish F1 F2 C1 F3
2122 F1 - F3 : pushed by VM
2123 C1 : pushed by send insn (CFUNC)
2125 struct CONTROL_FRAME {
2126 VALUE *pc; // cfp[0], program counter
2127 VALUE *sp; // cfp[1], stack pointer
2128 rb_iseq_t *iseq; // cfp[2], iseq
2129 VALUE self; // cfp[3], self
2130 const VALUE *ep; // cfp[4], env pointer
2131 const void *block_code; // cfp[5], block code
2134 struct rb_captured_block {
2147 VALUE cref; // ep[-2]
2148 VALUE special; // ep[-1]
2149 VALUE flags; // ep[ 0] == lep[0]
2159 VALUE cref; // ep[-2]
2160 VALUE special; // ep[-1]
2161 VALUE flags; // ep[ 0]
2169 VALUE prev_ep; // for frame jump
2173 struct C_METHOD_CONTROL_FRAME {
2175 VALUE *sp; // stack pointer
2176 rb_iseq_t *iseq; // cmi
2178 VALUE *ep; // ep == lep
2182 struct C_BLOCK_CONTROL_FRAME {
2183 VALUE *pc; // point only "finish" insn
2185 rb_iseq_t *iseq; // ?
2191 If mjit_exec is already called before calling vm_exec, `mjit_enable_p` should
2192 be FALSE to avoid calling `mjit_exec` twice.
2196 vm_exec_handle_exception(rb_execution_context_t
*ec
, enum ruby_tag_type state
,
2197 VALUE errinfo
, VALUE
*initial
);
2200 vm_exec(rb_execution_context_t
*ec
, bool mjit_enable_p
)
2202 enum ruby_tag_type state
;
2203 VALUE result
= Qundef
;
2209 if ((state
= EC_EXEC_TAG()) == TAG_NONE
) {
2210 if (!mjit_enable_p
|| (result
= mjit_exec(ec
)) == Qundef
) {
2211 result
= vm_exec_core(ec
, initial
);
2213 goto vm_loop_start
; /* fallback to the VM */
2216 result
= ec
->errinfo
;
2217 rb_ec_raised_reset(ec
, RAISED_STACKOVERFLOW
| RAISED_NOMEMORY
);
2218 while ((result
= vm_exec_handle_exception(ec
, state
, result
, &initial
)) == Qundef
) {
2219 /* caught a jump, exec the handler */
2220 result
= vm_exec_core(ec
, initial
);
2222 VM_ASSERT(ec
->tag
== &_tag
);
2223 /* when caught `throw`, `tag.state` is set. */
2224 if ((state
= _tag
.state
) == TAG_NONE
) break;
2225 _tag
.state
= TAG_NONE
;
2233 vm_exec_handle_exception(rb_execution_context_t
*ec
, enum ruby_tag_type state
,
2234 VALUE errinfo
, VALUE
*initial
)
2236 struct vm_throw_data
*err
= (struct vm_throw_data
*)errinfo
;
2240 const struct iseq_catch_table_entry
*entry
;
2241 const struct iseq_catch_table
*ct
;
2242 unsigned long epc
, cont_pc
, cont_sp
;
2243 const rb_iseq_t
*catch_iseq
;
2244 rb_control_frame_t
*cfp
;
2246 const rb_control_frame_t
*escape_cfp
;
2248 cont_pc
= cont_sp
= 0;
2251 while (ec
->cfp
->pc
== 0 || ec
->cfp
->iseq
== 0) {
2252 if (UNLIKELY(VM_FRAME_TYPE(ec
->cfp
) == VM_FRAME_MAGIC_CFUNC
)) {
2253 EXEC_EVENT_HOOK_AND_POP_FRAME(ec
, RUBY_EVENT_C_RETURN
, ec
->cfp
->self
,
2254 rb_vm_frame_method_entry(ec
->cfp
)->def
->original_id
,
2255 rb_vm_frame_method_entry(ec
->cfp
)->called_id
,
2256 rb_vm_frame_method_entry(ec
->cfp
)->owner
, Qnil
);
2257 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec
,
2258 rb_vm_frame_method_entry(ec
->cfp
)->owner
,
2259 rb_vm_frame_method_entry(ec
->cfp
)->def
->original_id
);
2261 rb_vm_pop_frame(ec
);
2265 epc
= cfp
->pc
- cfp
->iseq
->body
->iseq_encoded
;
2268 if (state
== TAG_BREAK
|| state
== TAG_RETURN
) {
2269 escape_cfp
= THROW_DATA_CATCH_FRAME(err
);
2271 if (cfp
== escape_cfp
) {
2272 if (state
== TAG_RETURN
) {
2273 if (!VM_FRAME_FINISHED_P(cfp
)) {
2274 THROW_DATA_CATCH_FRAME_SET(err
, cfp
+ 1);
2275 THROW_DATA_STATE_SET(err
, state
= TAG_BREAK
);
2278 ct
= cfp
->iseq
->body
->catch_table
;
2279 if (ct
) for (i
= 0; i
< ct
->size
; i
++) {
2280 entry
= UNALIGNED_MEMBER_PTR(ct
, entries
[i
]);
2281 if (entry
->start
< epc
&& entry
->end
>= epc
) {
2282 if (entry
->type
== CATCH_TYPE_ENSURE
) {
2283 catch_iseq
= entry
->iseq
;
2284 cont_pc
= entry
->cont
;
2285 cont_sp
= entry
->sp
;
2290 if (catch_iseq
== NULL
) {
2292 THROW_DATA_CATCH_FRAME_SET(err
, cfp
+ 1);
2293 // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2294 hook_before_rewind(ec
, ec
->cfp
, true, state
, err
);
2295 rb_vm_pop_frame(ec
);
2296 return THROW_DATA_VAL(err
);
2303 #if OPT_STACK_CACHING
2304 *initial
= THROW_DATA_VAL(err
);
2306 *ec
->cfp
->sp
++ = THROW_DATA_VAL(err
);
2314 if (state
== TAG_RAISE
) {
2315 ct
= cfp
->iseq
->body
->catch_table
;
2316 if (ct
) for (i
= 0; i
< ct
->size
; i
++) {
2317 entry
= UNALIGNED_MEMBER_PTR(ct
, entries
[i
]);
2318 if (entry
->start
< epc
&& entry
->end
>= epc
) {
2320 if (entry
->type
== CATCH_TYPE_RESCUE
||
2321 entry
->type
== CATCH_TYPE_ENSURE
) {
2322 catch_iseq
= entry
->iseq
;
2323 cont_pc
= entry
->cont
;
2324 cont_sp
= entry
->sp
;
2330 else if (state
== TAG_RETRY
) {
2331 ct
= cfp
->iseq
->body
->catch_table
;
2332 if (ct
) for (i
= 0; i
< ct
->size
; i
++) {
2333 entry
= UNALIGNED_MEMBER_PTR(ct
, entries
[i
]);
2334 if (entry
->start
< epc
&& entry
->end
>= epc
) {
2336 if (entry
->type
== CATCH_TYPE_ENSURE
) {
2337 catch_iseq
= entry
->iseq
;
2338 cont_pc
= entry
->cont
;
2339 cont_sp
= entry
->sp
;
2342 else if (entry
->type
== CATCH_TYPE_RETRY
) {
2343 const rb_control_frame_t
*escape_cfp
;
2344 escape_cfp
= THROW_DATA_CATCH_FRAME(err
);
2345 if (cfp
== escape_cfp
) {
2346 cfp
->pc
= cfp
->iseq
->body
->iseq_encoded
+ entry
->cont
;
2354 else if ((state
== TAG_BREAK
&& !escape_cfp
) ||
2355 (state
== TAG_REDO
) ||
2356 (state
== TAG_NEXT
)) {
2357 type
= (const enum catch_type
[TAG_MASK
]) {
2358 [TAG_BREAK
] = CATCH_TYPE_BREAK
,
2359 [TAG_NEXT
] = CATCH_TYPE_NEXT
,
2360 [TAG_REDO
] = CATCH_TYPE_REDO
,
2361 /* otherwise = dontcare */
2364 ct
= cfp
->iseq
->body
->catch_table
;
2365 if (ct
) for (i
= 0; i
< ct
->size
; i
++) {
2366 entry
= UNALIGNED_MEMBER_PTR(ct
, entries
[i
]);
2368 if (entry
->start
< epc
&& entry
->end
>= epc
) {
2369 if (entry
->type
== CATCH_TYPE_ENSURE
) {
2370 catch_iseq
= entry
->iseq
;
2371 cont_pc
= entry
->cont
;
2372 cont_sp
= entry
->sp
;
2375 else if (entry
->type
== type
) {
2376 cfp
->pc
= cfp
->iseq
->body
->iseq_encoded
+ entry
->cont
;
2377 cfp
->sp
= vm_base_ptr(cfp
) + entry
->sp
;
2379 if (state
!= TAG_REDO
) {
2380 #if OPT_STACK_CACHING
2381 *initial
= THROW_DATA_VAL(err
);
2383 *ec
->cfp
->sp
++ = THROW_DATA_VAL(err
);
2387 VM_ASSERT(ec
->tag
->state
== TAG_NONE
);
2394 ct
= cfp
->iseq
->body
->catch_table
;
2395 if (ct
) for (i
= 0; i
< ct
->size
; i
++) {
2396 entry
= UNALIGNED_MEMBER_PTR(ct
, entries
[i
]);
2397 if (entry
->start
< epc
&& entry
->end
>= epc
) {
2399 if (entry
->type
== CATCH_TYPE_ENSURE
) {
2400 catch_iseq
= entry
->iseq
;
2401 cont_pc
= entry
->cont
;
2402 cont_sp
= entry
->sp
;
2409 if (catch_iseq
!= NULL
) { /* found catch table */
2410 /* enter catch scope */
2411 const int arg_size
= 1;
2413 rb_iseq_check(catch_iseq
);
2414 cfp
->sp
= vm_base_ptr(cfp
) + cont_sp
;
2415 cfp
->pc
= cfp
->iseq
->body
->iseq_encoded
+ cont_pc
;
2417 /* push block frame */
2418 cfp
->sp
[0] = (VALUE
)err
;
2419 vm_push_frame(ec
, catch_iseq
, VM_FRAME_MAGIC_RESCUE
,
2421 VM_GUARDED_PREV_EP(cfp
->ep
),
2423 catch_iseq
->body
->iseq_encoded
,
2424 cfp
->sp
+ arg_size
/* push value */,
2425 catch_iseq
->body
->local_table_size
- arg_size
,
2426 catch_iseq
->body
->stack_max
);
2429 ec
->tag
->state
= TAG_NONE
;
2435 hook_before_rewind(ec
, ec
->cfp
, (cfp
== escape_cfp
), state
, err
);
2437 if (VM_FRAME_FINISHED_P(ec
->cfp
)) {
2438 rb_vm_pop_frame(ec
);
2439 ec
->errinfo
= (VALUE
)err
;
2440 ec
->tag
= ec
->tag
->prev
;
2441 EC_JUMP_TAG(ec
, state
);
2444 rb_vm_pop_frame(ec
);
2453 rb_iseq_eval(const rb_iseq_t
*iseq
)
2455 rb_execution_context_t
*ec
= GET_EC();
2457 vm_set_top_stack(ec
, iseq
);
2458 val
= vm_exec(ec
, true);
2463 rb_iseq_eval_main(const rb_iseq_t
*iseq
)
2465 rb_execution_context_t
*ec
= GET_EC();
2468 vm_set_main_stack(ec
, iseq
);
2469 val
= vm_exec(ec
, true);
2474 rb_vm_control_frame_id_and_class(const rb_control_frame_t
*cfp
, ID
*idp
, ID
*called_idp
, VALUE
*klassp
)
2476 const rb_callable_method_entry_t
*me
= rb_vm_frame_method_entry(cfp
);
2479 if (idp
) *idp
= me
->def
->original_id
;
2480 if (called_idp
) *called_idp
= me
->called_id
;
2481 if (klassp
) *klassp
= me
->owner
;
2490 rb_ec_frame_method_id_and_class(const rb_execution_context_t
*ec
, ID
*idp
, ID
*called_idp
, VALUE
*klassp
)
2492 return rb_vm_control_frame_id_and_class(ec
->cfp
, idp
, called_idp
, klassp
);
2496 rb_frame_method_id_and_class(ID
*idp
, VALUE
*klassp
)
2498 return rb_ec_frame_method_id_and_class(GET_EC(), idp
, 0, klassp
);
2502 rb_vm_call_cfunc(VALUE recv
, VALUE (*func
)(VALUE
), VALUE arg
,
2503 VALUE block_handler
, VALUE filename
)
2505 rb_execution_context_t
*ec
= GET_EC();
2506 const rb_control_frame_t
*reg_cfp
= ec
->cfp
;
2507 const rb_iseq_t
*iseq
= rb_iseq_new(0, filename
, filename
, Qnil
, 0, ISEQ_TYPE_TOP
);
2510 vm_push_frame(ec
, iseq
, VM_FRAME_MAGIC_TOP
| VM_ENV_FLAG_LOCAL
| VM_FRAME_FLAG_FINISH
,
2511 recv
, block_handler
,
2512 (VALUE
)vm_cref_new_toplevel(ec
), /* cref or me */
2513 0, reg_cfp
->sp
, 0, 0);
2517 rb_vm_pop_frame(ec
);
2524 rb_vm_update_references(void *ptr
)
2529 rb_gc_update_tbl_refs(vm
->frozen_strings
);
2530 vm
->mark_object_ary
= rb_gc_location(vm
->mark_object_ary
);
2531 vm
->load_path
= rb_gc_location(vm
->load_path
);
2532 vm
->load_path_snapshot
= rb_gc_location(vm
->load_path_snapshot
);
2534 if (vm
->load_path_check_cache
) {
2535 vm
->load_path_check_cache
= rb_gc_location(vm
->load_path_check_cache
);
2538 vm
->expanded_load_path
= rb_gc_location(vm
->expanded_load_path
);
2539 vm
->loaded_features
= rb_gc_location(vm
->loaded_features
);
2540 vm
->loaded_features_snapshot
= rb_gc_location(vm
->loaded_features_snapshot
);
2541 vm
->loaded_features_realpaths
= rb_gc_location(vm
->loaded_features_realpaths
);
2542 vm
->top_self
= rb_gc_location(vm
->top_self
);
2543 vm
->orig_progname
= rb_gc_location(vm
->orig_progname
);
2545 rb_gc_update_tbl_refs(vm
->overloaded_cme_table
);
2547 if (vm
->coverages
) {
2548 vm
->coverages
= rb_gc_location(vm
->coverages
);
2549 vm
->me2counter
= rb_gc_location(vm
->me2counter
);
2555 rb_vm_each_stack_value(void *ptr
, void (*cb
)(VALUE
, void*), void *ctx
)
2560 list_for_each(&vm
->ractor
.set
, r
, vmlr_node
) {
2561 VM_ASSERT(rb_ractor_status_p(r
, ractor_blocking
) ||
2562 rb_ractor_status_p(r
, ractor_running
));
2563 if (r
->threads
.cnt
> 0) {
2564 rb_thread_t
*th
= 0;
2565 list_for_each(&r
->threads
.set
, th
, lt_node
) {
2566 VM_ASSERT(th
!= NULL
);
2567 rb_execution_context_t
* ec
= th
->ec
;
2569 VALUE
*p
= ec
->vm_stack
;
2570 VALUE
*sp
= ec
->cfp
->sp
;
2572 if (!rb_special_const_p(*p
)) {
2584 static enum rb_id_table_iterator_result
2585 vm_mark_negative_cme(VALUE val
, void *dmy
)
2588 return ID_TABLE_CONTINUE
;
2592 rb_vm_mark(void *ptr
)
2594 RUBY_MARK_ENTER("vm");
2595 RUBY_GC_INFO("-------------------------------------------------\n");
2600 const VALUE
*obj_ary
;
2602 list_for_each(&vm
->ractor
.set
, r
, vmlr_node
) {
2603 // ractor.set only contains blocking or running ractors
2604 VM_ASSERT(rb_ractor_status_p(r
, ractor_blocking
) ||
2605 rb_ractor_status_p(r
, ractor_running
));
2606 rb_gc_mark(rb_ractor_self(r
));
2609 rb_gc_mark_movable(vm
->mark_object_ary
);
2611 len
= RARRAY_LEN(vm
->mark_object_ary
);
2612 obj_ary
= RARRAY_CONST_PTR(vm
->mark_object_ary
);
2613 for (i
=0; i
< len
; i
++) {
2617 rb_gc_mark(*obj_ary
);
2618 jlen
= RARRAY_LEN(*obj_ary
);
2619 ptr
= RARRAY_CONST_PTR(*obj_ary
);
2620 for (j
=0; j
< jlen
; j
++) {
2626 rb_gc_mark_movable(vm
->load_path
);
2627 rb_gc_mark_movable(vm
->load_path_snapshot
);
2628 RUBY_MARK_MOVABLE_UNLESS_NULL(vm
->load_path_check_cache
);
2629 rb_gc_mark_movable(vm
->expanded_load_path
);
2630 rb_gc_mark_movable(vm
->loaded_features
);
2631 rb_gc_mark_movable(vm
->loaded_features_snapshot
);
2632 rb_gc_mark_movable(vm
->loaded_features_realpaths
);
2633 rb_gc_mark_movable(vm
->top_self
);
2634 rb_gc_mark_movable(vm
->orig_progname
);
2635 RUBY_MARK_MOVABLE_UNLESS_NULL(vm
->coverages
);
2636 RUBY_MARK_MOVABLE_UNLESS_NULL(vm
->me2counter
);
2637 /* Prevent classes from moving */
2638 rb_mark_tbl(vm
->defined_module_hash
);
2640 if (vm
->loading_table
) {
2641 rb_mark_tbl(vm
->loading_table
);
2644 rb_gc_mark_values(RUBY_NSIG
, vm
->trap_list
.cmd
);
2646 rb_id_table_foreach_values(vm
->negative_cme_table
, vm_mark_negative_cme
, NULL
);
2647 rb_mark_tbl_no_pin(vm
->overloaded_cme_table
);
2648 for (i
=0; i
<VM_GLOBAL_CC_CACHE_TABLE_SIZE
; i
++) {
2649 const struct rb_callcache
*cc
= vm
->global_cc_cache_table
[i
];
2652 if (!vm_cc_invalidated_p(cc
)) {
2653 rb_gc_mark((VALUE
)cc
);
2656 vm
->global_cc_cache_table
[i
] = NULL
;
2664 RUBY_MARK_LEAVE("vm");
2667 #undef rb_vm_register_special_exception
2669 rb_vm_register_special_exception_str(enum ruby_special_exceptions sp
, VALUE cls
, VALUE mesg
)
2671 rb_vm_t
*vm
= GET_VM();
2672 VALUE exc
= rb_exc_new3(cls
, rb_obj_freeze(mesg
));
2674 ((VALUE
*)vm
->special_exceptions
)[sp
] = exc
;
2675 rb_gc_register_mark_object(exc
);
2679 rb_vm_add_root_module(VALUE module
)
2681 rb_vm_t
*vm
= GET_VM();
2683 st_insert(vm
->defined_module_hash
, (st_data_t
)module
, (st_data_t
)module
);
2689 free_loading_table_entry(st_data_t key
, st_data_t value
, st_data_t arg
)
2696 ruby_vm_destruct(rb_vm_t
*vm
)
2698 RUBY_FREE_ENTER("vm");
2701 rb_thread_t
*th
= vm
->ractor
.main_thread
;
2702 struct rb_objspace
*objspace
= vm
->objspace
;
2703 vm
->ractor
.main_thread
= NULL
;
2706 rb_fiber_reset_root_local_storage(th
);
2709 rb_vm_living_threads_init(vm
);
2710 ruby_vm_run_at_exit_hooks(vm
);
2711 if (vm
->loading_table
) {
2712 st_foreach(vm
->loading_table
, free_loading_table_entry
, 0);
2713 st_free_table(vm
->loading_table
);
2714 vm
->loading_table
= 0;
2716 if (vm
->frozen_strings
) {
2717 st_free_table(vm
->frozen_strings
);
2718 vm
->frozen_strings
= 0;
2720 RB_ALTSTACK_FREE(vm
->main_altstack
);
2722 rb_objspace_free(objspace
);
2724 rb_native_mutex_destroy(&vm
->waitpid_lock
);
2725 rb_native_mutex_destroy(&vm
->workqueue_lock
);
2726 /* after freeing objspace, you *can't* use ruby_xfree() */
2728 ruby_current_vm_ptr
= NULL
;
2730 RUBY_FREE_LEAVE("vm");
2735 vm_memsize(const void *ptr
)
2737 size_t size
= sizeof(rb_vm_t
);
2740 // size += vmobj->ractor_num * sizeof(rb_ractor_t);
2745 static const rb_data_type_t vm_data_type
= {
2747 {0, 0, vm_memsize
,},
2748 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
2753 vm_default_params(void)
2755 rb_vm_t
*vm
= GET_VM();
2756 VALUE result
= rb_hash_new_with_size(4);
2757 #define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
2758 SET(thread_vm_stack_size
);
2759 SET(thread_machine_stack_size
);
2760 SET(fiber_vm_stack_size
);
2761 SET(fiber_machine_stack_size
);
2763 rb_obj_freeze(result
);
2768 get_param(const char *name
, size_t default_value
, size_t min_value
)
2771 size_t result
= default_value
;
2772 if ((envval
= getenv(name
)) != 0) {
2773 long val
= atol(envval
);
2774 if (val
< (long)min_value
) {
2775 val
= (long)min_value
;
2777 result
= (size_t)(((val
-1 + RUBY_VM_SIZE_ALIGN
) / RUBY_VM_SIZE_ALIGN
) * RUBY_VM_SIZE_ALIGN
);
2779 if (0) ruby_debug_printf("%s: %"PRIuSIZE
"\n", name
, result
); /* debug print */
2785 check_machine_stack_size(size_t *sizep
)
2787 #ifdef PTHREAD_STACK_MIN
2788 size_t size
= *sizep
;
2791 #ifdef PTHREAD_STACK_MIN
2792 if (size
< (size_t)PTHREAD_STACK_MIN
) {
2793 *sizep
= (size_t)PTHREAD_STACK_MIN
* 2;
2799 vm_default_params_setup(rb_vm_t
*vm
)
2801 vm
->default_params
.thread_vm_stack_size
=
2802 get_param("RUBY_THREAD_VM_STACK_SIZE",
2803 RUBY_VM_THREAD_VM_STACK_SIZE
,
2804 RUBY_VM_THREAD_VM_STACK_SIZE_MIN
);
2806 vm
->default_params
.thread_machine_stack_size
=
2807 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
2808 RUBY_VM_THREAD_MACHINE_STACK_SIZE
,
2809 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
);
2811 vm
->default_params
.fiber_vm_stack_size
=
2812 get_param("RUBY_FIBER_VM_STACK_SIZE",
2813 RUBY_VM_FIBER_VM_STACK_SIZE
,
2814 RUBY_VM_FIBER_VM_STACK_SIZE_MIN
);
2816 vm
->default_params
.fiber_machine_stack_size
=
2817 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
2818 RUBY_VM_FIBER_MACHINE_STACK_SIZE
,
2819 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
);
2821 /* environment dependent check */
2822 check_machine_stack_size(&vm
->default_params
.thread_machine_stack_size
);
2823 check_machine_stack_size(&vm
->default_params
.fiber_machine_stack_size
);
2827 vm_init2(rb_vm_t
*vm
)
2829 MEMZERO(vm
, rb_vm_t
, 1);
2830 rb_vm_living_threads_init(vm
);
2831 vm
->thread_report_on_exception
= 1;
2832 vm
->src_encoding_index
= -1;
2834 vm_default_params_setup(vm
);
2838 rb_execution_context_update(const rb_execution_context_t
*ec
)
2840 /* update VM stack */
2844 VALUE
*p
= ec
->vm_stack
;
2845 VALUE
*sp
= ec
->cfp
->sp
;
2846 rb_control_frame_t
*cfp
= ec
->cfp
;
2847 rb_control_frame_t
*limit_cfp
= (void *)(ec
->vm_stack
+ ec
->vm_stack_size
);
2849 for (i
= 0; i
< (long)(sp
- p
); i
++) {
2851 VALUE update
= rb_gc_location(ref
);
2852 if (ref
!= update
) {
2857 while (cfp
!= limit_cfp
) {
2858 const VALUE
*ep
= cfp
->ep
;
2859 cfp
->self
= rb_gc_location(cfp
->self
);
2860 cfp
->iseq
= (rb_iseq_t
*)rb_gc_location((VALUE
)cfp
->iseq
);
2861 cfp
->block_code
= (void *)rb_gc_location((VALUE
)cfp
->block_code
);
2863 if (!VM_ENV_LOCAL_P(ep
)) {
2864 const VALUE
*prev_ep
= VM_ENV_PREV_EP(ep
);
2865 if (VM_ENV_FLAGS(prev_ep
, VM_ENV_FLAG_ESCAPED
)) {
2866 VM_FORCE_WRITE(&prev_ep
[VM_ENV_DATA_INDEX_ENV
], rb_gc_location(prev_ep
[VM_ENV_DATA_INDEX_ENV
]));
2869 if (VM_ENV_FLAGS(ep
, VM_ENV_FLAG_ESCAPED
)) {
2870 VM_FORCE_WRITE(&ep
[VM_ENV_DATA_INDEX_ENV
], rb_gc_location(ep
[VM_ENV_DATA_INDEX_ENV
]));
2871 VM_FORCE_WRITE(&ep
[VM_ENV_DATA_INDEX_ME_CREF
], rb_gc_location(ep
[VM_ENV_DATA_INDEX_ME_CREF
]));
2875 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
2880 static enum rb_id_table_iterator_result
2881 mark_local_storage_i(VALUE local
, void *data
)
2884 return ID_TABLE_CONTINUE
;
2888 rb_execution_context_mark(const rb_execution_context_t
*ec
)
2893 VALUE
*p
= ec
->vm_stack
;
2894 VALUE
*sp
= ec
->cfp
->sp
;
2895 rb_control_frame_t
*cfp
= ec
->cfp
;
2896 rb_control_frame_t
*limit_cfp
= (void *)(ec
->vm_stack
+ ec
->vm_stack_size
);
2898 VM_ASSERT(sp
== ec
->cfp
->sp
);
2899 rb_gc_mark_vm_stack_values((long)(sp
- p
), p
);
2901 while (cfp
!= limit_cfp
) {
2902 const VALUE
*ep
= cfp
->ep
;
2903 VM_ASSERT(!!VM_ENV_FLAGS(ep
, VM_ENV_FLAG_ESCAPED
) == vm_ep_in_heap_p_(ec
, ep
));
2904 rb_gc_mark_movable(cfp
->self
);
2905 rb_gc_mark_movable((VALUE
)cfp
->iseq
);
2906 rb_gc_mark_movable((VALUE
)cfp
->block_code
);
2908 if (!VM_ENV_LOCAL_P(ep
)) {
2909 const VALUE
*prev_ep
= VM_ENV_PREV_EP(ep
);
2910 if (VM_ENV_FLAGS(prev_ep
, VM_ENV_FLAG_ESCAPED
)) {
2911 rb_gc_mark_movable(prev_ep
[VM_ENV_DATA_INDEX_ENV
]);
2914 if (VM_ENV_FLAGS(ep
, VM_ENV_FLAG_ESCAPED
)) {
2915 rb_gc_mark_movable(ep
[VM_ENV_DATA_INDEX_ENV
]);
2916 rb_gc_mark(ep
[VM_ENV_DATA_INDEX_ME_CREF
]);
2920 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
2924 /* mark machine stack */
2925 if (ec
->machine
.stack_start
&& ec
->machine
.stack_end
&&
2926 ec
!= GET_EC() /* marked for current ec at the first stage of marking */
2928 rb_gc_mark_machine_stack(ec
);
2929 rb_gc_mark_locations((VALUE
*)&ec
->machine
.regs
,
2930 (VALUE
*)(&ec
->machine
.regs
) +
2931 sizeof(ec
->machine
.regs
) / (sizeof(VALUE
)));
2934 RUBY_MARK_UNLESS_NULL(ec
->errinfo
);
2935 RUBY_MARK_UNLESS_NULL(ec
->root_svar
);
2936 if (ec
->local_storage
) {
2937 rb_id_table_foreach_values(ec
->local_storage
, mark_local_storage_i
, NULL
);
2939 RUBY_MARK_UNLESS_NULL(ec
->local_storage_recursive_hash
);
2940 RUBY_MARK_UNLESS_NULL(ec
->local_storage_recursive_hash_for_trace
);
2941 RUBY_MARK_UNLESS_NULL(ec
->private_const_reference
);
2944 void rb_fiber_mark_self(rb_fiber_t
*fib
);
2945 void rb_fiber_update_self(rb_fiber_t
*fib
);
2946 void rb_threadptr_root_fiber_setup(rb_thread_t
*th
);
2947 void rb_threadptr_root_fiber_release(rb_thread_t
*th
);
2950 thread_compact(void *ptr
)
2952 rb_thread_t
*th
= ptr
;
2954 th
->self
= rb_gc_location(th
->self
);
2956 if (!th
->root_fiber
) {
2957 rb_execution_context_update(th
->ec
);
2962 thread_mark(void *ptr
)
2964 rb_thread_t
*th
= ptr
;
2965 RUBY_MARK_ENTER("thread");
2966 rb_fiber_mark_self(th
->ec
->fiber_ptr
);
2968 /* mark ruby objects */
2969 switch (th
->invoke_type
) {
2970 case thread_invoke_type_proc
:
2971 case thread_invoke_type_ractor_proc
:
2972 RUBY_MARK_UNLESS_NULL(th
->invoke_arg
.proc
.proc
);
2973 RUBY_MARK_UNLESS_NULL(th
->invoke_arg
.proc
.args
);
2975 case thread_invoke_type_func
:
2976 rb_gc_mark_maybe((VALUE
)th
->invoke_arg
.func
.arg
);
2982 rb_gc_mark(rb_ractor_self(th
->ractor
));
2983 RUBY_MARK_UNLESS_NULL(th
->thgroup
);
2984 RUBY_MARK_UNLESS_NULL(th
->value
);
2985 RUBY_MARK_UNLESS_NULL(th
->pending_interrupt_queue
);
2986 RUBY_MARK_UNLESS_NULL(th
->pending_interrupt_mask_stack
);
2987 RUBY_MARK_UNLESS_NULL(th
->top_self
);
2988 RUBY_MARK_UNLESS_NULL(th
->top_wrapper
);
2989 if (th
->root_fiber
) rb_fiber_mark_self(th
->root_fiber
);
2991 /* Ensure EC stack objects are pinned */
2992 rb_execution_context_mark(th
->ec
);
2993 RUBY_MARK_UNLESS_NULL(th
->stat_insn_usage
);
2994 RUBY_MARK_UNLESS_NULL(th
->last_status
);
2995 RUBY_MARK_UNLESS_NULL(th
->locking_mutex
);
2996 RUBY_MARK_UNLESS_NULL(th
->name
);
2998 RUBY_MARK_UNLESS_NULL(th
->scheduler
);
3000 RUBY_MARK_LEAVE("thread");
3004 thread_free(void *ptr
)
3006 rb_thread_t
*th
= ptr
;
3007 RUBY_FREE_ENTER("thread");
3009 if (th
->locking_mutex
!= Qfalse
) {
3010 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th
, (void *)th
->locking_mutex
);
3012 if (th
->keeping_mutexes
!= NULL
) {
3013 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th
, (void *)th
->keeping_mutexes
);
3016 rb_threadptr_root_fiber_release(th
);
3018 if (th
->vm
&& th
->vm
->ractor
.main_thread
== th
) {
3019 RUBY_GC_INFO("MRI main thread\n");
3025 RUBY_FREE_LEAVE("thread");
3029 thread_memsize(const void *ptr
)
3031 const rb_thread_t
*th
= ptr
;
3032 size_t size
= sizeof(rb_thread_t
);
3034 if (!th
->root_fiber
) {
3035 size
+= th
->ec
->vm_stack_size
* sizeof(VALUE
);
3037 if (th
->ec
->local_storage
) {
3038 size
+= rb_id_table_memsize(th
->ec
->local_storage
);
3043 #define thread_data_type ruby_threadptr_data_type
3044 const rb_data_type_t ruby_threadptr_data_type
= {
3052 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3056 rb_obj_is_thread(VALUE obj
)
3058 return RBOOL(rb_typeddata_is_kind_of(obj
, &thread_data_type
));
3062 thread_alloc(VALUE klass
)
3066 obj
= TypedData_Make_Struct(klass
, rb_thread_t
, &thread_data_type
, th
);
3072 rb_ec_set_vm_stack(rb_execution_context_t
*ec
, VALUE
*stack
, size_t size
)
3074 ec
->vm_stack
= stack
;
3075 ec
->vm_stack_size
= size
;
3079 rb_ec_initialize_vm_stack(rb_execution_context_t
*ec
, VALUE
*stack
, size_t size
)
3081 rb_ec_set_vm_stack(ec
, stack
, size
);
3083 ec
->cfp
= (void *)(ec
->vm_stack
+ ec
->vm_stack_size
);
3086 NULL
/* dummy iseq */,
3087 VM_FRAME_MAGIC_DUMMY
| VM_ENV_FLAG_LOCAL
| VM_FRAME_FLAG_FINISH
| VM_FRAME_FLAG_CFRAME
/* dummy frame */,
3088 Qnil
/* dummy self */, VM_BLOCK_HANDLER_NONE
/* dummy block ptr */,
3089 0 /* dummy cref/me */,
3090 0 /* dummy pc */, ec
->vm_stack
, 0, 0
3095 rb_ec_clear_vm_stack(rb_execution_context_t
*ec
)
3097 rb_ec_set_vm_stack(ec
, NULL
, 0);
3099 // Avoid dangling pointers:
3104 th_init(rb_thread_t
*th
, VALUE self
)
3107 rb_threadptr_root_fiber_setup(th
);
3109 /* All threads are blocking until a non-blocking fiber is scheduled */
3111 th
->scheduler
= Qnil
;
3114 size_t size
= th
->vm
->default_params
.thread_vm_stack_size
/ sizeof(VALUE
);
3115 rb_ec_initialize_vm_stack(th
->ec
, ALLOC_N(VALUE
, size
), size
);
3118 VM_ASSERT(th
->ec
->cfp
== NULL
);
3119 VM_ASSERT(th
->ec
->vm_stack
== NULL
);
3120 VM_ASSERT(th
->ec
->vm_stack_size
== 0);
3123 th
->status
= THREAD_RUNNABLE
;
3124 th
->last_status
= Qnil
;
3125 th
->ec
->errinfo
= Qnil
;
3126 th
->ec
->root_svar
= Qfalse
;
3127 th
->ec
->local_storage_recursive_hash
= Qnil
;
3128 th
->ec
->local_storage_recursive_hash_for_trace
= Qnil
;
3129 #ifdef NON_SCALAR_THREAD_ID
3130 th
->thread_id_string
[0] = '\0';
3135 #if OPT_CALL_THREADED_CODE
3136 th
->retval
= Qundef
;
3139 th
->report_on_exception
= th
->vm
->thread_report_on_exception
;
3140 th
->ext_config
.ractor_safe
= true;
3144 ruby_thread_init(VALUE self
)
3146 rb_thread_t
*th
= GET_THREAD();
3147 rb_thread_t
*target_th
= rb_thread_ptr(self
);
3148 rb_vm_t
*vm
= th
->vm
;
3151 th_init(target_th
, self
);
3153 target_th
->top_wrapper
= 0;
3154 target_th
->top_self
= rb_vm_top_self();
3155 target_th
->ec
->root_svar
= Qfalse
;
3156 target_th
->ractor
= th
->ractor
;
3162 rb_thread_alloc(VALUE klass
)
3164 VALUE self
= thread_alloc(klass
);
3165 ruby_thread_init(self
);
3169 #define REWIND_CFP(expr) do { \
3170 rb_execution_context_t *ec__ = GET_EC(); \
3171 VALUE *const curr_sp = (ec__->cfp++)->sp; \
3172 VALUE *const saved_sp = ec__->cfp->sp; \
3173 ec__->cfp->sp = curr_sp; \
3175 (ec__->cfp--)->sp = saved_sp; \
3179 m_core_set_method_alias(VALUE self
, VALUE cbase
, VALUE sym1
, VALUE sym2
)
3182 rb_alias(cbase
, SYM2ID(sym1
), SYM2ID(sym2
));
3188 m_core_set_variable_alias(VALUE self
, VALUE sym1
, VALUE sym2
)
3191 rb_alias_variable(SYM2ID(sym1
), SYM2ID(sym2
));
3197 m_core_undef_method(VALUE self
, VALUE cbase
, VALUE sym
)
3200 ID mid
= SYM2ID(sym
);
3201 rb_undef(cbase
, mid
);
3202 rb_clear_method_cache(self
, mid
);
3208 m_core_set_postexe(VALUE self
)
3210 rb_set_end_proc(rb_call_end_proc
, rb_block_proc());
3214 static VALUE
core_hash_merge_kwd(VALUE hash
, VALUE kw
);
3217 core_hash_merge(VALUE hash
, long argc
, const VALUE
*argv
)
3219 Check_Type(hash
, T_HASH
);
3220 VM_ASSERT(argc
% 2 == 0);
3221 rb_hash_bulk_insert(argc
, argv
, hash
);
3226 m_core_hash_merge_ptr(int argc
, VALUE
*argv
, VALUE recv
)
3228 VALUE hash
= argv
[0];
3230 REWIND_CFP(hash
= core_hash_merge(hash
, argc
-1, argv
+1));
3236 kwmerge_i(VALUE key
, VALUE value
, VALUE hash
)
3238 rb_hash_aset(hash
, key
, value
);
3243 m_core_hash_merge_kwd(VALUE recv
, VALUE hash
, VALUE kw
)
3245 REWIND_CFP(hash
= core_hash_merge_kwd(hash
, kw
));
3250 m_core_make_shareable(VALUE recv
, VALUE obj
)
3252 return rb_ractor_make_shareable(obj
);
3256 m_core_make_shareable_copy(VALUE recv
, VALUE obj
)
3258 return rb_ractor_make_shareable_copy(obj
);
3262 m_core_ensure_shareable(VALUE recv
, VALUE obj
, VALUE name
)
3264 return rb_ractor_ensure_shareable(obj
, name
);
3268 core_hash_merge_kwd(VALUE hash
, VALUE kw
)
3270 rb_hash_foreach(rb_to_hash_type(kw
), kwmerge_i
, hash
);
3274 /* Returns true if JIT is enabled */
3276 mjit_enabled_p(VALUE _
)
3278 return RBOOL(mjit_enabled
);
3282 mjit_pause_m(int argc
, VALUE
*argv
, RB_UNUSED_VAR(VALUE self
))
3284 VALUE options
= Qnil
;
3286 rb_scan_args(argc
, argv
, "0:", &options
);
3288 if (!NIL_P(options
)) {
3289 static ID keyword_ids
[1];
3290 if (!keyword_ids
[0])
3291 keyword_ids
[0] = rb_intern("wait");
3292 rb_get_kwargs(options
, keyword_ids
, 0, 1, &wait
);
3295 return mjit_pause(RTEST(wait
));
3299 mjit_resume_m(VALUE _
)
3301 return mjit_resume();
3304 extern VALUE
*rb_gc_stack_start
;
3305 extern size_t rb_gc_stack_maxsize
;
3307 /* debug functions */
3313 rb_vm_bugreport(NULL
);
3321 VALUE ary
= rb_ary_new();
3322 #ifdef HAVE_BACKTRACE
3323 #include <execinfo.h>
3324 #define MAX_NATIVE_TRACE 1024
3325 static void *trace
[MAX_NATIVE_TRACE
];
3326 int n
= (int)backtrace(trace
, MAX_NATIVE_TRACE
);
3327 char **syms
= backtrace_symbols(trace
, n
);
3334 for (i
=0; i
<n
; i
++) {
3335 rb_ary_push(ary
, rb_str_new2(syms
[i
]));
3337 free(syms
); /* OK */
3342 #if VM_COLLECT_USAGE_DETAILS
3343 static VALUE
usage_analysis_insn_start(VALUE self
);
3344 static VALUE
usage_analysis_operand_start(VALUE self
);
3345 static VALUE
usage_analysis_register_start(VALUE self
);
3346 static VALUE
usage_analysis_insn_stop(VALUE self
);
3347 static VALUE
usage_analysis_operand_stop(VALUE self
);
3348 static VALUE
usage_analysis_register_stop(VALUE self
);
3349 static VALUE
usage_analysis_insn_running(VALUE self
);
3350 static VALUE
usage_analysis_operand_running(VALUE self
);
3351 static VALUE
usage_analysis_register_running(VALUE self
);
3352 static VALUE
usage_analysis_insn_clear(VALUE self
);
3353 static VALUE
usage_analysis_operand_clear(VALUE self
);
3354 static VALUE
usage_analysis_register_clear(VALUE self
);
3358 f_raise(int c
, VALUE
*v
, VALUE _
)
3360 return rb_f_raise(c
, v
);
3366 return rb_block_proc();
3372 return rb_block_lambda();
3376 f_sprintf(int c
, const VALUE
*v
, VALUE _
)
3378 return rb_f_sprintf(c
, v
);
3382 vm_mtbl(VALUE self
, VALUE obj
, VALUE sym
)
3384 vm_mtbl_dump(CLASS_OF(obj
), RTEST(sym
) ? SYM2ID(sym
) : 0);
3389 vm_mtbl2(VALUE self
, VALUE obj
, VALUE sym
)
3391 vm_mtbl_dump(obj
, RTEST(sym
) ? SYM2ID(sym
) : 0);
3397 * RubyVM.keep_script_lines -> true or false
3399 * Return current +keep_script_lines+ status. Now it only returns
3400 * +true+ of +false+, but it can return other objects in future.
3402 * Note that this is an API for ruby internal use, debugging,
3403 * and research. Do not use this for any other purpose.
3404 * The compatibility is not guaranteed.
3407 vm_keep_script_lines(VALUE self
)
3409 return RBOOL(ruby_vm_keep_script_lines
);
3414 * RubyVM.keep_script_lines = true / false
3416 * It set +keep_script_lines+ flag. If the flag is set, all
3417 * loaded scripts are recorded in a interpreter process.
3419 * Note that this is an API for ruby internal use, debugging,
3420 * and research. Do not use this for any other purpose.
3421 * The compatibility is not guaranteed.
3424 vm_keep_script_lines_set(VALUE self
, VALUE flags
)
3426 ruby_vm_keep_script_lines
= RTEST(flags
);
3438 * Document-class: RubyVM
3440 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
3441 * other Ruby implementations such as JRuby and TruffleRuby.
3443 * The RubyVM module provides some access to MRI internals.
3444 * This module is for very limited purposes, such as debugging,
3445 * prototyping, and research. Normal users must not use it.
3446 * This module is not portable between Ruby implementations.
3448 rb_cRubyVM
= rb_define_class("RubyVM", rb_cObject
);
3449 rb_undef_alloc_func(rb_cRubyVM
);
3450 rb_undef_method(CLASS_OF(rb_cRubyVM
), "new");
3451 rb_define_singleton_method(rb_cRubyVM
, "stat", vm_stat
, -1);
3452 rb_define_singleton_method(rb_cRubyVM
, "keep_script_lines", vm_keep_script_lines
, 0);
3453 rb_define_singleton_method(rb_cRubyVM
, "keep_script_lines=", vm_keep_script_lines_set
, 1);
3455 #if USE_DEBUG_COUNTER
3456 rb_define_singleton_method(rb_cRubyVM
, "reset_debug_counters", rb_debug_counter_reset
, 0);
3457 rb_define_singleton_method(rb_cRubyVM
, "show_debug_counters", rb_debug_counter_show
, 0);
3460 /* FrozenCore (hidden) */
3461 fcore
= rb_class_new(rb_cBasicObject
);
3462 rb_set_class_path(fcore
, rb_cRubyVM
, "FrozenCore");
3463 RBASIC(fcore
)->flags
= T_ICLASS
;
3464 klass
= rb_singleton_class(fcore
);
3465 rb_define_method_id(klass
, id_core_set_method_alias
, m_core_set_method_alias
, 3);
3466 rb_define_method_id(klass
, id_core_set_variable_alias
, m_core_set_variable_alias
, 2);
3467 rb_define_method_id(klass
, id_core_undef_method
, m_core_undef_method
, 2);
3468 rb_define_method_id(klass
, id_core_set_postexe
, m_core_set_postexe
, 0);
3469 rb_define_method_id(klass
, id_core_hash_merge_ptr
, m_core_hash_merge_ptr
, -1);
3470 rb_define_method_id(klass
, id_core_hash_merge_kwd
, m_core_hash_merge_kwd
, 2);
3471 rb_define_method_id(klass
, id_core_raise
, f_raise
, -1);
3472 rb_define_method_id(klass
, id_core_sprintf
, f_sprintf
, -1);
3473 rb_define_method_id(klass
, idProc
, f_proc
, 0);
3474 rb_define_method_id(klass
, idLambda
, f_lambda
, 0);
3475 rb_define_method(klass
, "make_shareable", m_core_make_shareable
, 1);
3476 rb_define_method(klass
, "make_shareable_copy", m_core_make_shareable_copy
, 1);
3477 rb_define_method(klass
, "ensure_shareable", m_core_ensure_shareable
, 2);
3478 rb_obj_freeze(fcore
);
3479 RBASIC_CLEAR_CLASS(klass
);
3480 rb_obj_freeze(klass
);
3481 rb_gc_register_mark_object(fcore
);
3482 rb_mRubyVMFrozenCore
= fcore
;
3485 * Provides access to the Method JIT compiler of MRI.
3486 * Of course, this module is MRI specific.
3488 VALUE mjit
= rb_define_module_under(rb_cRubyVM
, "MJIT");
3489 rb_define_singleton_method(mjit
, "enabled?", mjit_enabled_p
, 0);
3490 rb_define_singleton_method(mjit
, "pause", mjit_pause_m
, -1);
3491 rb_define_singleton_method(mjit
, "resume", mjit_resume_m
, 0);
3494 * Document-class: Thread
3496 * Threads are the Ruby implementation for a concurrent programming model.
3498 * Programs that require multiple threads of execution are a perfect
3499 * candidate for Ruby's Thread class.
3501 * For example, we can create a new thread separate from the main thread's
3502 * execution using ::new.
3504 * thr = Thread.new { puts "What's the big deal" }
3506 * Then we are able to pause the execution of the main thread and allow
3507 * our new thread to finish, using #join:
3509 * thr.join #=> "What's the big deal"
3511 * If we don't call +thr.join+ before the main thread terminates, then all
3512 * other threads including +thr+ will be killed.
3514 * Alternatively, you can use an array for handling multiple threads at
3515 * once, like in the following example:
3518 * threads << Thread.new { puts "What's the big deal" }
3519 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
3521 * After creating a few threads we wait for them all to finish
3524 * threads.each { |thr| thr.join }
3526 * To retrieve the last value of a thread, use #value
3528 * thr = Thread.new { sleep 1; "Useful value" }
3529 * thr.value #=> "Useful value"
3531 * === Thread initialization
3533 * In order to create new threads, Ruby provides ::new, ::start, and
3534 * ::fork. A block must be provided with each of these methods, otherwise
3535 * a ThreadError will be raised.
3537 * When subclassing the Thread class, the +initialize+ method of your
3538 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
3539 * call super in your +initialize+ method.
3541 * === Thread termination
3543 * For terminating threads, Ruby provides a variety of ways to do this.
3545 * The class method ::kill, is meant to exit a given thread:
3547 * thr = Thread.new { sleep }
3548 * Thread.kill(thr) # sends exit() to thr
3550 * Alternatively, you can use the instance method #exit, or any of its
3551 * aliases #kill or #terminate.
3557 * Ruby provides a few instance methods for querying the state of a given
3558 * thread. To get a string with the current thread's state use #status
3560 * thr = Thread.new { sleep }
3561 * thr.status # => "sleep"
3563 * thr.status # => false
3565 * You can also use #alive? to tell if the thread is running or sleeping,
3566 * and #stop? if the thread is dead or sleeping.
3568 * === Thread variables and scope
3570 * Since threads are created with blocks, the same rules apply to other
3571 * Ruby blocks for variable scope. Any local variables created within this
3572 * block are accessible to only this thread.
3574 * ==== Fiber-local vs. Thread-local
3576 * Each fiber has its own bucket for Thread#[] storage. When you set a
3577 * new fiber-local it is only accessible within this Fiber. To illustrate:
3580 * Thread.current[:foo] = "bar"
3582 * p Thread.current[:foo] # => nil
3586 * This example uses #[] for getting and #[]= for setting fiber-locals,
3587 * you can also use #keys to list the fiber-locals for a given
3588 * thread and #key? to check if a fiber-local exists.
3590 * When it comes to thread-locals, they are accessible within the entire
3591 * scope of the thread. Given the following example:
3594 * Thread.current.thread_variable_set(:foo, 1)
3595 * p Thread.current.thread_variable_get(:foo) # => 1
3597 * Thread.current.thread_variable_set(:foo, 2)
3598 * p Thread.current.thread_variable_get(:foo) # => 2
3600 * p Thread.current.thread_variable_get(:foo) # => 2
3603 * You can see that the thread-local +:foo+ carried over into the fiber
3604 * and was changed to +2+ by the end of the thread.
3606 * This example makes use of #thread_variable_set to create new
3607 * thread-locals, and #thread_variable_get to reference them.
3609 * There is also #thread_variables to list all thread-locals, and
3610 * #thread_variable? to check if a given thread-local exists.
3612 * === Exception handling
3614 * When an unhandled exception is raised inside a thread, it will
3615 * terminate. By default, this exception will not propagate to other
3616 * threads. The exception is stored and when another thread calls #value
3617 * or #join, the exception will be re-raised in that thread.
3619 * t = Thread.new{ raise 'something went wrong' }
3620 * t.value #=> RuntimeError: something went wrong
3622 * An exception can be raised from outside the thread using the
3623 * Thread#raise instance method, which takes the same parameters as
3626 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
3627 * true, or $DEBUG = true will cause a subsequent unhandled exception
3628 * raised in a thread to be automatically re-raised in the main thread.
3630 * With the addition of the class method ::handle_interrupt, you can now
3631 * handle exceptions asynchronously with threads.
3635 * Ruby provides a few ways to support scheduling threads in your program.
3637 * The first way is by using the class method ::stop, to put the current
3638 * running thread to sleep and schedule the execution of another thread.
3640 * Once a thread is asleep, you can use the instance method #wakeup to
3641 * mark your thread as eligible for scheduling.
3643 * You can also try ::pass, which attempts to pass execution to another
3644 * thread but is dependent on the OS whether a running thread will switch
3645 * or not. The same goes for #priority, which lets you hint to the thread
3646 * scheduler which threads you want to take precedence when passing
3647 * execution. This method is also dependent on the OS and may be ignored
3648 * on some platforms.
3651 rb_cThread
= rb_define_class("Thread", rb_cObject
);
3652 rb_undef_alloc_func(rb_cThread
);
3654 #if VM_COLLECT_USAGE_DETAILS
3655 /* ::RubyVM::USAGE_ANALYSIS_* */
3656 #define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
3657 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
3658 define_usage_analysis_hash(INSN
);
3659 define_usage_analysis_hash(REGS
);
3660 define_usage_analysis_hash(INSN_BIGRAM
);
3662 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start
, 0);
3663 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start
, 0);
3664 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start
, 0);
3665 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop
, 0);
3666 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop
, 0);
3667 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop
, 0);
3668 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running
, 0);
3669 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running
, 0);
3670 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running
, 0);
3671 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear
, 0);
3672 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear
, 0);
3673 rb_define_singleton_method(rb_cRubyVM
, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear
, 0);
3677 * An Array of VM build options.
3678 * This constant is MRI specific.
3680 rb_define_const(rb_cRubyVM
, "OPTS", opts
= rb_ary_new());
3682 #if OPT_DIRECT_THREADED_CODE
3683 rb_ary_push(opts
, rb_str_new2("direct threaded code"));
3684 #elif OPT_TOKEN_THREADED_CODE
3685 rb_ary_push(opts
, rb_str_new2("token threaded code"));
3686 #elif OPT_CALL_THREADED_CODE
3687 rb_ary_push(opts
, rb_str_new2("call threaded code"));
3690 #if OPT_STACK_CACHING
3691 rb_ary_push(opts
, rb_str_new2("stack caching"));
3693 #if OPT_OPERANDS_UNIFICATION
3694 rb_ary_push(opts
, rb_str_new2("operands unification"));
3696 #if OPT_INSTRUCTIONS_UNIFICATION
3697 rb_ary_push(opts
, rb_str_new2("instructions unification"));
3699 #if OPT_INLINE_METHOD_CACHE
3700 rb_ary_push(opts
, rb_str_new2("inline method cache"));
3702 #if OPT_BLOCKINLINING
3703 rb_ary_push(opts
, rb_str_new2("block inlining"));
3706 /* ::RubyVM::INSTRUCTION_NAMES
3707 * A list of bytecode instruction names in MRI.
3708 * This constant is MRI specific.
3710 rb_define_const(rb_cRubyVM
, "INSTRUCTION_NAMES", rb_insns_name_array());
3712 /* ::RubyVM::DEFAULT_PARAMS
3713 * This constant exposes the VM's default parameters.
3714 * Note that changing these values does not affect VM execution.
3715 * Specification is not stable and you should not depend on this value.
3716 * Of course, this constant is MRI specific.
3718 rb_define_const(rb_cRubyVM
, "DEFAULT_PARAMS", vm_default_params());
3720 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
3722 rb_define_singleton_method(rb_cRubyVM
, "SDR", sdr
, 0);
3723 rb_define_singleton_method(rb_cRubyVM
, "NSDR", nsdr
, 0);
3724 rb_define_singleton_method(rb_cRubyVM
, "mtbl", vm_mtbl
, 2);
3725 rb_define_singleton_method(rb_cRubyVM
, "mtbl2", vm_mtbl2
, 2);
3733 /* VM bootstrap: phase 2 */
3735 rb_vm_t
*vm
= ruby_current_vm_ptr
;
3736 rb_thread_t
*th
= GET_THREAD();
3737 VALUE filename
= rb_fstring_lit("<main>");
3738 const rb_iseq_t
*iseq
= rb_iseq_new(0, filename
, filename
, Qnil
, 0, ISEQ_TYPE_TOP
);
3741 rb_ractor_main_setup(vm
, th
->ractor
, th
);
3743 /* create vm object */
3744 vm
->self
= TypedData_Wrap_Struct(rb_cRubyVM
, &vm_data_type
, vm
);
3746 /* create main thread */
3747 th
->self
= TypedData_Wrap_Struct(rb_cThread
, &thread_data_type
, th
);
3748 vm
->ractor
.main_thread
= th
;
3749 vm
->ractor
.main_ractor
= th
->ractor
;
3751 th
->top_wrapper
= 0;
3752 th
->top_self
= rb_vm_top_self();
3754 rb_gc_register_mark_object((VALUE
)iseq
);
3755 th
->ec
->cfp
->iseq
= iseq
;
3756 th
->ec
->cfp
->pc
= iseq
->body
->iseq_encoded
;
3757 th
->ec
->cfp
->self
= th
->top_self
;
3759 VM_ENV_FLAGS_UNSET(th
->ec
->cfp
->ep
, VM_FRAME_FLAG_CFRAME
);
3760 VM_STACK_ENV_WRITE(th
->ec
->cfp
->ep
, VM_ENV_DATA_INDEX_ME_CREF
, (VALUE
)vm_cref_new(rb_cObject
, METHOD_VISI_PRIVATE
, FALSE
, NULL
, FALSE
, FALSE
));
3763 * The Binding of the top level scope
3765 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
3767 rb_objspace_gc_enable(vm
->objspace
);
3769 vm_init_redefined_flag();
3771 rb_block_param_proxy
= rb_obj_alloc(rb_cObject
);
3772 rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy
), idCall
,
3773 OPTIMIZED_METHOD_TYPE_BLOCK_CALL
, 0, METHOD_VISI_PUBLIC
);
3774 rb_obj_freeze(rb_block_param_proxy
);
3775 rb_gc_register_mark_object(rb_block_param_proxy
);
3777 /* vm_backtrace.c */
3778 Init_vm_backtrace();
3782 rb_vm_set_progname(VALUE filename
)
3784 rb_thread_t
*th
= GET_VM()->ractor
.main_thread
;
3785 rb_control_frame_t
*cfp
= (void *)(th
->ec
->vm_stack
+ th
->ec
->vm_stack_size
);
3788 rb_iseq_pathobj_set(cfp
->iseq
, rb_str_dup(filename
), rb_iseq_realpath(cfp
->iseq
));
3791 extern const struct st_hash_type rb_fstring_hash_type
;
3796 /* VM bootstrap: phase 1 */
3797 rb_vm_t
* vm
= ruby_mimmalloc(sizeof(*vm
));
3798 rb_thread_t
* th
= ruby_mimmalloc(sizeof(*th
));
3800 fputs("[FATAL] failed to allocate memory\n", stderr
);
3803 MEMZERO(th
, rb_thread_t
, 1);
3806 vm
->objspace
= rb_objspace_alloc();
3807 ruby_current_vm_ptr
= vm
;
3808 vm
->negative_cme_table
= rb_id_table_create(16);
3809 vm
->overloaded_cme_table
= st_init_numtable();
3811 Init_native_thread(th
);
3814 vm
->ractor
.main_ractor
= th
->ractor
= rb_ractor_main_alloc();
3815 rb_ractor_set_current_ec(th
->ractor
, th
->ec
);
3816 ruby_thread_init_stack(th
);
3818 rb_native_mutex_initialize(&vm
->ractor
.sync
.lock
);
3819 rb_native_cond_initialize(&vm
->ractor
.sync
.barrier_cond
);
3820 rb_native_cond_initialize(&vm
->ractor
.sync
.terminate_cond
);
3824 Init_vm_objects(void)
3826 rb_vm_t
*vm
= GET_VM();
3828 vm
->defined_module_hash
= st_init_numtable();
3830 /* initialize mark object array, hash */
3831 vm
->mark_object_ary
= rb_ary_tmp_new(128);
3832 vm
->loading_table
= st_init_strtable();
3833 vm
->frozen_strings
= st_init_table_with_size(&rb_fstring_hash_type
, 10000);
3839 main_to_s(VALUE obj
)
3841 return rb_str_new2("main");
3845 rb_vm_top_self(void)
3847 return GET_VM()->top_self
;
3853 rb_vm_t
*vm
= GET_VM();
3855 vm
->top_self
= rb_obj_alloc(rb_cObject
);
3856 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s
, 0);
3857 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
3861 rb_ruby_verbose_ptr(void)
3863 rb_ractor_t
*cr
= GET_RACTOR();
3864 return &cr
->verbose
;
3868 rb_ruby_debug_ptr(void)
3870 rb_ractor_t
*cr
= GET_RACTOR();
3875 VALUE
rb_insn_operand_intern(const rb_iseq_t
*iseq
,
3876 VALUE insn
, int op_no
, VALUE op
,
3877 int len
, size_t pos
, VALUE
*pnop
, VALUE child
);
3880 rb_vm_fstring_table(void)
3882 return GET_VM()->frozen_strings
;
3885 #if VM_COLLECT_USAGE_DETAILS
3887 #define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
3890 * insn(Fixnum) => ihash(Hash)
3893 * -1(Fixnum) => count, # insn usage
3894 * 0(Fixnum) => ophash, # operand usage
3897 * val(interned string) => count(Fixnum)
3901 vm_analysis_insn(int insn
)
3905 static int prev_insn
= -1;
3911 CONST_ID(usage_hash
, "USAGE_ANALYSIS_INSN");
3912 CONST_ID(bigram_hash
, "USAGE_ANALYSIS_INSN_BIGRAM");
3913 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
3914 if (NIL_P(ihash
= rb_hash_aref(uh
, INT2FIX(insn
)))) {
3915 ihash
= rb_hash_new();
3916 HASH_ASET(uh
, INT2FIX(insn
), ihash
);
3918 if (NIL_P(cv
= rb_hash_aref(ihash
, INT2FIX(-1)))) {
3921 HASH_ASET(ihash
, INT2FIX(-1), INT2FIX(FIX2INT(cv
) + 1));
3924 if (prev_insn
!= -1) {
3929 ary
[0] = INT2FIX(prev_insn
);
3930 ary
[1] = INT2FIX(insn
);
3931 bi
= rb_ary_new4(2, &ary
[0]);
3933 uh
= rb_const_get(rb_cRubyVM
, bigram_hash
);
3934 if (NIL_P(cv
= rb_hash_aref(uh
, bi
))) {
3937 HASH_ASET(uh
, bi
, INT2FIX(FIX2INT(cv
) + 1));
3943 vm_analysis_operand(int insn
, int n
, VALUE op
)
3953 CONST_ID(usage_hash
, "USAGE_ANALYSIS_INSN");
3955 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
3956 if (NIL_P(ihash
= rb_hash_aref(uh
, INT2FIX(insn
)))) {
3957 ihash
= rb_hash_new();
3958 HASH_ASET(uh
, INT2FIX(insn
), ihash
);
3960 if (NIL_P(ophash
= rb_hash_aref(ihash
, INT2FIX(n
)))) {
3961 ophash
= rb_hash_new();
3962 HASH_ASET(ihash
, INT2FIX(n
), ophash
);
3965 valstr
= rb_insn_operand_intern(GET_EC()->cfp
->iseq
, insn
, n
, op
, 0, 0, 0, 0);
3968 if (NIL_P(cv
= rb_hash_aref(ophash
, valstr
))) {
3971 HASH_ASET(ophash
, valstr
, INT2FIX(FIX2INT(cv
) + 1));
3975 vm_analysis_register(int reg
, int isset
)
3980 static const char regstrs
[][5] = {
3988 static const char getsetstr
[][4] = {
3992 static VALUE syms
[sizeof(regstrs
) / sizeof(regstrs
[0])][2];
3996 CONST_ID(usage_hash
, "USAGE_ANALYSIS_REGS");
4001 for (i
= 0; i
< (int)(sizeof(regstrs
) / sizeof(regstrs
[0])); i
++) {
4003 for (j
= 0; j
< 2; j
++) {
4004 snprintf(buff
, 0x10, "%d %s %-4s", i
, getsetstr
[j
], regstrs
[i
]);
4005 syms
[i
][j
] = ID2SYM(rb_intern(buff
));
4009 valstr
= syms
[reg
][isset
];
4011 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
4012 if (NIL_P(cv
= rb_hash_aref(uh
, valstr
))) {
4015 HASH_ASET(uh
, valstr
, INT2FIX(FIX2INT(cv
) + 1));
4020 static void (*ruby_vm_collect_usage_func_insn
)(int insn
) = NULL
;
4021 static void (*ruby_vm_collect_usage_func_operand
)(int insn
, int n
, VALUE op
) = NULL
;
4022 static void (*ruby_vm_collect_usage_func_register
)(int reg
, int isset
) = NULL
;
4026 usage_analysis_insn_start(VALUE self
)
4028 ruby_vm_collect_usage_func_insn
= vm_analysis_insn
;
4034 usage_analysis_operand_start(VALUE self
)
4036 ruby_vm_collect_usage_func_operand
= vm_analysis_operand
;
4042 usage_analysis_register_start(VALUE self
)
4044 ruby_vm_collect_usage_func_register
= vm_analysis_register
;
4050 usage_analysis_insn_stop(VALUE self
)
4052 ruby_vm_collect_usage_func_insn
= 0;
4058 usage_analysis_operand_stop(VALUE self
)
4060 ruby_vm_collect_usage_func_operand
= 0;
4066 usage_analysis_register_stop(VALUE self
)
4068 ruby_vm_collect_usage_func_register
= 0;
4074 usage_analysis_insn_running(VALUE self
)
4076 return RBOOL(ruby_vm_collect_usage_func_insn
!= 0);
4081 usage_analysis_operand_running(VALUE self
)
4083 return RBOOL(ruby_vm_collect_usage_func_operand
!= 0);
4088 usage_analysis_register_running(VALUE self
)
4090 return RBOOL(ruby_vm_collect_usage_func_register
!= 0);
4095 usage_analysis_insn_clear(VALUE self
)
4102 CONST_ID(usage_hash
, "USAGE_ANALYSIS_INSN");
4103 CONST_ID(bigram_hash
, "USAGE_ANALYSIS_INSN_BIGRAM");
4104 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
4105 bh
= rb_const_get(rb_cRubyVM
, bigram_hash
);
4114 usage_analysis_operand_clear(VALUE self
)
4119 CONST_ID(usage_hash
, "USAGE_ANALYSIS_INSN");
4120 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
4128 usage_analysis_register_clear(VALUE self
)
4133 CONST_ID(usage_hash
, "USAGE_ANALYSIS_REGS");
4134 uh
= rb_const_get(rb_cRubyVM
, usage_hash
);
4142 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn
)(int insn
)) = 0;
4143 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand
)(int insn
, int n
, VALUE op
)) = 0;
4144 MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register
)(int reg
, int isset
)) = 0;
4148 #if VM_COLLECT_USAGE_DETAILS
4149 /* @param insn instruction number */
4151 vm_collect_usage_insn(int insn
)
4153 if (RUBY_DTRACE_INSN_ENABLED()) {
4154 RUBY_DTRACE_INSN(rb_insns_name(insn
));
4156 if (ruby_vm_collect_usage_func_insn
)
4157 (*ruby_vm_collect_usage_func_insn
)(insn
);
4160 /* @param insn instruction number
4161 * @param n n-th operand
4162 * @param op operand value
4165 vm_collect_usage_operand(int insn
, int n
, VALUE op
)
4167 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
4170 valstr
= rb_insn_operand_intern(GET_EC()->cfp
->iseq
, insn
, n
, op
, 0, 0, 0, 0);
4172 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr
), rb_insns_name(insn
));
4173 RB_GC_GUARD(valstr
);
4175 if (ruby_vm_collect_usage_func_operand
)
4176 (*ruby_vm_collect_usage_func_operand
)(insn
, n
, op
);
4179 /* @param reg register id. see code of vm_analysis_register() */
4180 /* @param isset 0: read, 1: write */
4182 vm_collect_usage_register(int reg
, int isset
)
4184 if (ruby_vm_collect_usage_func_register
)
4185 (*ruby_vm_collect_usage_func_register
)(reg
, isset
);
4189 MJIT_FUNC_EXPORTED
const struct rb_callcache
*
4190 rb_vm_empty_cc(void)
4192 return &vm_empty_cc
;
4195 MJIT_FUNC_EXPORTED
const struct rb_callcache
*
4196 rb_vm_empty_cc_for_super(void)
4198 return &vm_empty_cc_for_super
;
4201 #endif /* #ifndef MJIT_HEADER */
4203 #include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */