#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif
/**
 * VM Debug Level
 *
 * debug level:
 *  0: no debug output
 *  1: show instruction name
 *  2: show stack frame when control stack frame is changed
 *  3: show stack status
 *  4: show register
 *  5:
 * 10: gc check
 */

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif
#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")

#else
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#endif
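
/* Usage sketch (editorial note, not in the upstream header): building with
 * -DVM_CHECK_MODE=1 turns VM_ASSERT(expr) into a real runtime check that
 * reports the stringified expression via the rb_bug machinery, while default
 * builds compile it away to ((void)0). For example:
 *
 *   VM_ASSERT(ec == GET_EC());       // checked only when VM_CHECK_MODE > 0
 *   VM_UNREACHABLE(vm_block_iseq);   // rb_bug("...: unreachable") if reached
 */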
#include <setjmp.h>

#include "ruby/internal/stdbool.h"
#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"
#include "darray.h"

#include "ruby/thread_native.h"
#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif
#if defined(NSIG_MAX)           /* POSIX issue 8 */
# undef NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)      /* FreeBSD */
# undef NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)          /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)             /* 99% of everything else */
# /* take it */
#else                           /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#else
# define RUBY_SIGCHLD (0)
#endif

/* platforms with broken or non-existent SIGCHLD work by polling */
#if defined(__APPLE__)
# define SIGCHLD_LOSSY (1)
#else
# define SIGCHLD_LOSSY (0)
#endif
/* define to 0 to test old code path */
#define WAITPID_USE_SIGCHLD (RUBY_SIGCHLD || SIGCHLD_LOSSY)

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var)  var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif
/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#if OPT_STACK_CACHING
#undef OPT_STACK_CACHING
#endif /* OPT_STACK_CACHING */
#endif /* OPT_CALL_THREADED_CODE */
void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef   signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE    RUBY_TAG_NONE
#define TAG_RETURN  RUBY_TAG_RETURN
#define TAG_BREAK   RUBY_TAG_BREAK
#define TAG_NEXT    RUBY_TAG_NEXT
#define TAG_RETRY   RUBY_TAG_RETRY
#define TAG_REDO    RUBY_TAG_REDO
#define TAG_RAISE   RUBY_TAG_RAISE
#define TAG_THROW   RUBY_TAG_THROW
#define TAG_FATAL   RUBY_TAG_FATAL
#define TAG_MASK    RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};
/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
typedef struct rb_compile_option_struct rb_compile_option_t;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;                     // v0
    union ic_serial_entry ic_serial; // v1, v2
    const rb_cref_t *ic_cref;        // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= sizeof(struct RObject));

#if SIZEOF_SERIAL_T <= SIZEOF_VALUE

#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)

#else

static inline rb_serial_t
get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
{
    union ic_serial_entry tmp;
    tmp.data[0] = ice->ic_serial.data[0];
    tmp.data[1] = ice->ic_serial.data[1];
    return tmp.raw;
}

#define GET_IC_SERIAL(ice) get_ic_serial(ice)

static inline void
set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
{
    union ic_serial_entry tmp;
    tmp.raw = v;
    ice->ic_serial.data[0] = tmp.data[0];
    ice->ic_serial.data[1] = tmp.data[1];
}

#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))

#endif
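
/* Editorial note: the two-halves dance above is only compiled when
 * SIZEOF_SERIAL_T > SIZEOF_VALUE (e.g. a 64-bit serial with a 32-bit VALUE).
 * In that configuration ic_serial occupies two VALUE-sized slots (v1 and v2
 * in the entry layout), so the serial is copied in and out half-by-half
 * through the union; presumably this keeps the entry a plain array of
 * VALUE-sized words instead of requiring 64-bit alignment for one field.
 * GET_IC_SERIAL/SET_IC_SERIAL hide the difference from callers. */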
struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;
    // For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
    // It's set during compile time and constant once set.
    unsigned get_insn_idx;
};

struct iseq_inline_iv_cache_entry {
    struct rb_iv_index_tbl_entry *entry;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_callinfo *ci;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    int kw_splat;
};
struct rb_execution_context_struct;

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    VALUE first_lineno; /* TODO: may be unsigned short */
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1
static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}
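
/* Illustrative usage (editorial sketch): a pathobj is either a single frozen
 * String (typically when no distinct realpath was recorded) or a frozen
 * [path, realpath] Array, so both accessors collapse to the String itself in
 * the common case:
 *
 *   VALUE path     = pathobj_path(iseq->body->location.pathobj);
 *   VALUE realpath = pathobj_realpath(iseq->body->location.pathobj);
 */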
/* Forward declarations */
struct rb_mjit_unit;

// List of YJIT block versions
typedef rb_darray(struct yjit_block_version *) rb_yjit_block_array_t;
typedef rb_darray(rb_yjit_block_array_t) rb_yjit_block_array_array_t;
struct rb_iseq_constant_body {
    enum iseq_type {
        ISEQ_TYPE_TOP,
        ISEQ_TYPE_METHOD,
        ISEQ_TYPE_BLOCK,
        ISEQ_TYPE_CLASS,
        ISEQ_TYPE_RESCUE,
        ISEQ_TYPE_ENSURE,
        ISEQ_TYPE_EVAL,
        ISEQ_TYPE_MAIN,
        ISEQ_TYPE_PLAIN
    } type;              /* instruction sequence type */

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    /**
     * parameter information
     *
     *  def m(a1, a2, ..., aM,                    # mandatory
     *        b1=(...), b2=(...), ..., bN=(...),  # optional
     *        *c,                                 # rest
     *        d1, d2, ..., dO,                    # post
     *        e1:(...), e2:(...), ..., eK:(...),  # keyword
     *        **f,                                # keyword_rest
     *        &g)                                 # block
     * =>
     *
     *  lead_num     = M
     *  opt_num      = N
     *  rest_start   = M+N
     *  post_start   = M+N+(*1)
     *  post_num     = O
     *  keyword_num  = K
     *  block_start  = M+N+(*1)+O+K
     *  keyword_bits = M+N+(*1)+O+K+(&1)
     *  size         = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
     */

    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords: 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct iseq_insn_info {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries;
    struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int is_size;
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
    // If true, this ISeq is leaf *and* backtraces are not used, for example,
    // by rb_profile_frames. We verify only leafness on VM_CHECK_MODE though.
    // Note that GC allocations might use backtraces due to
    // ObjectSpace#trace_object_allocations.
    // For more details, see: https://bugs.ruby-lang.org/issues/16956
    bool builtin_inline_p;
    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_MJIT
    /* The following fields are MJIT related info. */
    VALUE (*jit_func)(struct rb_execution_context_struct *,
                      struct rb_control_frame_struct *); /* function pointer for loaded native code */
    long unsigned total_calls; /* number of total calls with `mjit_exec()` */
    struct rb_mjit_unit *jit_unit;
#endif

    rb_yjit_block_array_array_t yjit_blocks; // empty, or has a size equal to iseq_size
};
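
/* Worked example (editorial, derived from the param comment above): for
 *
 *   def m(a1, a2, o1 = 1, *r, p1, k1: 2, **kw, &blk); end
 *
 * M=2, N=1, O=1, K=1 and rest/keyword_rest/block are all present, so
 *   lead_num = 2, opt_num = 1, rest_start = 3, post_start = 4, post_num = 1,
 *   keyword_num = 1, block_start = 6, keyword_bits = 7,
 *   size = 2+1+1+(*1)+1+(&1)+(**1) = 8.
 */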
/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags;   /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#ifndef USE_LAZY_LOAD
#define USE_LAZY_LOAD 0
#endif

#if USE_LAZY_LOAD
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
#endif
static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
#if USE_LAZY_LOAD
    if (iseq->body == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
#endif
    return iseq;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
//TODO: re-visit. to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}
enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

enum ruby_basic_operators {
    BOP_PLUS,
    BOP_MINUS,
    BOP_MULT,
    BOP_DIV,
    BOP_MOD,
    BOP_EQ,
    BOP_EQQ,
    BOP_LT,
    BOP_LE,
    BOP_LTLT,
    BOP_AREF,
    BOP_ASET,
    BOP_LENGTH,
    BOP_SIZE,
    BOP_EMPTY_P,
    BOP_NIL_P,
    BOP_SUCC,
    BOP_GT,
    BOP_GE,
    BOP_NOT,
    BOP_NEQ,
    BOP_MATCH,
    BOP_FREEZE,
    BOP_UMINUS,
    BOP_MAX,
    BOP_MIN,
    BOP_CALL,
    BOP_AND,
    BOP_OR,

    BOP_LAST_
};
#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

struct rb_objspace;
struct rb_objspace *rb_objspace_alloc(void);
void rb_objspace_free(struct rb_objspace *);
void rb_objspace_call_finalizer(struct rb_objspace *);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;

// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;
typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // barrier
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_cond;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;
        } sync;
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;
    rb_nativethread_lock_t waitpid_lock;
    struct list_head waiting_pids; /* PID > 0: <=> struct waitpid_state */
    struct list_head waiting_grps; /* PID <= 0: <=> struct waitpid_state */
    struct list_head waiting_fds;  /* <=> struct waiting_fd */

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    const VALUE special_exceptions[ruby_special_error_count];

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* relation table of ensure - rollback for callcc */
    struct st_table *ensure_rollback_table;

    /* postponed_job (async-signal-safe, NOT thread-safe) */
    struct rb_postponed_job_struct *postponed_job_buffer;
    rb_atomic_t postponed_job_index;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    st_table *defined_module_hash;

    struct rb_objspace *objspace;

    rb_at_exit_list *at_exit;

    st_table *frozen_strings;

    const struct rb_builtin_function *builtin_function_table;
    int builtin_inline_index;

    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* size in byte */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;

    short redefined_flag[BOP_LAST_];
} rb_vm_t;
/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif
/* optimize insn */
#define INTEGER_REDEFINED_OP_FLAG (1 << 0)
#define FLOAT_REDEFINED_OP_FLAG   (1 << 1)
#define STRING_REDEFINED_OP_FLAG  (1 << 2)
#define ARRAY_REDEFINED_OP_FLAG   (1 << 3)
#define HASH_REDEFINED_OP_FLAG    (1 << 4)
/* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
#define SYMBOL_REDEFINED_OP_FLAG  (1 << 6)
#define TIME_REDEFINED_OP_FLAG    (1 << 7)
#define REGEXP_REDEFINED_OP_FLAG  (1 << 8)
#define NIL_REDEFINED_OP_FLAG     (1 << 9)
#define TRUE_REDEFINED_OP_FLAG    (1 << 10)
#define FALSE_REDEFINED_OP_FLAG   (1 << 11)
#define PROC_REDEFINED_OP_FLAG    (1 << 12)

#define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
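
/* Usage sketch (editorial, paraphrasing the pattern in vm_insnhelper.c):
 * specialized instructions guard their fast paths with this predicate, e.g.
 * opt_plus only adds Fixnums inline while Integer#+ is still pristine:
 *
 *   if (FIXNUM_2_P(recv, obj) &&
 *       BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
 *       // fast path; otherwise fall back to a regular method call
 *   }
 */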
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif
struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};
typedef struct rb_control_frame_struct {
    const VALUE *pc;        /* cfp[0] */
    VALUE *sp;              /* cfp[1] */
    const rb_iseq_t *iseq;  /* cfp[2] */
    VALUE self;             /* cfp[3] / block[0] */
    const VALUE *ep;        /* cfp[4] / block[1] */
    const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc or forwarded block handler */
    VALUE *__bp__;          /* cfp[6] */ /* outside vm_push_frame, use vm_base_ptr instead. */

#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        /* cfp[7] */
#endif
    // Return address for YJIT code
    void *jit_return;
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}
enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  the members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
              sizeof(struct rb_vm_tag));
struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_ensure_entry {
    VALUE marker;
    VALUE (*e_proc)(VALUE);
    VALUE data2;
} rb_ensure_entry_t;

typedef struct rb_ensure_list {
    struct rb_ensure_list *next;
    struct rb_ensure_entry entry;
} rb_ensure_list_t;

typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};
struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* ensure & callcc */
    rb_ensure_list_t *ensure_list;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif
// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif
typedef struct rb_thread_struct {
    struct list_node lt_node; // managed by a ractor
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;

    rb_execution_context_t *ec;

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */
    rb_nativethread_id_t thread_id;
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string;
#endif
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid;
#endif
    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    native_thread_data_t native_thread_data;
    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned blocking;

    /* misc */
    VALUE name;

    struct rb_ext_config ext_config;

#ifdef USE_SIGALTSTACK
    void *altstack;
#endif
} rb_thread_t;
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
rb_iseq_t *rb_iseq_new_top     (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const rb_ast_body_t *ast, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(const rb_ast_body_t *ast, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum iseq_type, const rb_compile_option_t*);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    VALUE memo = rb_imemo_new(imemo_ifunc, (VALUE)func, (VALUE)ptr, Qundef, Qfalse);
    return (struct rb_iseq_new_with_callback_callback_func *)memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
    VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
    const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END
#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    unsigned short first_lineno;
} rb_binding_t;
/* used by compile time and send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
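
/* Editorial note: these macros smuggle a C pointer through a VALUE slot by
 * setting its low bit. Since the pointees are at least VALUE-aligned, the low
 * two bits of a real pointer are zero, and the tagged word looks like a
 * Fixnum to the GC, which therefore skips it during marking:
 *
 *   VALUE guarded    = GC_GUARDED_PTR(ep);          // ep | 0x01
 *   const VALUE *ep2 = GC_GUARDED_PTR_REF(guarded); // strip the tag bits
 */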
enum {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ _FFF FFFF EEEX (LSB)
     *
     * X   : tag for GC marking (It seems as Fixnum)
     * EEE : 3 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH    = 0x0020,
    VM_FRAME_FLAG_BMETHOD   = 0x0040,
    VM_FRAME_FLAG_CFRAME    = 0x0080,
    VM_FRAME_FLAG_LAMBDA    = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED    = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
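
/* Layout sketch (editorial), restating the indices above relative to an
 * environment pointer ep:
 *
 *   ep[-2]  me/cref     (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]  specval     (block handler for a local env, or the GC-guarded
 *                        previous ep for a nested env)
 *   ep[ 0]  flags       (the Fixnum-tagged flag word described above)
 *   ep[ 1]  env object  (VM_ENV_DATA_INDEX_ENV, used for escaped envs)
 *
 * Local variables live below that, the last one at ep[VM_ENV_INDEX_LAST_LVAR].
 */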
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
    return cframe_p;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}
#if VM_CHECK_MODE > 0
static inline int
vm_assert_env(VALUE obj)
{
    VM_ASSERT(imemo_type_p(obj, imemo_env));
    return 1;
}
#endif

RBIMPL_ATTR_NONNULL((1))
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(vm_assert_env(envval));
    return envval;
}

RBIMPL_ATTR_NONNULL((1))
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
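
/* Editorial note: control frames are carved from the top of ec->vm_stack and
 * grow toward lower addresses (the value stack grows up from the bottom), so
 * the *older* caller frame is at (cfp)+1 and a callee's frame at (cfp)-1. */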
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
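
/* Editorial summary of the encoding used above: a block handler VALUE is
 * either 0 (VM_BLOCK_HANDLER_NONE), a tagged pointer to a rb_captured_block
 * with low bits 01 (iseq block) or 11 (ifunc block), or an ordinary Symbol
 * or Proc object; vm_block_handler_type() below dispatches on exactly these
 * cases. */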
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline int
vm_cfp_forwarded_bh_p(const rb_control_frame_t *cfp, VALUE block_handler)
{
    return ((VALUE) cfp->block_code) == block_handler;
}
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern void rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
extern void rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc);
extern void rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp
#if OPT_STACK_CACHING
                                        , VALUE reg_a, VALUE reg_b
#endif
);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp))
void rb_vm_bugreport(const void *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
MJIT_STATIC void rb_vm_pop_frame(rb_execution_context_t *ec);
void rb_gvl_destroy(rb_global_vm_lock_t *gvl);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    list_head_init(&vm->waiting_fds);
    list_head_init(&vm->waiting_pids);
    list_head_init(&vm->workqueue);
    list_head_init(&vm->waiting_grps);
    list_head_init(&vm->ractor.set);
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
MJIT_STATIC VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_stack(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

MJIT_STATIC const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do {                       \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE));              \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)];   \
    if (UNLIKELY((cfp) <= &bound[1])) {                                      \
        vm_stackoverflow();                                                  \
    }                                                                        \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
/* for thread */

#if RUBY_VM_THREAD_MODEL == 2
MJIT_SYMBOL_EXPORT_BEGIN

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int    ruby_vm_event_local_num;

MJIT_SYMBOL_EXPORT_END

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)
static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef __APPLE__
    rb_execution_context_t *ec = rb_current_ec();
  #else
    rb_execution_context_t *ec = ruby_current_ec;
  #endif
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = GET_EC();
        return rb_ec_ractor_ptr(ec);
    }
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}
#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)                 ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
}
VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(const rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
    VM_ASSERT(ec == GET_EC());
    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
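
/* Editorial note: RUBY_VM_CHECK_INTS() is the VM's polling point. Signal
 * traps, timer ticks, Thread#raise/Thread#kill and similar requests only set
 * bits in ec->interrupt_flag; they take effect when a check like this one
 * sees a bit that is not masked out and calls
 * rb_threadptr_execute_interrupts(). */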
/* tracer */

struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};
void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
};

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)

static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
void rb_vm_trap_exit(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */