2 #ifndef RUBY_EVAL_INTERN_H
3 #define RUBY_EVAL_INTERN_H
/* Store the current control frame's block pointer (lfp[0], kept as a
 * GC-guarded pointer) into the running thread's passed_block slot. */
#define PASS_PASSED_BLOCK() \
(GET_THREAD()->passed_block = \
GC_GUARDED_PTR_REF((rb_block_t *)GET_THREAD()->cfp->lfp[0]))
10 #include "ruby/node.h"
11 #include "ruby/util.h"
12 #include "ruby/signal.h"
19 #define EXIT_SUCCESS 0
22 #define EXIT_FAILURE 1
32 #include <crt_externs.h>
35 /* Make alloca work the best possible way. */
39 # define alloca __builtin_alloca
49 # ifndef alloca /* predefined by HP cc +Olibcalls */
53 # endif /* HAVE_ALLOCA_H */
/* Portable va_start: pre-ANSI (K&R) varargs take no second argument.
 * NOTE(review): the #else/#endif lines of this conditional are not
 * visible in this chunk -- confirm against the full header. */
#ifdef HAVE_STDARG_PROTOTYPES
#define va_init_list(a,b) va_start(a,b)
#define va_init_list(a,b) va_start(a)
/* NOTE(review): presumably guarded by an #ifndef HAVE_STRRCHR not
 * visible here -- confirm before touching this declaration. */
char *strrchr(const char *, const char);
73 #include <net/socket.h>
77 #include "macruby_private.h"
81 #include "vmsruby_private.h"
/* Thin wrappers over the configure-selected setjmp/longjmp pair. */
#define ruby_setjmp(env) RUBY_SETJMP(env)
#define ruby_longjmp(env,val) RUBY_LONGJMP(env,val)
/* NOTE(review): old-style declarations with no parameter info; presumably
 * inside a platform-specific conditional not visible in this chunk. */
int _setjmp(), _longjmp();
90 #include <sys/types.h>
98 #ifdef HAVE_SYS_SELECT_H
99 #include <sys/select.h>
Solaris sys/select.h switches select to select_large_fdset to support larger
file descriptors if FD_SETSIZE is larger than 1024 on a 32-bit environment.
But Ruby doesn't change FD_SETSIZE because fd_set is allocated dynamically.
So the following definition is required to use select_large_fdset.
108 #ifdef HAVE_SELECT_LARGE_FDSET
109 #define select(n, r, w, e, t) select_large_fdset(n, r, w, e, t)
112 #ifdef HAVE_SYS_PARAM_H
113 #include <sys/param.h>
116 #include <sys/stat.h>
/* Tag-stack machinery for non-local exits (setjmp/longjmp based).
 * NOTE(review): several continuation lines of these macros are missing
 * from this chunk; the bodies below are fragments -- consult the full
 * header before editing. */
#define SAVE_ROOT_JMPBUF(th, stmt) do \
if (ruby_setjmp((th)->root_jmpbuf) == 0) { \
#define TH_PUSH_TAG(th) do { \
rb_thread_t * const _th = th; \
struct rb_vm_tag _tag; \
_tag.prev = _th->tag; \
#define TH_POP_TAG() \
_th->tag = _tag.prev; \
#define TH_POP_TAG2() \
/* Convenience forms operating on the current thread. */
#define PUSH_TAG() TH_PUSH_TAG(GET_THREAD())
#define POP_TAG() TH_POP_TAG()
/* Arm the innermost tag's jump buffer: evaluates to 0 on the direct
 * call, or to the TAG_* state passed to ruby_longjmp on re-entry. */
#define TH_EXEC_TAG() ruby_setjmp(_th->tag->buf)
/* Jump to the innermost tag of th, delivering state st. */
#define TH_JUMP_TAG(th, st) do { \
ruby_longjmp(th->tag->buf,(st)); \
#define JUMP_TAG(st) TH_JUMP_TAG(GET_THREAD(), st)
/* Non-local jump states delivered through the tag chain (TH_JUMP_TAG). */
#define TAG_RETURN 0x1
#define TAG_BREAK 0x2
/* NOTE(review): values 0x3 and 0x5 (likely TAG_NEXT / TAG_REDO) are not
 * visible in this chunk -- confirm against the full header. */
#define TAG_RETRY 0x4
#define TAG_RAISE 0x6
#define TAG_THROW 0x7
#define TAG_FATAL 0x8
/* A "throw object" is a NODE_LIT node used as a 3-slot record:
 * u1 = thrown value, u2 = catch point (VALUE*), u3 = state (int). */
#define NEW_THROW_OBJECT(val, pt, st) \
((VALUE)NEW_NODE(NODE_LIT, (val), (pt), (st)))
/* Setters for the catch-point (u2) and state (u3) slots. */
#define SET_THROWOBJ_CATCH_POINT(obj, val) \
(RNODE((obj))->u2.value = (val))
#define SET_THROWOBJ_STATE(obj, val) \
(RNODE((obj))->u3.value = (val))
/* Getters for the three slots. */
#define GET_THROWOBJ_VAL(obj) ((VALUE)RNODE((obj))->u1.value)
#define GET_THROWOBJ_CATCH_POINT(obj) ((VALUE*)RNODE((obj))->u2.value)
#define GET_THROWOBJ_STATE(obj) ((int)RNODE((obj))->u3.value)
/* True if any of the visibility-flag bits f are set on the current
 * cref's nd_visi field. */
#define SCOPE_TEST(f) \
(ruby_cref()->nd_visi & (f))
/* True only if the current visibility equals f exactly. */
#define SCOPE_CHECK(f) \
(ruby_cref()->nd_visi == (f))
/* NOTE(review): SCOPE_SET and CHECK_STACK_OVERFLOW are fragments here;
 * their do { ... } while (0) wrappers are not visible in this chunk. */
#define SCOPE_SET(f) \
ruby_cref()->nd_visi = (f); \
/* NOTE(review): sp is a VALUE*, so both margin and
 * sizeof(rb_control_frame_t) are added in units of VALUE words due to
 * pointer arithmetic scaling -- confirm that this is intentional. */
#define CHECK_STACK_OVERFLOW(cfp, margin) do \
if (((VALUE *)(cfp)->sp) + (margin) + sizeof(rb_control_frame_t) >= ((VALUE *)cfp)) { \
rb_exc_raise(sysstack_error); \
/* Thread lifecycle declarations (definitions not visible in this chunk). */
void rb_thread_cleanup(void);
void rb_thread_wait_other_threads(void);
/* Bits stored in th->raised_flag.
 * NOTE(review): the enum's opening and closing lines are not visible in
 * this chunk -- confirm the enclosing declaration in the full header. */
RAISED_EXCEPTION = 1,
RAISED_STACKOVERFLOW,
/* Set / reset the raised flag on a thread.
 * NOTE(review): the int return semantics are not visible here -- check
 * the definitions (presumably the previous flag state). */
int rb_thread_set_raised(rb_thread_t *th);
int rb_thread_reset_raised(rb_thread_t *th);
/* Direct bit-level accessors for th->raised_flag. */
#define rb_thread_raised_set(th, f) ((th)->raised_flag |= (f))
#define rb_thread_raised_reset(th, f) ((th)->raised_flag &= ~(f))
#define rb_thread_raised_p(th, f) (((th)->raised_flag & (f)) != 0)
#define rb_thread_raised_clear(th) ((th)->raised_flag = 0)
/* eval / exception-construction entry points. */
VALUE rb_f_eval(int argc, VALUE *argv, VALUE self);
VALUE rb_make_exception(int argc, VALUE *argv);
/* The following never return normally (they raise or longjmp). */
NORETURN(void rb_fiber_start(void));
NORETURN(void rb_raise_jump(VALUE));
NORETURN(void rb_print_undef(VALUE, ID, int));
NORETURN(void vm_localjump_error(const char *, VALUE, int));
NORETURN(void vm_jump_tag_but_local_jump(int, VALUE));
/* cref helpers: a cref is the NODE chain describing the lexical
 * class/module nesting of the current code. */
NODE *vm_get_cref(rb_thread_t *th, rb_iseq_t *iseq, rb_control_frame_t *cfp);
NODE *vm_cref_push(rb_thread_t *th, VALUE, int);
NODE *vm_set_special_cref(rb_thread_t *th, VALUE *lfp, NODE * cref_stack);
/* Build the value propagated for a non-local jump state.
 * NOTE(review): exact semantics not visible in this chunk -- see the
 * definition. */
VALUE vm_make_jump_tag_but_local_jump(int state, VALUE val);
/* Walk the control-frame stack from cfp outward, looking for the first
 * frame whose iseq is a normal Ruby-level iseq.
 * NOTE(review): the body is incomplete in this chunk -- the opening
 * brace, the return statements, and the closing brace are not visible;
 * do not edit logic here without the full header. */
static rb_control_frame_t *
vm_get_ruby_level_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
/* NOTE(review): fragment -- the enclosing function's signature is not
 * visible in this chunk (judging from the SCOPE_* macros above, this is
 * presumably ruby_cref(); confirm). The visible body fetches the current
 * thread's nearest Ruby-level frame and returns that frame's cref. */
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
return vm_get_cref(th, cfp->iseq, cfp);
/* Miscellaneous VM service declarations. */
VALUE vm_get_cbase(rb_thread_t *th);
VALUE rb_obj_is_proc(VALUE);
void rb_vm_check_redefinition_opt_method(NODE *node);
/* Call func(arg) on behalf of recv, with blockptr as the frame's block
 * and filename as the frame label.
 * NOTE(review): exact frame semantics not visible here -- see vm.c. */
VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, rb_block_t *blockptr, VALUE filename);
void rb_thread_terminate_all(void);
void rb_vm_set_eval_stack(rb_thread_t *, VALUE iseq);
248 VALUE
rb_vm_top_self();
250 #define ruby_cbase() vm_get_cbase(GET_THREAD())
252 #endif /* RUBY_EVAL_INTERN_H */