1 /**********************************************************************
3 insnhelper.c - instruction helper functions.
7 Copyright (C) 2007 Koichi Sasada
9 **********************************************************************/
11 /* finish iseq array */
16 /* control stack frame */
23 static inline rb_control_frame_t
*
24 vm_push_frame(rb_thread_t
* th
, const rb_iseq_t
* iseq
,
25 VALUE type
, VALUE self
, VALUE specval
,
26 const VALUE
*pc
, VALUE
*sp
, VALUE
*lfp
,
29 rb_control_frame_t
* const cfp
= th
->cfp
= th
->cfp
- 1;
32 /* setup vm value stack */
35 for (i
=0; i
< local_size
; i
++) {
41 *sp
= GC_GUARDED_PTR(specval
);
47 /* setup vm control frame stack */
49 cfp
->pc
= (VALUE
*)pc
;
52 cfp
->iseq
= (rb_iseq_t
*) iseq
;
/* Compile-time switch for per-frame profiling: the surrounding fragments
 * stamp cfp->prof_time_self with clock() and accumulate prof_time_chld
 * when this is nonzero.  0 = profiling disabled (default). */
59 #define COLLECT_PROFILE 0
61 cfp
->prof_time_self
= clock();
62 cfp
->prof_time_chld
= 0;
73 vm_pop_frame(rb_thread_t
*th
)
76 rb_control_frame_t
*cfp
= th
->cfp
;
78 if (RUBY_VM_NORMAL_ISEQ_P(cfp
->iseq
)) {
79 VALUE current_time
= clock();
80 rb_control_frame_t
*cfp
= th
->cfp
;
81 cfp
->prof_time_self
= current_time
- cfp
->prof_time_self
;
82 (cfp
+1)->prof_time_chld
+= cfp
->prof_time_self
;
84 cfp
->iseq
->profile
.count
++;
85 cfp
->iseq
->profile
.time_cumu
= cfp
->prof_time_self
;
86 cfp
->iseq
->profile
.time_self
= cfp
->prof_time_self
- cfp
->prof_time_chld
;
88 else if (0 /* c method? */) {
92 th
->cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(th
->cfp
);
101 #define VM_CALLEE_SETUP_ARG(ret, th, iseq, orig_argc, orig_argv, block) \
102 if (LIKELY(iseq->arg_simple & 0x01)) { \
104 if (orig_argc != iseq->argc) { \
105 rb_raise(rb_eArgError, "wrong number of arguments (%d for %d)", orig_argc, iseq->argc); \
110 ret = vm_callee_setup_arg_complex(th, iseq, orig_argc, orig_argv, block); \
114 vm_callee_setup_arg_complex(rb_thread_t
*th
, const rb_iseq_t
* iseq
,
115 int orig_argc
, VALUE
* orig_argv
,
116 const rb_block_t
**block
)
118 const int m
= iseq
->argc
;
119 int argc
= orig_argc
;
120 VALUE
*argv
= orig_argv
;
123 th
->mark_stack_len
= argc
+ iseq
->arg_size
;
126 if (argc
< (m
+ iseq
->arg_post_len
)) { /* check with post arg */
127 rb_raise(rb_eArgError
, "wrong number of arguments (%d for %d)",
128 argc
, m
+ iseq
->arg_post_len
);
135 if (iseq
->arg_post_len
) {
136 if (!(orig_argc
< iseq
->arg_post_start
)) {
137 VALUE
*new_argv
= ALLOCA_N(VALUE
, argc
);
138 MEMCPY(new_argv
, argv
, VALUE
, argc
);
142 MEMCPY(&orig_argv
[iseq
->arg_post_start
], &argv
[argc
-= iseq
->arg_post_len
],
143 VALUE
, iseq
->arg_post_len
);
147 if (iseq
->arg_opts
) {
148 const int opts
= iseq
->arg_opts
- 1 /* no opt */;
150 if (iseq
->arg_rest
== -1 && argc
> opts
) {
151 rb_raise(rb_eArgError
, "wrong number of arguments (%d for %d)",
152 orig_argc
, m
+ opts
+ iseq
->arg_post_len
);
158 opt_pc
= iseq
->arg_opt_table
[opts
]; /* no opt */
162 for (i
= argc
; i
<opts
; i
++) {
163 orig_argv
[i
+ m
] = Qnil
;
165 opt_pc
= iseq
->arg_opt_table
[argc
];
171 if (iseq
->arg_rest
!= -1) {
172 orig_argv
[iseq
->arg_rest
] = rb_ary_new4(argc
, argv
);
176 /* block arguments */
177 if (block
&& iseq
->arg_block
!= -1) {
178 VALUE blockval
= Qnil
;
179 const rb_block_t
*blockptr
= *block
;
182 rb_raise(rb_eArgError
, "wrong number of arguments (%d for %d)",
183 orig_argc
, m
+ iseq
->arg_post_len
);
187 /* make Proc object */
188 if (blockptr
->proc
== 0) {
191 blockval
= vm_make_proc(th
, th
->cfp
, blockptr
);
193 GetProcPtr(blockval
, proc
);
194 *block
= &proc
->block
;
197 blockval
= blockptr
->proc
;
201 orig_argv
[iseq
->arg_block
] = blockval
; /* Proc or nil */
204 th
->mark_stack_len
= 0;
209 caller_setup_args(const rb_thread_t
*th
, rb_control_frame_t
*cfp
, VALUE flag
,
210 int argc
, rb_iseq_t
*blockiseq
, rb_block_t
**block
)
212 rb_block_t
*blockptr
= 0;
215 if (flag
& VM_CALL_ARGS_BLOCKARG_BIT
) {
222 if (!rb_obj_is_proc(proc
)) {
223 VALUE b
= rb_check_convert_type(proc
, T_DATA
, "Proc", "to_proc");
225 rb_raise(rb_eTypeError
,
226 "wrong argument type %s (expected Proc)",
227 rb_obj_classname(proc
));
231 GetProcPtr(proc
, po
);
232 blockptr
= &po
->block
;
233 RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp
)->proc
= proc
;
237 else if (blockiseq
) {
238 blockptr
= RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp
);
239 blockptr
->iseq
= blockiseq
;
245 /* expand top of stack? */
246 if (flag
& VM_CALL_ARGS_SPLAT_BIT
) {
247 VALUE ary
= *(cfp
->sp
- 1);
250 VALUE tmp
= rb_check_convert_type(ary
, T_ARRAY
, "Array", "to_a");
256 int len
= RARRAY_LEN(tmp
);
257 ptr
= RARRAY_PTR(tmp
);
260 CHECK_STACK_OVERFLOW(cfp
, len
);
262 for (i
= 0; i
< len
; i
++) {
273 call_cfunc(VALUE (*func
)(), VALUE recv
,
274 int len
, int argc
, const VALUE
*argv
)
276 /* printf("len: %d, argc: %d\n", len, argc); */
278 if (len
>= 0 && argc
!= len
) {
279 rb_raise(rb_eArgError
, "wrong number of arguments(%d for %d)",
285 return (*func
) (recv
, rb_ary_new4(argc
, argv
));
288 return (*func
) (argc
, argv
, recv
);
291 return (*func
) (recv
);
294 return (*func
) (recv
, argv
[0]);
297 return (*func
) (recv
, argv
[0], argv
[1]);
300 return (*func
) (recv
, argv
[0], argv
[1], argv
[2]);
303 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3]);
306 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4]);
309 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
313 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
317 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
318 argv
[5], argv
[6], argv
[7]);
321 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
322 argv
[5], argv
[6], argv
[7], argv
[8]);
325 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
326 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9]);
329 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
330 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9],
334 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
335 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9],
339 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
340 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9], argv
[10],
344 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
345 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9], argv
[10],
346 argv
[11], argv
[12], argv
[13]);
349 return (*func
) (recv
, argv
[0], argv
[1], argv
[2], argv
[3], argv
[4],
350 argv
[5], argv
[6], argv
[7], argv
[8], argv
[9], argv
[10],
351 argv
[11], argv
[12], argv
[13], argv
[14]);
354 rb_raise(rb_eArgError
, "too many arguments(%d)", len
);
357 return Qnil
; /* not reached */
361 vm_call_cfunc(rb_thread_t
*th
, rb_control_frame_t
*reg_cfp
,
362 int num
, ID id
, VALUE recv
, VALUE klass
,
363 VALUE flag
, const NODE
*mn
, const rb_block_t
*blockptr
)
367 EXEC_EVENT_HOOK(th
, RUBY_EVENT_C_CALL
, recv
, id
, klass
);
369 rb_control_frame_t
*cfp
=
370 vm_push_frame(th
, 0, VM_FRAME_MAGIC_CFUNC
,
371 recv
, (VALUE
) blockptr
, 0, reg_cfp
->sp
, 0, 1);
374 cfp
->method_class
= klass
;
376 reg_cfp
->sp
-= num
+ 1;
378 val
= call_cfunc(mn
->nd_cfnc
, recv
, mn
->nd_argc
, num
, reg_cfp
->sp
+ 1);
380 if (reg_cfp
!= th
->cfp
+ 1) {
381 rb_bug("cfp consistency error - send");
386 EXEC_EVENT_HOOK(th
, RUBY_EVENT_C_RETURN
, recv
, id
, klass
);
392 vm_call_bmethod(rb_thread_t
*th
, ID id
, VALUE procval
, VALUE recv
,
393 VALUE klass
, int argc
, VALUE
*argv
, rb_block_t
*blockptr
)
395 rb_control_frame_t
*cfp
= th
->cfp
;
399 /* control block frame */
400 (cfp
-2)->method_id
= id
;
401 (cfp
-2)->method_class
= klass
;
403 GetProcPtr(procval
, proc
);
404 val
= vm_invoke_proc(th
, proc
, recv
, argc
, argv
, blockptr
);
409 vm_method_missing(rb_thread_t
*th
, ID id
, VALUE recv
,
410 int num
, rb_block_t
*blockptr
, int opt
)
412 rb_control_frame_t
* const reg_cfp
= th
->cfp
;
413 VALUE
*argv
= STACK_ADDR_FROM_TOP(num
+ 1);
415 argv
[0] = ID2SYM(id
);
416 th
->method_missing_reason
= opt
;
417 th
->passed_block
= blockptr
;
418 val
= rb_funcall2(recv
, idMethodMissing
, num
+ 1, argv
);
424 vm_setup_method(rb_thread_t
*th
, rb_control_frame_t
*cfp
,
425 const int argc
, const rb_block_t
*blockptr
, const VALUE flag
,
426 const VALUE iseqval
, const VALUE recv
, const VALUE klass
)
430 VALUE
*sp
, *rsp
= cfp
->sp
- argc
;
432 /* TODO: eliminate it */
433 GetISeqPtr(iseqval
, iseq
);
434 VM_CALLEE_SETUP_ARG(opt_pc
, th
, iseq
, argc
, rsp
, &blockptr
);
436 /* stack overflow check */
437 CHECK_STACK_OVERFLOW(cfp
, iseq
->stack_max
);
439 sp
= rsp
+ iseq
->arg_size
;
441 if (LIKELY(!(flag
& VM_CALL_TAILCALL_BIT
))) {
442 if (0) printf("local_size: %d, arg_size: %d\n",
443 iseq
->local_size
, iseq
->arg_size
);
445 /* clear local variables */
446 for (i
= 0; i
< iseq
->local_size
- iseq
->arg_size
; i
++) {
450 vm_push_frame(th
, iseq
,
451 VM_FRAME_MAGIC_METHOD
, recv
, (VALUE
) blockptr
,
452 iseq
->iseq_encoded
+ opt_pc
, sp
, 0, 0);
454 cfp
->sp
= rsp
- 1 /* recv */;
458 th
->cfp
++; /* pop cf */
462 for (i
=0; i
< (sp
- rsp
); i
++) {
468 /* clear local variables */
469 for (i
= 0; i
< iseq
->local_size
- iseq
->arg_size
; i
++) {
473 vm_push_frame(th
, iseq
,
474 VM_FRAME_MAGIC_METHOD
, recv
, (VALUE
) blockptr
,
475 iseq
->iseq_encoded
+ opt_pc
, sp
, 0, 0);
480 vm_call_method(rb_thread_t
* const th
, rb_control_frame_t
* const cfp
,
481 const int num
, rb_block_t
* const blockptr
, const VALUE flag
,
482 const ID id
, const NODE
* mn
, const VALUE recv
, VALUE klass
)
486 start_method_dispatch
:
489 if ((mn
->nd_noex
== 0)) {
490 /* dispatch method */
493 normal_method_dispatch
:
497 switch (nd_type(node
)) {
498 case RUBY_VM_METHOD_NODE
:{
499 vm_setup_method(th
, cfp
, num
, blockptr
, flag
, (VALUE
)node
->nd_body
, recv
, klass
);
503 val
= vm_call_cfunc(th
, cfp
, num
, id
, recv
, mn
->nd_clss
, flag
, node
, blockptr
);
507 val
= rb_ivar_set(recv
, node
->nd_vid
, *(cfp
->sp
- 1));
513 rb_raise(rb_eArgError
, "wrong number of arguments (%d for 0)",
516 val
= rb_attr_get(recv
, node
->nd_vid
);
521 VALUE
*argv
= cfp
->sp
- num
;
522 val
= vm_call_bmethod(th
, id
, node
->nd_cval
, recv
, klass
, num
, argv
, blockptr
);
523 cfp
->sp
+= - num
- 1;
527 klass
= RCLASS_SUPER(mn
->nd_clss
);
528 mn
= rb_method_node(klass
, id
);
531 goto normal_method_dispatch
;
534 goto start_method_dispatch
;
538 printf("node: %s\n", ruby_node_name(nd_type(node
)));
539 rb_bug("eval_invoke_method: unreachable");
548 if (!(flag
& VM_CALL_FCALL_BIT
) &&
549 (mn
->nd_noex
& NOEX_MASK
) & NOEX_PRIVATE
) {
550 int stat
= NOEX_PRIVATE
;
552 if (flag
& VM_CALL_VCALL_BIT
) {
555 val
= vm_method_missing(th
, id
, recv
, num
, blockptr
, stat
);
557 else if (((mn
->nd_noex
& NOEX_MASK
) & NOEX_PROTECTED
) &&
558 !(flag
& VM_CALL_SEND_BIT
)) {
559 VALUE defined_class
= mn
->nd_clss
;
561 if (TYPE(defined_class
) == T_ICLASS
) {
562 defined_class
= RBASIC(defined_class
)->klass
;
565 if (!rb_obj_is_kind_of(cfp
->self
, rb_class_real(defined_class
))) {
566 val
= vm_method_missing(th
, id
, recv
, num
, blockptr
, NOEX_PROTECTED
);
569 goto normal_method_dispatch
;
572 else if ((noex_safe
= NOEX_SAFE(mn
->nd_noex
)) > th
->safe_level
&&
574 rb_raise(rb_eSecurityError
, "calling insecure method: %s", rb_id2name(id
));
577 goto normal_method_dispatch
;
583 if (id
== idMethodMissing
) {
584 rb_bug("method missing");
588 if (flag
& VM_CALL_VCALL_BIT
) {
591 if (flag
& VM_CALL_SUPER_BIT
) {
594 val
= vm_method_missing(th
, id
, recv
, num
, blockptr
, stat
);
598 RUBY_VM_CHECK_INTS();
603 vm_send_optimize(rb_control_frame_t
* const reg_cfp
, NODE
** const mn
,
604 rb_num_t
* const flag
, rb_num_t
* const num
,
605 ID
* const id
, const VALUE klass
)
607 if (*mn
&& nd_type((*mn
)->nd_body
) == NODE_CFUNC
) {
608 NODE
*node
= (*mn
)->nd_body
;
609 extern VALUE
rb_f_send(int argc
, VALUE
*argv
, VALUE recv
);
611 if (node
->nd_cfnc
== rb_f_send
) {
614 *id
= SYMBOL_P(sym
) ? SYM2ID(sym
) : rb_to_id(sym
);
616 /* shift arguments */
618 MEMMOVE(&TOPN(i
), &TOPN(i
-1), VALUE
, i
);
621 *mn
= rb_method_node(klass
, *id
);
624 *flag
|= VM_CALL_FCALL_BIT
;
/* block_proc_is_lambda -- fragment.
 * Extracts the proc wrapped by `procval` (presumably an rb_proc_t --
 * confirm against upstream) and returns its is_lambda flag.
 * NOTE(review): this capture is missing the original lines between the
 * fragments shown (braces, the local declaration for `proc`, and the
 * branch taken when procval is absent) -- recover them from the
 * upstream file before making any code change here. */
632 block_proc_is_lambda(const VALUE procval
)
637 GetProcPtr(procval
, proc
);
638 return proc
->is_lambda
;
646 vm_yield_with_cfunc(rb_thread_t
*th
, const rb_block_t
*block
,
647 VALUE self
, int argc
, const VALUE
*argv
,
648 const rb_block_t
*blockptr
)
650 NODE
*ifunc
= (NODE
*) block
->iseq
;
651 VALUE val
, arg
, blockarg
;
652 int lambda
= block_proc_is_lambda(block
->proc
);
655 arg
= rb_ary_new4(argc
, argv
);
657 else if (argc
== 0) {
665 blockarg
= vm_make_proc(th
, th
->cfp
, blockptr
);
671 vm_push_frame(th
, 0, VM_FRAME_MAGIC_IFUNC
,
672 self
, (VALUE
)block
->dfp
,
673 0, th
->cfp
->sp
, block
->lfp
, 1);
675 val
= (*ifunc
->nd_cfnc
) (arg
, ifunc
->nd_tval
, argc
, argv
, blockarg
);
682 vm_yield_setup_args(rb_thread_t
* const th
, const rb_iseq_t
*iseq
,
683 int orig_argc
, VALUE
*argv
,
684 const rb_block_t
*blockptr
, int lambda
)
686 if (0) { /* for debug */
687 printf(" argc: %d\n", orig_argc
);
688 printf("iseq argc: %d\n", iseq
->argc
);
689 printf("iseq opts: %d\n", iseq
->arg_opts
);
690 printf("iseq rest: %d\n", iseq
->arg_rest
);
691 printf("iseq post: %d\n", iseq
->arg_post_len
);
692 printf("iseq blck: %d\n", iseq
->arg_block
);
693 printf("iseq smpl: %d\n", iseq
->arg_simple
);
694 printf(" lambda: %s\n", lambda
? "true" : "false");
700 VM_CALLEE_SETUP_ARG(opt_pc
, th
, iseq
, orig_argc
, argv
, &blockptr
);
705 int argc
= orig_argc
;
706 const int m
= iseq
->argc
;
708 th
->mark_stack_len
= argc
;
712 * => {|a|} => a = [1, 2]
713 * => {|a, b|} => a, b = [1, 2]
715 if (!(iseq
->arg_simple
& 0x02) &&
716 (m
+ iseq
->arg_post_len
) > 0 &&
717 argc
== 1 && TYPE(argv
[0]) == T_ARRAY
) {
719 th
->mark_stack_len
= argc
= RARRAY_LEN(ary
);
721 CHECK_STACK_OVERFLOW(th
->cfp
, argc
);
723 MEMCPY(argv
, RARRAY_PTR(ary
), VALUE
, argc
);
726 for (i
=argc
; i
<m
; i
++) {
730 if (iseq
->arg_rest
== -1) {
734 * => {|a|} # truncate
736 th
->mark_stack_len
= argc
= m
;
740 int r
= iseq
->arg_rest
;
742 if (iseq
->arg_post_len
) {
743 int len
= iseq
->arg_post_len
;
744 int start
= iseq
->arg_post_start
;
745 int rsize
= argc
> m
? argc
- m
: 0;
749 if (psize
> len
) psize
= len
;
751 ary
= rb_ary_new4(rsize
- psize
, &argv
[r
]);
754 printf(" argc: %d\n", argc
);
755 printf(" len: %d\n", len
);
756 printf("start: %d\n", start
);
757 printf("rsize: %d\n", rsize
);
760 /* copy post argument */
761 MEMMOVE(&argv
[start
], &argv
[r
+ rsize
- psize
], VALUE
, psize
);
763 for (i
=psize
; i
<len
; i
++) {
764 argv
[start
+ i
] = Qnil
;
773 for (i
=argc
; i
<r
; i
++) {
776 argv
[r
] = rb_ary_new();
779 argv
[r
] = rb_ary_new4(argc
-r
, &argv
[r
]);
783 th
->mark_stack_len
= iseq
->arg_size
;
787 if (iseq
->arg_block
!= -1) {
788 VALUE procval
= Qnil
;
791 procval
= blockptr
->proc
;
794 argv
[iseq
->arg_block
] = procval
;
797 th
->mark_stack_len
= 0;
803 vm_invoke_block(rb_thread_t
*th
, rb_control_frame_t
*reg_cfp
, rb_num_t num
, rb_num_t flag
)
805 rb_block_t
* const block
= GET_BLOCK_PTR();
809 if (GET_ISEQ()->local_iseq
->type
!= ISEQ_TYPE_METHOD
|| block
== 0) {
810 vm_localjump_error("no block given (yield)", Qnil
, 0);
814 argc
= caller_setup_args(th
, GET_CFP(), flag
, argc
, 0, 0);
816 if (BUILTIN_TYPE(iseq
) != T_NODE
) {
818 const int arg_size
= iseq
->arg_size
;
819 VALUE
* const rsp
= GET_SP() - argc
;
822 CHECK_STACK_OVERFLOW(GET_CFP(), iseq
->stack_max
);
823 opt_pc
= vm_yield_setup_args(th
, iseq
, argc
, rsp
, 0,
824 block_proc_is_lambda(block
->proc
));
826 vm_push_frame(th
, iseq
,
827 VM_FRAME_MAGIC_BLOCK
, block
->self
, (VALUE
) block
->dfp
,
828 iseq
->iseq_encoded
+ opt_pc
, rsp
+ arg_size
, block
->lfp
,
829 iseq
->local_size
- arg_size
);
834 VALUE val
= vm_yield_with_cfunc(th
, block
, block
->self
, argc
, STACK_ADDR_FROM_TOP(argc
), 0);
835 POPN(argc
); /* TODO: should put before C/yield? */
843 lfp_svar_place(rb_thread_t
*th
, VALUE
*lfp
)
847 if (lfp
&& th
->local_lfp
!= lfp
) {
851 svar
= &th
->local_svar
;
854 *svar
= (VALUE
)NEW_IF(Qnil
, Qnil
, Qnil
);
856 return (NODE
*)*svar
;
860 lfp_svar_get(rb_thread_t
*th
, VALUE
*lfp
, VALUE key
)
862 NODE
*svar
= lfp_svar_place(th
, lfp
);
866 return svar
->u1
.value
;
868 return svar
->u2
.value
;
870 const VALUE hash
= svar
->u3
.value
;
876 return rb_hash_lookup(hash
, key
);
883 lfp_svar_set(rb_thread_t
*th
, VALUE
*lfp
, VALUE key
, VALUE val
)
885 NODE
*svar
= lfp_svar_place(th
, lfp
);
889 svar
->u1
.value
= val
;
892 svar
->u2
.value
= val
;
895 VALUE hash
= svar
->u3
.value
;
898 svar
->u3
.value
= hash
= rb_hash_new();
900 rb_hash_aset(hash
, key
, val
);
906 vm_getspecial(rb_thread_t
*th
, VALUE
*lfp
, VALUE key
, rb_num_t type
)
915 val
= lfp_svar_get(th
, lfp
, k
);
918 VALUE backref
= lfp_svar_get(th
, lfp
, 1);
923 val
= rb_reg_last_match(backref
);
926 val
= rb_reg_match_pre(backref
);
929 val
= rb_reg_match_post(backref
);
932 val
= rb_reg_match_last(backref
);
935 rb_bug("unexpected back-ref");
939 val
= rb_reg_nth_match(type
>> 1, backref
);
946 vm_get_cref(const rb_iseq_t
*iseq
, const VALUE
*lfp
, const VALUE
*dfp
)
952 cref
= iseq
->cref_stack
;
955 else if (dfp
[-1] != Qnil
) {
956 cref
= (NODE
*)dfp
[-1];
959 dfp
= GET_PREV_DFP(dfp
);
963 rb_bug("vm_get_cref: unreachable");
970 vm_check_if_namespace(VALUE klass
)
972 switch (TYPE(klass
)) {
977 rb_raise(rb_eTypeError
, "%s is not a class/module",
978 RSTRING_PTR(rb_inspect(klass
)));
983 vm_get_ev_const(rb_thread_t
*th
, const rb_iseq_t
*iseq
,
984 VALUE orig_klass
, ID id
, int is_defined
)
988 if (orig_klass
== Qnil
) {
989 /* in current lexical scope */
990 const NODE
*root_cref
= vm_get_cref(iseq
, th
->cfp
->lfp
, th
->cfp
->dfp
);
991 const NODE
*cref
= root_cref
;
992 VALUE klass
= orig_klass
;
994 while (cref
&& cref
->nd_next
) {
995 klass
= cref
->nd_clss
;
996 cref
= cref
->nd_next
;
1000 if (RCLASS_IV_TBL(klass
) &&
1001 st_lookup(RCLASS_IV_TBL(klass
), id
, &val
)) {
1002 if (val
== Qundef
) {
1003 rb_autoload_load(klass
, id
);
1004 goto search_continue
;
1019 klass
= root_cref
->nd_clss
;
1021 klass
= CLASS_OF(th
->cfp
->self
);
1025 return rb_const_defined(klass
, id
);
1028 return rb_const_get(klass
, id
);
1032 vm_check_if_namespace(orig_klass
);
1034 return rb_const_defined_from(orig_klass
, id
);
1037 return rb_const_get_from(orig_klass
, id
);
1043 vm_get_cvar_base(NODE
*cref
)
1047 while (cref
&& cref
->nd_next
&&
1048 (NIL_P(cref
->nd_clss
) || FL_TEST(cref
->nd_clss
, FL_SINGLETON
))) {
1049 cref
= cref
->nd_next
;
1051 if (!cref
->nd_next
) {
1052 rb_warn("class variable access from toplevel");
1056 klass
= cref
->nd_clss
;
1059 rb_raise(rb_eTypeError
, "no class variables available");
1065 vm_define_method(rb_thread_t
*th
, VALUE obj
, ID id
, rb_iseq_t
*miseq
,
1066 rb_num_t is_singleton
, NODE
*cref
)
1069 VALUE klass
= cref
->nd_clss
;
1070 int noex
= cref
->nd_visi
;
1073 rb_raise(rb_eTypeError
, "no class/module to add method");
1077 if (FIXNUM_P(obj
) || SYMBOL_P(obj
)) {
1078 rb_raise(rb_eTypeError
,
1079 "can't define singleton method \"%s\" for %s",
1080 rb_id2name(id
), rb_obj_classname(obj
));
1083 if (OBJ_FROZEN(obj
)) {
1084 rb_error_frozen("object");
1087 klass
= rb_singleton_class(obj
);
1092 COPY_CREF(miseq
->cref_stack
, cref
);
1093 miseq
->klass
= klass
;
1094 miseq
->defined_method_id
= id
;
1095 newbody
= NEW_NODE(RUBY_VM_METHOD_NODE
, 0, miseq
->self
, 0);
1096 rb_add_method(klass
, id
, newbody
, noex
);
1098 if (!is_singleton
&& noex
== NOEX_MODFUNC
) {
1099 rb_add_method(rb_singleton_class(klass
), id
, newbody
, NOEX_PUBLIC
);
1101 INC_VM_STATE_VERSION();
1104 static inline NODE
*
1105 vm_method_search(VALUE id
, VALUE klass
, IC ic
)
1109 #if OPT_INLINE_METHOD_CACHE
1111 if (LIKELY(klass
== ic
->ic_class
) &&
1112 LIKELY(GET_VM_STATE_VERSION() == ic
->ic_vmstat
)) {
1116 mn
= rb_method_node(klass
, id
);
1117 ic
->ic_class
= klass
;
1119 ic
->ic_vmstat
= GET_VM_STATE_VERSION();
1123 mn
= rb_method_node(klass
, id
);
1129 vm_search_normal_superclass(VALUE klass
, VALUE recv
)
1131 if (BUILTIN_TYPE(klass
) == T_CLASS
) {
1132 klass
= RCLASS_SUPER(klass
);
1134 else if (BUILTIN_TYPE(klass
) == T_MODULE
) {
1135 VALUE k
= CLASS_OF(recv
);
1137 if (BUILTIN_TYPE(k
) == T_ICLASS
&& RBASIC(k
)->klass
== klass
) {
1138 klass
= RCLASS_SUPER(k
);
1141 k
= RCLASS_SUPER(k
);
1148 vm_search_superclass(rb_control_frame_t
*reg_cfp
, rb_iseq_t
*ip
,
1149 VALUE recv
, VALUE sigval
,
1150 ID
*idp
, VALUE
*klassp
)
1155 while (ip
&& !ip
->klass
) {
1156 ip
= ip
->parent_iseq
;
1160 rb_raise(rb_eNoMethodError
, "super called outside of method");
1163 id
= ip
->defined_method_id
;
1165 if (ip
!= ip
->local_iseq
) {
1166 /* defined by Module#define_method() */
1167 rb_control_frame_t
*lcfp
= GET_CFP();
1169 while (lcfp
->iseq
!= ip
) {
1170 VALUE
*tdfp
= GET_PREV_DFP(lcfp
->dfp
);
1172 lcfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp
);
1173 if (lcfp
->dfp
== tdfp
) {
1179 id
= lcfp
->method_id
;
1180 klass
= vm_search_normal_superclass(lcfp
->method_class
, recv
);
1182 if (sigval
== Qfalse
) {
1184 rb_raise(rb_eRuntimeError
, "implicit argument passing of super from method defined by define_method() is not supported. Specify all arguments explicitly.");
1188 klass
= vm_search_normal_superclass(ip
->klass
, recv
);
1196 vm_throw(rb_thread_t
*th
, rb_control_frame_t
*reg_cfp
,
1197 rb_num_t throw_state
, VALUE throwobj
)
1199 rb_num_t state
= throw_state
& 0xff;
1200 rb_num_t flag
= throw_state
& 0x8000;
1201 rb_num_t level
= throw_state
>> 16;
1210 if (state
== TAG_BREAK
) {
1211 rb_control_frame_t
*cfp
= GET_CFP();
1212 VALUE
*dfp
= GET_DFP();
1214 rb_iseq_t
*base_iseq
= GET_ISEQ();
1217 if (cfp
->iseq
->type
!= ISEQ_TYPE_BLOCK
) {
1218 dfp
= GC_GUARDED_PTR_REF((VALUE
*) *dfp
);
1219 base_iseq
= base_iseq
->parent_iseq
;
1221 while ((VALUE
*) cfp
< th
->stack
+ th
->stack_size
) {
1222 if (cfp
->dfp
== dfp
) {
1225 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1227 rb_bug("VM (throw): can't find break base.");
1230 if (VM_FRAME_TYPE(cfp
) == VM_FRAME_MAGIC_LAMBDA
) {
1231 /* lambda{... break ...} */
1237 dfp
= GC_GUARDED_PTR_REF((VALUE
*) *dfp
);
1239 while ((VALUE
*)cfp
< th
->stack
+ th
->stack_size
) {
1240 if (cfp
->dfp
== dfp
) {
1241 VALUE epc
= epc
= cfp
->pc
- cfp
->iseq
->iseq_encoded
;
1242 rb_iseq_t
*iseq
= cfp
->iseq
;
1245 for (i
=0; i
<iseq
->catch_table_size
; i
++) {
1246 struct iseq_catch_table_entry
*entry
= &iseq
->catch_table
[i
];
1248 if (entry
->type
== CATCH_TYPE_BREAK
&&
1249 entry
->start
< epc
&& entry
->end
>= epc
) {
1250 if (entry
->cont
== epc
) {
1265 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1270 vm_localjump_error("break from proc-closure", throwobj
, TAG_BREAK
);
1273 else if (state
== TAG_RETRY
) {
1274 pt
= GC_GUARDED_PTR_REF((VALUE
*) * GET_DFP());
1275 for (i
= 0; i
< level
; i
++) {
1276 pt
= GC_GUARDED_PTR_REF((VALUE
*) * pt
);
1279 else if (state
== TAG_RETURN
) {
1280 rb_control_frame_t
*cfp
= GET_CFP();
1281 VALUE
*dfp
= GET_DFP();
1282 VALUE
* const lfp
= GET_LFP();
1284 /* check orphan and get dfp */
1285 while ((VALUE
*) cfp
< th
->stack
+ th
->stack_size
) {
1286 if (cfp
->lfp
== lfp
) {
1287 if (VM_FRAME_TYPE(cfp
) == VM_FRAME_MAGIC_LAMBDA
) {
1290 while (lfp
!= tdfp
) {
1291 if (cfp
->dfp
== tdfp
) {
1296 tdfp
= GC_GUARDED_PTR_REF((VALUE
*)*dfp
);
1301 if (cfp
->dfp
== lfp
&& cfp
->iseq
->type
== ISEQ_TYPE_METHOD
) {
1306 cfp
= RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp
);
1309 vm_localjump_error("unexpected return", throwobj
, TAG_RETURN
);
1315 rb_bug("isns(throw): unsupport throw type");
1319 return (VALUE
)NEW_THROW_OBJECT(throwobj
, (VALUE
) pt
, state
);
1322 /* continue throw */
1323 VALUE err
= throwobj
;
1325 if (FIXNUM_P(err
)) {
1326 th
->state
= FIX2INT(err
);
1328 else if (SYMBOL_P(err
)) {
1329 th
->state
= TAG_THROW
;
1331 else if (BUILTIN_TYPE(err
) == T_NODE
) {
1332 th
->state
= GET_THROWOBJ_STATE(err
);
1335 th
->state
= TAG_RAISE
;
1336 /*th->state = FIX2INT(rb_ivar_get(err, idThrowState));*/
1343 vm_expandarray(rb_control_frame_t
*cfp
, VALUE ary
, int num
, int flag
)
1345 int is_splat
= flag
& 0x01;
1346 int space_size
= num
+ is_splat
;
1347 VALUE
*base
= cfp
->sp
, *ptr
;
1348 volatile VALUE tmp_ary
;
1351 if (TYPE(ary
) != T_ARRAY
) {
1352 ary
= rb_ary_to_ary(ary
);
1355 cfp
->sp
+= space_size
;
1358 ptr
= RARRAY_PTR(ary
);
1359 len
= RARRAY_LEN(ary
);
1362 /* post: ..., nil ,ary[-1], ..., ary[0..-num] # top */
1366 for (i
=0; i
<num
-len
; i
++) {
1370 for (j
=0; i
<num
; i
++, j
++) {
1371 VALUE v
= ptr
[len
- j
- 1];
1375 *base
= rb_ary_new4(len
- j
, ptr
);
1379 /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
1381 VALUE
*bptr
= &base
[space_size
- 1];
1383 for (i
=0; i
<num
; i
++) {
1385 for (; i
<num
; i
++) {
1394 *bptr
= rb_ary_new();
1397 *bptr
= rb_ary_new4(len
- num
, ptr
+ num
);
/* check_cfunc -- fragment.
 * Tests whether method node `mn` exists, its body is a C-function
 * method (nd_type == NODE_CFUNC), and the implementation pointer
 * (nd_cfnc) equals `func`.
 * NOTE(review): the capture drops the original lines around these
 * fragments (opening/closing braces and the return statements of the
 * two branches), so the body shown is incomplete -- recover the
 * missing lines from the upstream file before editing. */
1404 check_cfunc(const NODE
*mn
, const void *func
)
1406 if (mn
 && nd_type(mn
->nd_body
) == NODE_CFUNC
 &&
1407 mn
->nd_body
->nd_cfnc
 == func
) {
1416 #ifndef NO_BIG_INLINE
1420 opt_eq_func(VALUE recv
, VALUE obj
, IC ic
)
1424 if (FIXNUM_2_P(recv
, obj
) &&
1425 BASIC_OP_UNREDEFINED_P(BOP_EQ
)) {
1433 else if (!SPECIAL_CONST_P(recv
) && !SPECIAL_CONST_P(obj
)) {
1434 if (HEAP_CLASS_OF(recv
) == rb_cFloat
&&
1435 HEAP_CLASS_OF(obj
) == rb_cFloat
&&
1436 BASIC_OP_UNREDEFINED_P(BOP_EQ
)) {
1437 double a
= RFLOAT_VALUE(recv
);
1438 double b
= RFLOAT_VALUE(obj
);
1440 if (isnan(a
) || isnan(b
)) {
1450 else if (HEAP_CLASS_OF(recv
) == rb_cString
&&
1451 HEAP_CLASS_OF(obj
) == rb_cString
&&
1452 BASIC_OP_UNREDEFINED_P(BOP_EQ
)) {
1453 val
= rb_str_equal(recv
, obj
);
1456 NODE
*mn
= vm_method_search(idEq
, CLASS_OF(recv
), ic
);
1457 extern VALUE
rb_obj_equal(VALUE obj1
, VALUE obj2
);
1459 if (check_cfunc(mn
, rb_obj_equal
)) {
1460 return recv
== obj
? Qtrue
: Qfalse
;