#include "shotgun/lib/shotgun.h"
#include "shotgun/lib/cpu.h"
#include "shotgun/lib/tuple.h"
#include "shotgun/lib/module.h"
#include "shotgun/lib/class.h"
#include "shotgun/lib/hash.h"
#include "shotgun/lib/lookuptable.h"
#include "shotgun/lib/methctx.h"
#include "shotgun/lib/array.h"
#include "shotgun/lib/string.h"
#include "shotgun/lib/symbol.h"
#include "shotgun/lib/machine.h"
#include "shotgun/lib/bytearray.h"
#include "shotgun/lib/fixnum.h"
#include "shotgun/lib/primitive_util.h"
#include "shotgun/lib/sendsite.h"
#include "shotgun/lib/subtend/ffi.h"
#include "shotgun/lib/subtend/nmc.h"
#if CONFIG_ENABLE_DTRACE
#include "shotgun/lib/dtrace_probes.h"
#endif

#include <mach/mach.h>
#include <mach/mach_time.h>

#define measure_cpu_time mach_absolute_time
void cpu_show_lookup_time(STATE) {
  struct mach_timebase_info timeinfo;
  uint64_t nano;
  double seconds, total;

  mach_timebase_info(&timeinfo);

  nano = (state->lookup_time * timeinfo.numer / timeinfo.denom);
  seconds = (double)(nano / (double)1000000000);

  nano = ((mach_absolute_time() - state->system_start) * timeinfo.numer / timeinfo.denom);
  total = (double)(nano / (double)1000000000);

  printf("Total time: % 3.3f\n", total);
  printf("Lookup time: % 3.3f\n", seconds);
  printf("Percent: % 3.3f\n", (seconds / total) * 100);
}
#define RISA(obj,cls) (REFERENCE_P(obj) && ISA(obj,BASIC_CLASS(cls)))

#define next_int_into(val) val = *ip_ptr++;
#define next_int next_int_into(_int);

#include "shotgun/lib/instruction_funcs.gen"

/* Tracing variant: print and sanity-check each threaded address before jumping. */
#define NEXT_OP printf(" => %p\n", *ip_ptr); sassert(*ip_ptr); goto **ip_ptr++
/* Fast variant: jump straight to the next threaded address. */
#define NEXT_OP goto **ip_ptr++

#define next_literal_into(val) next_int; val = fast_fetch(cpu_current_literals(state, c), _int)
#define next_literal next_literal_into(_lit)
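/* Dispatch overview: next_int pulls the next operand out of the instruction
 * stream through ip_ptr, and next_literal uses that operand to index the
 * current method's literals tuple. NEXT_OP implements the computed-goto
 * dispatch used by the direct-threaded interpreter loop in cpu_run below:
 * *ip_ptr holds the address of the handler for the next opcode, so dispatch
 * is a single indirect goto. */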
OBJECT cpu_open_class(STATE, cpu c, OBJECT under, OBJECT sup, OBJECT sym, int *created) {
  OBJECT val, s1, s2, s3, s4, sup_itr;
  /* Evil people could do A = 12; class A::B; end */
  if(!ISA(under, BASIC_CLASS(module))) {
    cpu_raise_exception(state, c,
      cpu_new_exception(state, c, state->global->exc_type, "Nesting constant is not a Module"));
  val = module_const_get(state, under, sym);

  if(AUTOLOAD_P(val)) { return val; }

  if(ISA(val, BASIC_CLASS(class))) {
    if(!NIL_P(sup) && class_superclass(state, val) != sup) {
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_type, "superclass mismatch"));

      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_type, "constant is not a class"));

  val = class_constitute(state, sup, under);

    cpu_raise_exception(state, c,
      cpu_new_exception(state, c, state->global->exc_arg, "Invalid superclass"));
  printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));

  if(under != state->global->object) {
    s1 = symbol_to_string(state, module_get_name(under));
    s2 = symbol_to_string(state, sym);
    s3 = string_dup(state, s1);
    string_append(state, s3, string_new(state, "::"));
    string_append(state, s3, s2);
    s4 = string_to_sym(state, s3);
    module_set_name(val, s4);
  } else {
    module_set_name(val, sym);
  }

  module_const_set(state, under, sym, val);
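/* The name bookkeeping above gives a class opened under another module a
 * fully qualified name: the enclosing module's name, "::", and the new
 * constant name are concatenated and interned, so opening B inside A names
 * the class :"A::B" rather than just :B. */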
/* Return the module object corresponding to the name or, if
   the module does not exist yet, create a new one first. */
OBJECT cpu_open_module(STATE, cpu c, OBJECT under, OBJECT sym) {

  module = module_const_get(state, under, sym);

  module = module_allocate_mature(state, 0);
  module_setup_fields(state, module);

  printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));

  if(under != BASIC_CLASS(object)) {
    s1 = symbol_to_string(state, module_get_name(under));
    s1 = string_dup(state, s1);
    string_append(state, s1, string_new(state, "::"));
    string_append(state, s1, symbol_to_string(state, sym));
    module_set_name(module, string_to_sym(state, s1));
  } else {
    module_set_name(module, sym);
  }

  module_const_set(state, under, sym, module);
  module_setup_fields(state, object_metaclass(state, module));
  module_set_encloser(module, under);
  module_set_encloser(object_metaclass(state, module), under);
/* Locate the method object for calling method +name+ on an instance of +klass+.
   +mod+ is updated to point to the Module that holds the method.
   The method is looked for in the hash tables up the superclass chain.
   Returns TRUE if we found a method object that should be considered;
   returns FALSE if we need to keep looking 'up' for the method. */
static inline int cpu_check_for_method(STATE, cpu c, OBJECT tbl, struct message *msg) {
  OBJECT obj, vis;
  msg->method = lookuptable_fetch(state, tbl, msg->name);

  if(NIL_P(msg->method)) return FALSE;

  /* A 'false' method means to terminate method lookup. (e.g. undef_method) */
  if(FALSE_P(msg->method)) return TRUE;

  if(TUPLE_P(msg->method)) {
    obj = tuple_at(state, msg->method, 1);
    /* nil means that the actual method object is 'up' from here */
    if(NIL_P(obj)) return FALSE;
  } /* otherwise, bypass all visibility checks */
  /* Check that we are allowed to call this method */
  if(TUPLE_P(msg->method)) {
    vis = tuple_at(state, msg->method, 0);

    if(vis == state->global->sym_private) {
      /* We stop on private methods. */
      msg->method = Qfalse;

    } else if(vis == state->global->sym_protected) {
      /* If it's protected, bail if the receiver isn't the same ... */
      if(!object_kind_of_p(state, c->self, msg->module)) {
        msg->method = Qfalse;

    obj = tuple_at(state, msg->method, 1);

    /* The method was callable, but we need to keep looking
     * for the implementation, so make the invocation bypass all further
     * visibility checks */
#define UNVIS_METHOD(var) if(TUPLE_P(var)) { var = tuple_at(state, var, 1); }
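/* When a method table entry is a Tuple, field 0 holds the visibility symbol
 * (sym_public, sym_private or sym_protected above) and field 1 holds the
 * actual method object, or nil when the real implementation lives further up
 * the chain. UNVIS_METHOD strips that wrapper so callers end up with the bare
 * method object. */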
static inline int cpu_find_method(STATE, cpu c, struct message *msg) {
  struct method_cache *ent;
#if USE_GLOBAL_CACHING
  ent = state->method_cache + CPU_CACHE_HASH(msg->klass, msg->name);

  /* We hit a hole. Stop. */
  if(ent->name == msg->name && ent->klass == msg->klass) {

    /* TODO does this need to check for protected? */
    if(msg->priv || ent->is_public) {
      msg->method = ent->method;
      msg->module = ent->module;

  state->cache_collisions++;

  state->cache_misses++;
  /* Validate klass is valid even. */
  if(NUM_FIELDS(klass) <= CLASS_f_SUPERCLASS) {
    printf("Warning: encountered invalid class (not big enough).\n");

  tbl = module_get_method_table(klass);

  /* Ok, rather than assert, i'm going to just bail. Makes the error
     a little strange, but handle-able in ruby land. */
  if(!LOOKUPTABLE_P(tbl)) {
    printf("Warning: encountered invalid module (methods not a LookupTable).\n");

  if(cpu_check_for_method(state, c, tbl, msg)) {

  klass = class_get_superclass(klass);
  if(NIL_P(klass)) break;

  if(!RTEST(msg->method)) return FALSE;
#if USE_GLOBAL_CACHING
  /* Update the cache. */
  if(RTEST(msg->method)) {
    ent->klass = msg->klass;
    ent->name = msg->name;

    if(TUPLE_P(msg->method)) {
      ent->method = NTH_FIELD(msg->method, 1);
      if(NTH_FIELD(msg->method, 0) == state->global->sym_public) {
        ent->is_public = TRUE;
      } else {
        ent->is_public = FALSE;
      }

      msg->method = ent->method;
    } else {
      ent->method = msg->method;
      ent->is_public = TRUE;
    }
  if(RTEST(msg->method)) {
    UNVIS_METHOD(msg->method);
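/* The global method cache above is a simple direct-mapped table: an entry is
 * selected by hashing the (class, method name) pair, and a hit that passes
 * the visibility check supplies the method and module without touching any
 * method table. On a miss the superclass chain is walked via
 * module_get_method_table/cpu_check_for_method and the winning entry is
 * written back into the cache. */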
OBJECT exported_cpu_find_method(STATE, cpu c, OBJECT klass, OBJECT name, OBJECT *mod) {

  if(!cpu_find_method(state, c, &msg)) {
OBJECT cpu_locate_method_on(STATE, cpu c, OBJECT obj, OBJECT sym, OBJECT include_private) {

  msg.klass = _real_class(state, obj);
  msg.priv = TRUE_P(include_private);

  if(cpu_find_method(state, c, &msg)) {
    if(RTEST(msg.method)) {
      return tuple_new2(state, 2, msg.method, msg.module);
static inline int cpu_locate_method(STATE, cpu c, struct message *msg) {
  struct message missing;

  if(RUBINIUS_VM_LOOKUP_BEGIN_ENABLED()) {
    RUBINIUS_VM_LOOKUP_BEGIN();
  }

  if(cpu_find_method(state, c, msg)) goto done;

  missing.name = state->global->method_missing;

  /* If we couldn't even find method_missing. bad. */
  if(!cpu_find_method(state, c, &missing)) { ret = FALSE; goto done; }

  msg->method = missing.method;
  msg->module = missing.module;

  if(RUBINIUS_VM_LOOKUP_END_ENABLED()) {
    RUBINIUS_VM_LOOKUP_END();
  }

  // printf("Found method: %p\n", mo);
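/* Lookup failure does not raise here: the message is rewritten to carry the
 * method_missing implementation instead, and cpu_perform later tags the new
 * context (see the @send_private ivar handling) so method_missing can tell
 * why the original send failed. */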
static inline OBJECT cpu_check_serial(STATE, cpu c, OBJECT obj, OBJECT sym, int serial) {

  msg.klass = _real_class(state, obj);

  if(!cpu_find_method(state, c, &msg)) {

  if(N2I(fast_fetch(msg.method, CMETHOD_f_SERIAL)) == serial) {
OBJECT cpu_compile_method(STATE, OBJECT cm) {
  OBJECT ba, bc;
  int target_size;

  ba = cmethod_get_compiled(cm);
  bc = cmethod_get_bytecodes(cm);

#if DIRECT_THREADED
  /* If we're direct threaded, the compiled version is an array of the pointer
     sized handler addresses rather than 32-bit opcodes. */
  target_size = (BYTEARRAY_SIZE(bc) / sizeof(uint32_t)) * sizeof(uintptr_t);
#else
  target_size = BYTEARRAY_SIZE(bc);
#endif

  if(NIL_P(ba) || BYTEARRAY_SIZE(ba) < target_size) {
    /* First time this method has been compiled, or size of the current
       bytearray is insufficient to hold the revised bytecode */
    ba = bytearray_new(state, target_size);
  }

  cpu_compile_instructions(state, bc, ba);
  cmethod_set_compiled(cm, ba);
void cpu_compile_instructions(STATE, OBJECT bc, OBJECT comp) {
  /* If this is not a big endian platform, we need to adjust
     the iseq to have the right order */
#if !CONFIG_BIG_ENDIAN && !DIRECT_THREADED
  iseq_flip(state, bc, comp);
#elif DIRECT_THREADED
  /* If we're compiled with direct threading, then translate
     the compiled version into addresses. */
  calculate_into_gotos(state, bc, comp, _dt_addresses, _dt_size);
#endif
}
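/* A compiled method therefore carries two byte arrays: the portable bytecode
 * and a per-VM "compiled" form. On little-endian builds without direct
 * threading the instruction words are re-ordered via iseq_flip; with direct
 * threading each 32-bit opcode is translated (calculate_into_gotos) to the
 * pointer-sized address of its handler, which is what NEXT_OP jumps through. */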
static inline OBJECT _allocate_context(STATE, cpu c, OBJECT meth, int locals) {
  OBJECT ctx, ins;
  struct fast_context *fc;
  int i;

  ctx = object_memory_new_context(state->om, locals);
  if(ctx >= state->om->context_last) {
    state->om->collect_now |= OMCollectYoung;
  }

  /* TODO this code only works if ctx is allocated in the context stack
   * or young area. If it's allocated mature off the bat, the write
   * barrier won't be run and we're screwed. */

  ins = fast_fetch(meth, CMETHOD_f_COMPILED);
  if(NIL_P(ins)) {
    ins = cpu_compile_method(state, meth);
  }

  ctx->field_count = FASTCTX_FIELDS;

  fc = FASTCTX(ctx);

  fc->sender = c->active_context;

  fc->custom_iseq = Qnil;
  fc->data = bytearray_byte_address(state, ins);
  fc->literals = fast_fetch(meth, CMETHOD_f_LITERALS);

  fc->locals = object_memory_context_locals(ctx);
  CLEAR_FLAGS(fc->locals);
  fc->locals->gc_zone = 0;
  fc->locals->klass = BASIC_CLASS(tuple);
  SET_NUM_FIELDS(fc->locals, locals);

  for(i = 0; i < locals; i++) {
    SET_FIELD_DIRECT(fc->locals, i, Qnil);
  }

  // printf("Locals for %p at %p (%d, %d)\n", ctx, fc->locals, num_lcls, FASTCTX(ctx)->size);
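/* Method and block contexts are carved out of object_memory's context region
 * rather than the normal heap; crossing context_last is what schedules a
 * young-generation collection. The locals area lives in the same allocation
 * and is tagged with the Tuple class and a field count so the rest of the VM
 * can treat it as an ordinary Tuple. */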
static inline OBJECT cpu_create_context(STATE, cpu c, const struct message *msg) {
  struct fast_context *fc;

  ctx = _allocate_context(state, c, msg->method, N2I(cmethod_get_local_count(msg->method)));

  /* fp points to the location on the stack as the context
     was being created. */

  fc->block = msg->block;
  fc->self = msg->recv;
  fc->argcount = msg->args;
  fc->name = msg->name;
  fc->method_module = msg->module;
  fc->type = FASTCTX_NORMAL;

  if (RUBINIUS_FUNCTION_ENTRY_ENABLED()) {
    dtrace_function_entry(state, c, msg);
  }
OBJECT cpu_create_block_context(STATE, cpu c, OBJECT env, int sp) {
  struct fast_context *fc;

  ctx = _allocate_context(state, c, blokenv_get_method(env),
                          N2I(blokenv_get_local_count(env)));

  fc->ip = N2I(blokenv_get_initial_ip(env));

  fc->method_module = Qnil;
  fc->type = FASTCTX_BLOCK;
void cpu_raise_from_errno(STATE, cpu c, const char *msg) {

  cls = lookuptable_fetch(state, state->global->errno_mapping, I2N(errno));
  if(NIL_P(cls)) {
    cls = state->global->exc_arg;
    snprintf(buf, sizeof(buf), "Unknown errno %d", errno);
  }

  cpu_raise_exception(state, c, cpu_new_exception(state, c, cls, msg));
void cpu_raise_arg_error_generic(STATE, cpu c, const char *msg) {
  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}
void cpu_raise_arg_error(STATE, cpu c, int args, int req) {
  char msg[1024];
  snprintf(msg, 1024, "wrong number of arguments (got %d, required %d)", args, req);

  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}
void cpu_raise_primitive_failure(STATE, cpu c, int primitive_idx) {
  char msg[1024];
  OBJECT primitive_failure;
  snprintf(msg, 1024, "Primitive with index (%d) failed", primitive_idx);

  primitive_failure = cpu_new_exception(state, c, state->global->exc_primitive_failure, msg);
  cpu_raise_exception(state, c, primitive_failure);
}
static int cpu_execute_primitive(STATE, cpu c, const struct message *msg, int prim) {

  if (RUBINIUS_FUNCTION_PRIMITIVE_ENTRY_ENABLED()) {
    dtrace_function_primitive_entry(state, c, msg);
  }

  c->in_primitive = prim;
  if(cpu_perform_system_primitive(state, c, prim, msg)) {

    if(EXCESSIVE_TRACING) {
      printf("%05d: Called prim %s => %s on %s.\n", c->depth,
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        rbs_symbol_to_cstring(state, msg->name), _inspect(msg->recv));
    }

    if (RUBINIUS_FUNCTION_PRIMITIVE_RETURN_ENABLED()) {
      dtrace_function_primitive_return(state, c, msg);
    }

  if(EXCESSIVE_TRACING) {
    printf("[[ Primitive failed! -- %d ]]\n", prim);
  }
static inline int cpu_try_primitive(STATE, cpu c, const struct message *msg) {

  prim_obj = fast_fetch(msg->method, CMETHOD_f_PRIMITIVE);

  if(NIL_P(prim_obj)) {

  } else if(!FIXNUM_P(prim_obj)) {
    if(SYMBOL_P(prim_obj)) {
      prim = calc_primitive_index(state, symbol_to_string(state, prim_obj));

    cmethod_set_primitive(msg->method, I2N(prim));
  } else {
    prim = N2I(prim_obj);

  cmethod_set_primitive(msg->method, Qnil);

  return cpu_execute_primitive(state, c, msg, prim);
/* Lowest-level ("raw-most") functions for moving into a method. Adjusts registers. */
/* Stack offset is used to adjust sp when it's saved, so when
   this context is swapped back in, any arguments are automatically
   removed from the stack */
inline void cpu_save_registers(STATE, cpu c, int offset) {
  struct fast_context *fc;

  fc = FASTCTX(c->active_context);
  fc->sp = c->sp - offset;
inline void cpu_yield_debugger_check(STATE, cpu c) {
  /* Yield to the debugger if the flag is set */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    if(EXCESSIVE_TRACING) {
      printf("Yielding to debugger due to context change\n");
    }
    cpu_yield_debugger(state, c);
  }
}
inline void cpu_restore_context_with_home(STATE, cpu c, OBJECT ctx, OBJECT home) {
  struct fast_context *fc;

  /* Home is actually the main context here because it's the method
     context that holds all the data. So if it's a fast context, we restore
     its data, then if ctx != home, we restore a little more */

  CHECK_PTR(fc->method);

  c->argcount = fc->argcount;

  /* Only happens if we're restoring a block. */

  c->locals = FASTCTX(home)->locals;

  c->sender = fc->sender;

  c->home_context = home;
  c->active_context = ctx;
/* Layer 2 method movement: use lower level only. */

/* Used in debugging. Verifies that the expected depth is the actual depth. */
static void _verify_depth(cpu c) {
  OBJECT ctx = c->active_context;

  ctx = FASTCTX(ctx)->sender;

  assert(count == c->depth);
inline void cpu_activate_context(STATE, cpu c, OBJECT ctx, OBJECT home, int so) {

  if(c->active_context != Qnil) {
    cpu_save_registers(state, c, so);
  }

  cpu_restore_context_with_home(state, c, ctx, home);
  cpu_yield_debugger_check(state, c);
/* Layer 2.5: Uses lower layers to return to the calling context.
   Returning ends here. */

void nmc_activate(STATE, cpu c, OBJECT nmc, OBJECT val, int reraise);

inline int cpu_simple_return(STATE, cpu c, OBJECT val) {
  OBJECT current, destination, home;

  if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
    dtrace_function_return(state, c);
  }

  current = c->active_context;
  c->active_context = Qnil;
  destination = cpu_current_sender(c);

  // printf("Rtrnng frm %p (%d)\n", current, FASTCTX(current)->size);

  if(destination == Qnil) {
    object_memory_retire_context(state->om, current);

    /* Thread exiting, reschedule.. */
    if(c->current_thread != c->main_thread) {
      THDEBUG("%d: thread reached end, dead.\n", getpid());
      cpu_thread_exited(state, c);

    /* Switch back to the main task... */
    } else if(c->current_task != c->main_task) {
      cpu_task_select(state, c, c->main_task);
    }

    /* The return value of the script is passed on the stack. */

  /* retire this one context. */
  object_memory_retire_context(state->om, current);

  /* Now, figure out if the destination is a block, so we pass the correct
     home to restore_context */
  if(block_context_p(state, destination)) {
    home = blokctx_home(state, destination);

  if(EXCESSIVE_TRACING) {
    if(stack_context_p(destination)) {
      printf("Returning to a stack context %d / %d (%s).\n", (int)c->active_context, (int)destination, c->active_context - destination == CTX_SIZE ? "stack" : "REMOTE");
    } else {
      printf("Returning to %s.\n", _inspect(destination));
    }
  }

  if(FASTCTX(home)->type == FASTCTX_NMC) {
    nmc_activate(state, c, home, val, FALSE);
    /* We return because nmc_activate will setup the cpu to do whatever ... */

  cpu_restore_context_with_home(state, c, destination, home);
/* Used by raise_exception to restore the previous context. */
int cpu_unwind(STATE, cpu c) {
  OBJECT current, destination, home;
  current = c->active_context;
  c->active_context = Qnil;
  destination = cpu_current_sender(c);

  if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
    dtrace_function_return(state, c);
  }

  if(destination == Qnil) {
    object_memory_retire_context(state->om, current);

    /* Thread exiting, reschedule.. */
    if(c->current_thread != c->main_thread) {
      THDEBUG("%d: thread reached end, dead.\n", getpid());
      cpu_thread_exited(state, c);

    /* Switch back to the main task... */
    } else if(c->current_task != c->main_task) {
      cpu_task_select(state, c, c->main_task);
    }

  /* retire this one context. */
  object_memory_retire_context(state->om, current);

  /* Now, figure out if the destination is a block, so we pass the correct
     home to restore_context */
  if(block_context_p(state, destination)) {
    home = blokctx_home(state, destination);

  /* Commenting out 02.01.08 - Caleb Tennis.
     I don't know the purpose of this code, but if an exception is thrown from
     a rb_funcall in subtend, this causes an endless loop in cpu_return_to_sender.

     Commenting it out fixes that for now.
     Hopefully someone smarter than me knows a better fix for the future.

     Skip over NMCs for now.

  if(exception && FASTCTX(destination)->type == FASTCTX_NMC) {
    c->active_context = destination;
    return cpu_return_to_sender(state, c, val, FALSE, TRUE);
  }
  */

  /* Ok, reason we'd be restoring a native context:
     1) the native context used rb_funcall and we need to return
     it the result of the call. ... */
  cpu_restore_context_with_home(state, c, destination, home);
/* Layer 3: goto. Basically jumps directly into the specified method,
   no lookup required. */

inline void cpu_goto_method(STATE, cpu c, OBJECT recv, OBJECT meth,
                            int count, OBJECT name, OBJECT block) {

  if(cpu_try_primitive(state, c, &msg)) return;

  OBJECT scope = cmethod_get_staticscope(meth);
  if(NIL_P(scope)) {
    cmethod_set_staticscope(meth, state->global->top_scope);
  }

  ctx = cpu_create_context(state, c, &msg);
  cpu_activate_context(state, c, ctx, ctx, 0);
/* Layer 3: hook. Shortcut for running hook methods. */

inline void cpu_perform_hook(STATE, cpu c, OBJECT recv, OBJECT meth, OBJECT arg) {

  msg.klass = _real_class(state, recv);

  if(!cpu_find_method(state, c, &msg)) return;

  rub = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
  if(NIL_P(rub)) return;

  vm = rbs_const_get(state, rub, "VM");
  if(NIL_P(vm)) return;

  /* The top of the stack contains the value that should remain on the stack.
     We pass that to the perform_hook call so it is returned and stays on
     the top of the stack. That's why we say there are 4 args. */

  cpu_send(state, c, vm, SYM("perform_hook"), 4, Qnil);
/* Layer 4: High level method calling. */

/* Layer 4: direct activation. Used for calling a method that's already
   been located. */
static inline void cpu_activate_method(STATE, cpu c, struct message *msg) {

  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  ctx = cpu_create_context(state, c, msg);

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
static inline void cpu_perform(STATE, cpu c, const struct message *msg) {

  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
  cpu_yield_debugger_check(state, c);
static inline void cpu_patch_mono(struct message *msg);

static inline void cpu_patch_missing(struct message *msg);

static void _cpu_ss_basic(struct message *msg) {

  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If it's not method_missing, cache the details of msg in the send_site */
  if(!msg->missing) {
    cpu_patch_mono(msg);
  } else {
    cpu_patch_missing(msg);
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_initialize_sendsite(STATE, struct send_site *ss) {
  ss->lookup = _cpu_ss_basic;
}

static void _cpu_ss_disabled(struct message *msg) {

  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If it's not method_missing, cache the details of msg in the send_site */

  stack_push(msg->name);

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_patch_disabled(struct message *msg, struct send_site *ss) {
  ss->data1 = ss->data2 = ss->data3 = Qnil;

  ss->lookup = _cpu_ss_disabled;

  _cpu_ss_disabled(msg);
}
#define SS_DISABLE_THRESHOLD 10000
#define SS_MISSES(ss) if(++ss->misses > SS_DISABLE_THRESHOLD) { cpu_patch_disabled(msg, ss); } else

/* Send Site specialization 1: execute a primitive directly. */

#define CHECK_CLASS(msg) (_real_class(msg->state, msg->recv) != SENDSITE(msg->send_site)->data1)
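/* Each specialized send site caches the receiver's class in data1; CHECK_CLASS
 * is the guard every fast path runs first, and SS_MISSES counts guard failures.
 * Once a site misses more than SS_DISABLE_THRESHOLD times it is patched back
 * to _cpu_ss_disabled, which always does the full lookup. */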
static void _cpu_ss_mono_prim(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  OBJECT *_orig_sp_ptr;

  if(CHECK_CLASS(msg)) {

  _orig_sp_ptr = c->sp_ptr;

  func = (prim_func)ss->c_data;

  msg->method = ss->data2;
  msg->module = ss->data3;

  c->in_primitive = ss->data4;

  if(!func(msg->state, msg->c, msg)) {
    c->in_primitive = 0;
    c->sp_ptr = _orig_sp_ptr;

    cpu_perform(msg->state, msg->c, msg);
  } else {
    c->in_primitive = 0;
  }
/* Called before a primitive is run the slow way, allowing the send_site to be patched
 * to call the primitive directly. */
void cpu_patch_primitive(STATE, const struct message *msg, prim_func func, int prim) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  /* If this sendsite is disabled, leave it disabled. */
  if(ss->lookup == _cpu_ss_disabled) return;

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->c_data = (void*)func;
  ss->lookup = _cpu_ss_mono_prim;
}
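/* data1 holds the expected receiver class, data2/data3 the cached method and
 * module, and c_data the raw C function pointer for the primitive, so a later
 * hit in _cpu_ss_mono_prim can call the primitive without any method lookup.
 * The OBJECT fields are stored with SET_STRUCT_FIELD rather than plain
 * assignment, presumably so the stores are visible to the GC's write barrier. */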
/* Send Site specialization 2: Run an ffi function directly. */
static void _cpu_ss_mono_ffi(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {

  ffi_call(msg->state, msg->c, nfunc_get_data(ss->data2));

/* Called before an FFI function is run the slow way, allowing the send_site to be patched
 * to call the function directly. */
void cpu_patch_ffi(STATE, const struct message *msg) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->c_data = *DATA_STRUCT(nfunc_get_data(msg->method), void**);
  ss->lookup = _cpu_ss_mono_ffi;
}

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);
  SET_STRUCT_FIELD(msg->send_site, ss->data1, msg->recv);
  SET_STRUCT_FIELD(msg->send_site, ss->data2, nfunc_get_data(msg->method));
  ss->lookup = _cpu_ss_mono_ffi;
/* Send Site specialization 3: simple monomorphic last-implementation cache. */
static void _cpu_ss_mono(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {

  msg->method = ss->data2;
  msg->module = ss->data3;

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_mono in +ss+, so
 * that the next time +ss+ is used, it will try the cached details first. */
static inline void cpu_patch_mono(struct message *msg) {

  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_mono;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}
static void _cpu_ss_missing(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {

  msg->method = ss->data2;
  msg->module = ss->data3;

  stack_push(msg->name);

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_missing in +ss+, so
 * that the next time +ss+ is used, it will try the cached details first. */
static inline void
cpu_patch_missing(struct message *msg) {

  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_missing;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}
static void _cpu_on_no_method(STATE, cpu c, const struct message *msg) {
  OBJECT exc;
  char str[1024];

  exc = rbs_const_get(state, BASIC_CLASS(object), "RuntimeError");

  snprintf(str, 1024, "Unable to find any version of '%s' to run", _inspect(msg->name));

  cpu_raise_exception(state, c, cpu_new_exception(state, c, exc, str));
/* Layer 4: send. Primary method calling function. */
inline void cpu_send_message(STATE, cpu c, struct message *msg) {
  struct send_site *ss;

  uint64_t start = measure_cpu_time();

  ss = SENDSITE(msg->send_site);

  msg->name = ss->name;

  state->lookup_time += (measure_cpu_time() - start);
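/* The start/lookup_time bookkeeping here feeds cpu_show_lookup_time at the top
 * of this file, which reports how much of total run time is spent in method
 * lookup. The send site's lookup hook (ss->lookup, installed by the
 * _cpu_ss_* functions above) is what actually locates and invokes the method. */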
void cpu_send_message_external(STATE, cpu c, struct message *msg) {

  if(!cpu_locate_method(state, c, msg)) {
    _cpu_on_no_method(state, c, msg);

  stack_push(msg->name);

  if(cpu_try_primitive(state, c, msg)) return;

  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
/* A version used when there is no send_site. */
void cpu_send(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {

  msg.klass = _real_class(state, recv);
  msg.priv = c->call_flags;

  msg.send_site = Qnil;

  cpu_send_message_external(state, c, &msg);
/* A version used to call a method on the superclass */
void cpu_send_super(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {

  msg.klass = class_get_superclass(_real_class(state, recv));

  msg.send_site = Qnil;

  cpu_send_message_external(state, c, &msg);
void cpu_raise_exception(STATE, cpu c, OBJECT exc) {
  OBJECT ctx, table, ent;
  int cur, total, target, idx, l, r;

  ctx = c->active_context;

  cpu_save_registers(state, c, 0);

  /* NOTE: using return_to_sender worries me a little because it can
     switch to a different task if you try to return off the top ... */

  while(!NIL_P(ctx)) {
    if(c->type == FASTCTX_NMC) goto skip;

    table = cmethod_get_exceptions(cpu_current_method(state, c));

    if(!table || NIL_P(table)) goto skip;

    total = NUM_FIELDS(table);

    for(idx = 0; idx < total; idx++) {
      ent = tuple_at(state, table, idx);
      l = N2I(tuple_at(state, ent, 0));
      r = N2I(tuple_at(state, ent, 1));
      if(cur >= l && cur <= r) {
        target = N2I(tuple_at(state, ent, 2));

    /* unwind returns FALSE if we can't unwind anymore. */
    if(!cpu_unwind(state, c)) break;
    ctx = c->active_context;
  }

  /* Reset it because it can get overridden in the return_to_senders. */

  // printf("Unable to find exception handler, i'm confused.\n");
void cpu_yield_debugger(STATE, cpu c) {
  /* Ensure the DEBUG_ON_CTXT_CHANGE flag is cleared so we don't try
     to yield more than once */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    TASK_CLEAR_FLAG(c, TASK_DEBUG_ON_CTXT_CHANGE);
    struct cpu_task *task = (struct cpu_task*)BYTES_OF(c->current_task);
    TASK_CLEAR_FLAG(task, TASK_DEBUG_ON_CTXT_CHANGE);
  }

  methctx_reference(state, c->active_context);

  OBJECT dbg = c->debug_channel;

  /* No debug channel on the task, so use the VM default one (if any) */

  mod = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");

  vm = rbs_const_get(state, mod, "VM");

  dbg = object_get_ivar(state, vm, SYM("@debug_channel"));

  if(c->control_channel == Qnil) {
    /* No control channel on the task, so create one */
    c->control_channel = cpu_channel_new(state);
  }

  sassert(cpu_channel_has_readers_p(state, dbg));
  cpu_channel_send(state, c, dbg, c->current_thread);
  /* This is so when this task is reactivated, the sent value won't be placed
     on the stack, keeping the stack clean. */
  TASK_SET_FLAG(c, TASK_NO_STACK);
  cpu_channel_receive(state, c, c->control_channel, c->current_thread);

  cpu_raise_arg_error_generic(state, c, "Attempted to switch to debugger, no debugger installed");
const char *cpu_op_to_name(STATE, char op) {
#include "shotgun/lib/instruction_names.h"
  return get_instruction_name(op);
}
void state_collect(STATE, cpu c);
void state_major_collect(STATE, cpu c);
void cpu_run(STATE, cpu c, int setup) {

  IP_TYPE *ip_ptr = NULL;
  const char *firesuit_arg;
  struct rubinius_globals *global = state->global;

  c->ip_ptr = &ip_ptr;

  /* recache ip_ptr to make it valid. */

  current_machine->g_use_firesuit = 1;
  current_machine->g_access_violation = 0;
  getcontext(&current_machine->g_firesuit);

  /* Ok, we jumped back here because something went south. */
  if(current_machine->g_access_violation) {
    switch(current_machine->g_access_violation) {

      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Accessed outside bounds of object"));

      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Attempted to access field of non-reference (null pointer)"));

      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "VMAssertion"),
          "An error has occurred within the VM"));

      object_type_to_type(current_machine->g_firesuit_arg, firesuit_arg);
      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Invalid type encountered %s: %s",
          current_machine->g_firesuit_message, firesuit_arg));
      free(current_machine->g_firesuit_message);

      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "SystemStackError"),
          "Maximum amount of stack space used"));

      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Unknown firesuit reason: %d", current_machine->g_access_violation));
  while(c->active_context != Qnil) {

    if(EXCESSIVE_TRACING) {
      printf("%-15s: => %p\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),

#include "shotgun/lib/instruction_dt.gen"

    if(EXCESSIVE_TRACING) {
      printf("%-15s: OP: %s (%d/%d/%d)\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        cpu_op_to_name(state, op), op, c->ip, c->sp);
    }

#include "shotgun/lib/instructions.gen"

    if(state->om->collect_now) {

      if (RUBINIUS_GC_BEGIN_ENABLED()) {
        dtrace_gc_begin(state);
      }

      int cm = state->om->collect_now;

      /* Collect the first generation. */
      if(cm & OMCollectYoung) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting young objects. ]]\n");
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
        }
        state_collect(state, c);
        if(EXCESSIVE_TRACING) {
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
          printf("[[ Finished collect. ]]\n");
        }
      }

      /* Collect the old generation. */
      if(cm & OMCollectMature) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting old objects. ]]\n");
        }
        state_major_collect(state, c);
        // printf("Done with major collection.\n");
      }

      /* If someone is reading the ON_GC channel, write to it to notify them. */
      if(cpu_channel_has_readers_p(state, state->global->on_gc_channel)) {
        cpu_channel_send(state, c, state->global->on_gc_channel, Qtrue);
      }

      state->om->collect_now = 0;

      if (RUBINIUS_GC_END_ENABLED()) {
        dtrace_gc_end(state);
      }
    }

    if(state->check_events) {
      state->check_events = 0;
      if(state->pending_events) cpu_event_runonce(state);
      if(state->pending_threads) cpu_thread_preempt(state, c);
    }
  }
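/* The dispatch loop interleaves three concerns: executing opcodes (the
 * included instruction handlers), running the garbage collector when the
 * object memory has flagged collect_now (young and/or mature generations),
 * and servicing pending events and thread preemption between instructions. */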
void cpu_run_script(STATE, cpu c, OBJECT meth) {
  OBJECT name;

  name = string_to_sym(state, string_new(state, "__script__"));
  cpu_goto_method(state, c, c->main, meth, 0, name, Qnil);
}