4 #include "shotgun/lib/shotgun.h"
5 #include "shotgun/lib/cpu.h"
6 #include "shotgun/lib/tuple.h"
7 #include "shotgun/lib/module.h"
8 #include "shotgun/lib/class.h"
9 #include "shotgun/lib/hash.h"
10 #include "shotgun/lib/lookuptable.h"
11 #include "shotgun/lib/methctx.h"
12 #include "shotgun/lib/array.h"
13 #include "shotgun/lib/string.h"
14 #include "shotgun/lib/symbol.h"
15 #include "shotgun/lib/machine.h"
16 #include "shotgun/lib/bytearray.h"
17 #include "shotgun/lib/fixnum.h"
18 #include "shotgun/lib/primitive_util.h"
19 #include "shotgun/lib/sendsite.h"
20 #include "shotgun/lib/subtend/ffi.h"
21 #include "shotgun/lib/subtend/nmc.h"
#if CONFIG_ENABLE_DTRACE
#include "shotgun/lib/dtrace_probes.h"
#endif

#include <mach/mach.h>
#include <mach/mach_time.h>

#define measure_cpu_time mach_absolute_time
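/* Lookup-time accounting uses Mach absolute time, which counts in CPU-specific
 * ticks; cpu_show_lookup_time below converts those ticks to nanoseconds with
 * the numer/denom ratio from mach_timebase_info before reporting seconds. */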
void cpu_show_lookup_time(STATE) {
  struct mach_timebase_info timeinfo;
  uint64_t nano;
  double seconds, total;

  mach_timebase_info(&timeinfo);

  nano = (state->lookup_time * timeinfo.numer / timeinfo.denom);
  seconds = (double)(nano / (double)1000000000);

  nano = ((mach_absolute_time() - state->system_start) * timeinfo.numer / timeinfo.denom);
  total = (double)(nano / (double)1000000000);

  printf("Total time: % 3.3f\n", total);
  printf("Lookup time: % 3.3f\n", seconds);
  printf("Percent: % 3.3f\n", (seconds / total) * 100);
}
#define RISA(obj,cls) (REFERENCE_P(obj) && ISA(obj,BASIC_CLASS(cls)))

#define next_int_into(val) val = *ip_ptr++;
#define next_int next_int_into(_int);

#include "shotgun/lib/instruction_funcs.gen"

/* Two variants of NEXT_OP: a tracing one and the normal direct-threaded
   dispatch. Only one can be active; the guard macro name used here
   (SHOW_OPS) is presumed. */
#ifdef SHOW_OPS
#define NEXT_OP printf(" => %p\n", *ip_ptr); sassert(*ip_ptr); goto **ip_ptr++
#else
#define NEXT_OP goto **ip_ptr++
#endif

#define next_literal_into(val) next_int; val = fast_fetch(cpu_current_literals(state, c), _int)
#define next_literal next_literal_into(_lit)
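/* Open (or reopen) the class named +sym+ under the module +under+ with
 * superclass +sup+. The +created+ out-parameter presumably flags whether a
 * brand new class object had to be constituted rather than reused. */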
OBJECT cpu_open_class(STATE, cpu c, OBJECT under, OBJECT sup, OBJECT sym, int *created) {
  OBJECT val, s1, s2, s3, s4, sup_itr;

  *created = FALSE; /* presumed out-flag; set below when a new class is built */

  /* Evil people could do A = 12; class A::B; end */
  if(!ISA(under, BASIC_CLASS(module))) {
    cpu_raise_exception(state, c,
      cpu_new_exception(state, c, state->global->exc_type,
        "Nesting constant is not a Module"));
    return Qundef;
  }

  val = module_const_get(state, under, sym);

  /* The constant is already defined: validate it and reuse it. */
  if(!NIL_P(val)) {
    if(AUTOLOAD_P(val)) { return val; }

    if(ISA(val, BASIC_CLASS(class))) {
      if(!NIL_P(sup) && class_superclass(state, val) != sup) {
        cpu_raise_exception(state, c,
          cpu_new_exception(state, c, state->global->exc_type,
            "superclass mismatch"));
        return Qundef;
      }
    } else {
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_type,
          "constant is not a class"));
      return Qundef;
    }

    return val;
  }

  /* Not defined yet: create the class and give it a qualified name. */
  val = class_constitute(state, sup, under);
  if(NIL_P(val)) {
    cpu_raise_exception(state, c,
      cpu_new_exception(state, c, state->global->exc_arg,
        "Invalid superclass"));
    return Qundef;
  }

  *created = TRUE;

  printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));

  if(under != state->global->object) {
    s1 = symbol_to_string(state, module_get_name(under));
    s2 = symbol_to_string(state, sym);
    s3 = string_dup(state, s1);
    string_append(state, s3, string_new(state, "::"));
    string_append(state, s3, s2);
    s4 = string_to_sym(state, s3);
    module_set_name(val, s4);
  } else {
    module_set_name(val, sym);
  }

  module_const_set(state, under, sym, val);

  return val;
}
/* Return the module object corresponding to the name or, if
 * the module does not exist yet, create a new one first. */
OBJECT cpu_open_module(STATE, cpu c, OBJECT under, OBJECT sym) {
  OBJECT module, s1;

  module = module_const_get(state, under, sym);

  if(NIL_P(module)) {
    module = module_allocate_mature(state, 0);
    module_setup_fields(state, module);

    printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));

    if(under != BASIC_CLASS(object)) {
      s1 = symbol_to_string(state, module_get_name(under));
      s1 = string_dup(state, s1);
      string_append(state, s1, string_new(state, "::"));
      string_append(state, s1, symbol_to_string(state, sym));
      module_set_name(module, string_to_sym(state, s1));
    } else {
      module_set_name(module, sym);
    }

    module_const_set(state, under, sym, module);
    module_setup_fields(state, object_metaclass(state, module));
    module_set_encloser(module, under);
    module_set_encloser(object_metaclass(state, module), under);
  }

  return module;
}
/* Locate the method object for calling method +name+ on an instance of +klass+.
   +mod+ is updated to point to the Module that holds the method.
   The method is then looked for in the hash tables up the superclass chain.

   Returns TRUE if we found a method object that should be considered;
   returns FALSE if we need to keep looking 'up' for the method. */
static inline int cpu_check_for_method(STATE, cpu c, OBJECT tbl, struct message *msg) {
  OBJECT obj, vis;

  msg->method = lookuptable_fetch(state, tbl, msg->name);

  if(NIL_P(msg->method)) return FALSE;

  /* A 'false' method means to terminate method lookup. (e.g. undef_method) */
  if(FALSE_P(msg->method)) return TRUE;

  /* Private sends skip the visibility machinery entirely (guard presumed
     from the bypass comment below). */
  if(msg->priv) {
    if(TUPLE_P(msg->method)) {
      obj = tuple_at(state, msg->method, 1);
      /* nil means that the actual method object is 'up' from here */
      if(NIL_P(obj)) return FALSE;
    } /* otherwise, bypass all visibility checks */
    return TRUE;
  }

  /* Check that we are allowed to call this method */
  if(TUPLE_P(msg->method)) {
    vis = tuple_at(state, msg->method, 0);

    if(vis == state->global->sym_private) {
      /* We stop on private methods. */
      msg->method = Qfalse;
      return TRUE;
    } else if(vis == state->global->sym_protected) {
      /* If it's protected, bail if the receiver isn't the same
         kind of object as the module that defines the method. */
      if(!object_kind_of_p(state, c->self, msg->module)) {
        msg->method = Qfalse;
        return TRUE;
      }
    }

    obj = tuple_at(state, msg->method, 1);

    /* The method was callable, but we need to keep looking
     * for the implementation, so make the invocation bypass all further
     * visibility checks */
    if(NIL_P(obj)) return FALSE;

    msg->method = obj;
  }

  return TRUE;
}
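/* A method-table entry is either a CompiledMethod itself or a 2-tuple of
 * (visibility symbol, CompiledMethod); UNVIS_METHOD strips the tuple wrapper
 * once visibility has been dealt with. */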
#define UNVIS_METHOD(var) if(TUPLE_P(var)) { var = tuple_at(state, var, 1); }

static inline int cpu_find_method(STATE, cpu c, struct message *msg) {
  OBJECT tbl, klass;
  struct method_cache *ent;

#if USE_GLOBAL_CACHING
  ent = state->method_cache + CPU_CACHE_HASH(msg->klass, msg->name);

  /* We hit a hole. Stop. */
  if(ent->name == msg->name && ent->klass == msg->klass) {

    /* TODO does this need to check for protected? */
    if(msg->priv || ent->is_public) {
      msg->method = ent->method;
      msg->module = ent->module;

      return TRUE;
    }
  } else {
    /* Cache entry did not match; record the stats. (Presumed bookkeeping:
       an occupied slot counts as a collision, every mismatch as a miss.) */
    if(ent->name) state->cache_collisions++;
    state->cache_misses++;
  }
#endif

  klass = msg->klass;

  for(;;) {
    /* Validate klass is valid even. */
    if(NUM_FIELDS(klass) <= CLASS_f_SUPERCLASS) {
      printf("Warning: encountered invalid class (not big enough).\n");
      return FALSE;
    }

    tbl = module_get_method_table(klass);

    /* Ok, rather than assert, i'm going to just bail. Makes the error
       a little strange, but handle-able in ruby land. */
    if(!LOOKUPTABLE_P(tbl)) {
      printf("Warning: encountered invalid module (methods not a LookupTable).\n");
      return FALSE;
    }

    if(cpu_check_for_method(state, c, tbl, msg)) {
      break;
    }

    klass = class_get_superclass(klass);
    if(NIL_P(klass)) break;
  }

  if(!RTEST(msg->method)) return FALSE;

#if USE_GLOBAL_CACHING
  /* Update the cache. */
  if(RTEST(msg->method)) {
    ent->klass = msg->klass;
    ent->name = msg->name;
    ent->module = msg->module;

    if(TUPLE_P(msg->method)) {
      ent->method = NTH_FIELD(msg->method, 1);
      if(NTH_FIELD(msg->method, 0) == state->global->sym_public) {
        ent->is_public = TRUE;
      } else {
        ent->is_public = FALSE;
      }
      msg->method = ent->method;
    } else {
      ent->method = msg->method;
      ent->is_public = TRUE;
    }
  }
#endif

  if(RTEST(msg->method)) {
    UNVIS_METHOD(msg->method);
  }

  return TRUE;
}
OBJECT exported_cpu_find_method(STATE, cpu c, OBJECT klass, OBJECT name, OBJECT *mod) {
  struct message msg;

  msg.name = name;
  msg.klass = klass;
  msg.priv = TRUE; /* presumed: this exported helper ignores visibility */
  msg.recv = Qnil;
  msg.method = Qnil;
  msg.module = Qnil;

  if(!cpu_find_method(state, c, &msg)) {
    *mod = Qnil;
    return Qnil;
  }

  *mod = msg.module;
  return msg.method;
}

OBJECT cpu_locate_method_on(STATE, cpu c, OBJECT obj, OBJECT sym, OBJECT include_private) {
  struct message msg;

  msg.recv = obj;
  msg.name = sym;
  msg.klass = _real_class(state, obj);
  msg.priv = TRUE_P(include_private);
  msg.method = Qnil;
  msg.module = Qnil;

  if(cpu_find_method(state, c, &msg)) {
    if(RTEST(msg.method)) {
      return tuple_new2(state, 2, msg.method, msg.module);
    }
  }

  return Qnil;
}
static inline int cpu_locate_method(STATE, cpu c, struct message *msg) {
  int ret;
  struct message missing;

  if(RUBINIUS_VM_LOOKUP_BEGIN_ENABLED()) {
    RUBINIUS_VM_LOOKUP_BEGIN();
  }

  ret = TRUE;
  msg->missing = 0;

  if(cpu_find_method(state, c, msg)) goto done;

  /* Couldn't find it directly: fall back to method_missing, looked up
     against the same receiver class (the message copy is presumed). */
  missing = *msg;
  missing.name = state->global->method_missing;
  missing.priv = TRUE;

  /* If we couldn't even find method_missing. bad. */
  if(!cpu_find_method(state, c, &missing)) { ret = FALSE; goto done; }

  msg->method = missing.method;
  msg->module = missing.module;
  msg->missing = 1;

done:
  if(RUBINIUS_VM_LOOKUP_END_ENABLED()) {
    RUBINIUS_VM_LOOKUP_END();
  }

  // printf("Found method: %p\n", mo);

  return ret;
}
static inline OBJECT cpu_check_serial(STATE, cpu c, OBJECT obj, OBJECT sym, int serial) {
  struct message msg;

  msg.name = sym;
  msg.recv = obj;
  msg.klass = _real_class(state, obj);
  msg.priv = TRUE;
  msg.method = Qnil;
  msg.module = Qnil;

  if(!cpu_find_method(state, c, &msg)) {
    return Qfalse;
  }

  if(N2I(fast_fetch(msg.method, CMETHOD_f_SERIAL)) == serial) {
    return Qtrue;
  }

  return Qfalse;
}
OBJECT cpu_compile_method(STATE, OBJECT cm) {
  OBJECT ba, bc;
  int target_size;

  ba = cmethod_get_compiled(cm);
  bc = cmethod_get_bytecodes(cm);

#if DIRECT_THREADED
  /* If we're direct threaded, the compiled version is an array of the pointer
     sized handler addresses, one per 32-bit opcode word. */
  target_size = (BYTEARRAY_SIZE(bc) / sizeof(uint32_t)) * sizeof(uintptr_t);
#else
  target_size = BYTEARRAY_SIZE(bc);
#endif

  if(NIL_P(ba) || BYTEARRAY_SIZE(ba) < target_size) {
    /* First time this method has been compiled, or size of current
       bytearray is insufficient to hold revised bytecode */
    ba = bytearray_new(state, target_size);
  }

  cpu_compile_instructions(state, bc, ba);
  cmethod_set_compiled(cm, ba);

  return ba;
}

void cpu_compile_instructions(STATE, OBJECT bc, OBJECT comp) {
  /* If this is not a big endian platform, we need to adjust
     the iseq to have the right order */
#if !CONFIG_BIG_ENDIAN && !DIRECT_THREADED
  iseq_flip(state, bc, comp);
#elif DIRECT_THREADED
  /* If we're compiled with direct threading, then translate
     the compiled version into addresses. */
  calculate_into_gotos(state, bc, comp, _dt_addresses, _dt_size);
#endif
}
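/* A note on dispatch: with DIRECT_THREADED, every opcode in a method is
 * rewritten (by calculate_into_gotos above) into the address of the label
 * that implements it, so NEXT_OP can simply do `goto **ip_ptr++` with no
 * switch or table lookup. Without direct threading the bytecode stays as
 * 32-bit opcodes and is dispatched by the generated instructions.gen code. */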
static inline OBJECT _allocate_context(STATE, cpu c, OBJECT meth, int locals) {
  OBJECT ctx, ins;
  struct fast_context *fc;
  int i;

  ctx = object_memory_new_context(state->om, locals);
  if(ctx >= state->om->context_last) {
    state->om->collect_now |= OMCollectYoung;
  }

  ins = fast_fetch(meth, CMETHOD_f_COMPILED);
  if(NIL_P(ins)) {
    ins = cpu_compile_method(state, meth);
  }

  ctx->field_count = FASTCTX_FIELDS;

  fc = FASTCTX(ctx);
  fc->sender = c->active_context;
  fc->custom_iseq = Qnil;
  fc->data = bytearray_byte_address(state, ins);
  fc->literals = fast_fetch(meth, CMETHOD_f_LITERALS);
  fc->method = meth;

  fc->locals = object_memory_context_locals(ctx);
  CLEAR_FLAGS(fc->locals);
  fc->locals->gc_zone = 0;
  fc->locals->klass = BASIC_CLASS(tuple);
  SET_NUM_FIELDS(fc->locals, locals);

  for(i = 0; i < locals; i++) {
    SET_FIELD_DIRECT(fc->locals, i, Qnil);
  }

  // printf("Locals for %p at %p (%d, %d)\n", ctx, fc->locals, num_lcls, FASTCTX(ctx)->size);

  return ctx;
}
static inline OBJECT cpu_create_context(STATE, cpu c, const struct message *msg) {
  OBJECT ctx;
  struct fast_context *fc;

  ctx = _allocate_context(state, c, msg->method, N2I(cmethod_get_local_count(msg->method)));
  fc = FASTCTX(ctx);

  /* fp points to the location on the stack as the context
     was being created. */
  fc->fp = c->sp; /* presumed from the comment above */

  fc->block = msg->block;
  fc->self = msg->recv;
  fc->argcount = msg->args;
  fc->name = msg->name;
  fc->method_module = msg->module;
  fc->type = FASTCTX_NORMAL;

  if(RUBINIUS_FUNCTION_ENTRY_ENABLED()) {
    dtrace_function_entry(state, c, msg);
  }

  return ctx;
}
OBJECT cpu_create_block_context(STATE, cpu c, OBJECT env, int sp) {
  OBJECT ctx;
  struct fast_context *fc;

  ctx = _allocate_context(state, c, blokenv_get_method(env),
          N2I(blokenv_get_local_count(env)));
  fc = FASTCTX(ctx);

  fc->ip = N2I(blokenv_get_initial_ip(env));
  fc->sp = sp;

  fc->method_module = Qnil;
  fc->type = FASTCTX_BLOCK;

  return ctx;
}
void cpu_raise_from_errno(STATE, cpu c, const char *msg) {
  OBJECT cls;
  char buf[32];

  cls = lookuptable_fetch(state, state->global->errno_mapping, I2N(errno));
  if(NIL_P(cls)) {
    cls = state->global->exc_arg;
    snprintf(buf, sizeof(buf), "Unknown errno %d", errno);
    msg = buf;
  }

  cpu_raise_exception(state, c, cpu_new_exception(state, c, cls, msg));
}
void cpu_raise_arg_error_generic(STATE, cpu c, const char *msg) {
  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}

void cpu_raise_arg_error(STATE, cpu c, int args, int req) {
  char msg[1024];
  snprintf(msg, 1024, "wrong number of arguments (got %d, required %d)", args, req);

  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}

void cpu_raise_primitive_failure(STATE, cpu c, int primitive_idx) {
  char msg[1024];
  OBJECT primitive_failure;
  snprintf(msg, 1024, "Primitive with index (%d) failed", primitive_idx);

  primitive_failure = cpu_new_exception(state, c, state->global->exc_primitive_failure, msg);
  cpu_raise_exception(state, c, primitive_failure);
}
static int cpu_execute_primitive(STATE, cpu c, const struct message *msg, int prim) {
  if(RUBINIUS_FUNCTION_PRIMITIVE_ENTRY_ENABLED()) {
    dtrace_function_primitive_entry(state, c, msg);
  }

  c->in_primitive = prim;
  if(cpu_perform_system_primitive(state, c, prim, msg)) {
    /* The primitive succeeded. */
    c->in_primitive = 0;

    if(EXCESSIVE_TRACING) {
      printf("%05d: Called prim %s => %s on %s.\n", c->depth,
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        rbs_symbol_to_cstring(state, msg->name), _inspect(msg->recv));
    }

    if(RUBINIUS_FUNCTION_PRIMITIVE_RETURN_ENABLED()) {
      dtrace_function_primitive_return(state, c, msg);
    }

    return TRUE;
  }

  c->in_primitive = 0;

  if(EXCESSIVE_TRACING) {
    printf("[[ Primitive failed! -- %d ]]\n", prim);
  }

  return FALSE;
}

static inline int cpu_try_primitive(STATE, cpu c, const struct message *msg) {
  int prim;
  OBJECT prim_obj;

  prim_obj = fast_fetch(msg->method, CMETHOD_f_PRIMITIVE);

  if(NIL_P(prim_obj)) {
    return FALSE;
  } else if(!FIXNUM_P(prim_obj)) {
    if(SYMBOL_P(prim_obj)) {
      prim = calc_primitive_index(state, symbol_to_string(state, prim_obj));
    } else {
      prim = -1;
    }
    cmethod_set_primitive(msg->method, I2N(prim));
  } else {
    prim = N2I(prim_obj);
  }

  if(prim < 0) {
    cmethod_set_primitive(msg->method, Qnil);
    return FALSE;
  }

  return cpu_execute_primitive(state, c, msg, prim);
}
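/* Note on primitives: CMETHOD_f_PRIMITIVE starts out holding a Symbol naming
 * the primitive. The first call through cpu_try_primitive resolves that name
 * to an integer index and caches it back on the CompiledMethod (or stores nil
 * if the name is unknown), so later calls only pay for a fixnum fetch. */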
/* Raw-most (lowest level) functions for moving in a method. Adjusts registers. */
/* Stack offset is used to adjust sp when it's saved so when
   this context is swapped back in, any arguments are automatically
   removed from the stack */
inline void cpu_save_registers(STATE, cpu c, int offset) {
  struct fast_context *fc;

  fc = FASTCTX(c->active_context);
  fc->sp = c->sp - offset;
  fc->ip = c->ip;
}

inline void cpu_yield_debugger_check(STATE, cpu c) {
  /* Yield to the debugger if flag is set */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    if(EXCESSIVE_TRACING) {
      printf("Yielding to debugger due to context change\n");
    }
    cpu_yield_debugger(state, c);
  }
}
inline void cpu_restore_context_with_home(STATE, cpu c, OBJECT ctx, OBJECT home) {
  struct fast_context *fc;

  /* Home is actually the main context here because it's the method
     context that holds all the data. So if it's a fast, we restore
     its data, then if ctx != home, we restore a little more */

  fc = FASTCTX(home);
  CHECK_PTR(fc->method);

  c->argcount = fc->argcount;

  /* Only happens if we're restoring a block. */
  if(ctx != home) {
    fc = FASTCTX(ctx);
  }

  c->locals = FASTCTX(home)->locals;
  c->sender = fc->sender;

  c->home_context = home;
  c->active_context = ctx;
}
/* Layer 2 method movement: use lower level only. */

/* Used in debugging. Verifies that the expected depth is the actual depth. */
static void _verify_depth(cpu c) {
  int count = 0;
  OBJECT ctx = c->active_context;

  while(!NIL_P(ctx)) {
    count++;
    ctx = FASTCTX(ctx)->sender;
  }

  assert(count == c->depth);
}
inline void cpu_activate_context(STATE, cpu c, OBJECT ctx, OBJECT home, int so) {
  if(c->active_context != Qnil) {
    cpu_save_registers(state, c, so);
  }

  cpu_restore_context_with_home(state, c, ctx, home);
  cpu_yield_debugger_check(state, c);
}
/* Layer 2.5: Uses lower layers to return to the calling context.
   Returning ends here. */

void nmc_activate(STATE, cpu c, OBJECT nmc, OBJECT val, int reraise);

inline int cpu_simple_return(STATE, cpu c, OBJECT val) {
  OBJECT current, destination, home;

  if(RUBINIUS_FUNCTION_RETURN_ENABLED()) {
    dtrace_function_return(state, c);
  }

  current = c->active_context;
  c->active_context = Qnil;
  destination = cpu_current_sender(c);

  // printf("Rtrnng frm %p (%d)\n", current, FASTCTX(current)->size);

  if(destination == Qnil) {
    object_memory_retire_context(state->om, current);

    /* Thread exiting, reschedule.. */
    if(c->current_thread != c->main_thread) {
      THDEBUG("%d: thread reached end, dead.\n", getpid());
      cpu_thread_exited(state, c);
      return FALSE;
    /* Switch back to the main task... */
    } else if(c->current_task != c->main_task) {
      cpu_task_select(state, c, c->main_task);
      return FALSE;
    }

    /* The return value of the script is passed on the stack. */
    stack_push(val);
  } else {
    /* retire this one context. */
    object_memory_retire_context(state->om, current);

    /* Now, figure out if the destination is a block, so we pass the correct
       home to restore_context */
    if(block_context_p(state, destination)) {
      home = blokctx_home(state, destination);
    } else {
      home = destination;
    }

    if(EXCESSIVE_TRACING) {
      if(stack_context_p(destination)) {
        printf("Returning to a stack context %d / %d (%s).\n", (int)c->active_context, (int)destination,
               c->active_context - destination == CTX_SIZE ? "stack" : "REMOTE");
      } else {
        printf("Returning to %s.\n", _inspect(destination));
      }
    }

    if(FASTCTX(home)->type == FASTCTX_NMC) {
      nmc_activate(state, c, home, val, FALSE);
      /* We return because nmc_activate will setup the cpu to do whatever
         it needs to. */
      return TRUE;
    } else {
      cpu_restore_context_with_home(state, c, destination, home);
      stack_push(val);
    }
  }

  return TRUE;
}
819 int cpu_unwind(STATE
, cpu c
) {
820 OBJECT current
, destination
, home
;
821 current
= c
->active_context
;
822 c
->active_context
= Qnil
;
823 destination
= cpu_current_sender(c
);
826 if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
827 dtrace_function_return(state
, c
);
833 if(destination
== Qnil
) {
834 object_memory_retire_context(state
->om
, current
);
836 /* Thread exitting, reschedule.. */
837 if(c
->current_thread
!= c
->main_thread
) {
838 THDEBUG("%d: thread reached end, dead.\n", getpid());
839 cpu_thread_exited(state
, c
);
841 /* Switch back to the main task... */
842 } else if(c
->current_task
!= c
->main_task
) {
843 cpu_task_select(state
, c
, c
->main_task
);
850 /* retire this one context. */
851 object_memory_retire_context(state
->om
, current
);
853 /* Now, figure out if the destination is a block, so we pass the correct
854 home to restore_context */
855 if(block_context_p(state
, destination
)) {
856 home
= blokctx_home(state
, destination
);
861 /* Commenting out 02.01.08 - Caleb Tennis.
862 I don't know the purpose of this code, but if an exception is throws from
863 a rb_funcall in subtend, this causes an endless loop in cpu_return_to_sender.
865 Commenting it out fixes that for now.
866 Hopefully someone smarter than me knows a better fix for the future.
868 Skip over NMCs for now.
870 if(exception && FASTCTX(destination)->type == FASTCTX_NMC) {
871 c->active_context = destination;
872 return cpu_return_to_sender(state, c, val, FALSE, TRUE);
877 /* Ok, reason we'd be restoring a native context:
878 1) the native context used rb_funcall and we need to return
879 it the result of the call.
881 cpu_restore_context_with_home(state
, c
, destination
, home
);
/* Layer 3: goto. Basically jumps directly into the specified method.
   No lookup required. */

inline void cpu_goto_method(STATE, cpu c, OBJECT recv, OBJECT meth,
                            int count, OBJECT name, OBJECT block) {
  OBJECT ctx;
  struct message msg;

  /* Presumed message setup: every field below comes straight from the parameters. */
  msg.recv = recv;
  msg.method = meth;
  msg.klass = _real_class(state, recv);
  msg.name = name;
  msg.block = block;
  msg.args = count;
  msg.priv = TRUE;
  msg.missing = 0;
  msg.module = Qnil;
  msg.send_site = Qnil;

  if(cpu_try_primitive(state, c, &msg)) return;

  OBJECT scope = cmethod_get_staticscope(meth);
  if(NIL_P(scope)) {
    cmethod_set_staticscope(meth, state->global->top_scope);
  }

  ctx = cpu_create_context(state, c, &msg);
  cpu_activate_context(state, c, ctx, ctx, 0);
}
/* Layer 3: hook. Shortcut for running hook methods. */

inline void cpu_perform_hook(STATE, cpu c, OBJECT recv, OBJECT meth, OBJECT arg) {
  OBJECT rub, vm;
  struct message msg;

  msg.name = meth;
  msg.recv = recv;
  msg.klass = _real_class(state, recv);
  msg.priv = TRUE;
  msg.method = Qnil;
  msg.module = Qnil;

  if(!cpu_find_method(state, c, &msg)) return;

  rub = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
  if(NIL_P(rub)) return;

  vm = rbs_const_get(state, rub, "VM");
  if(NIL_P(vm)) return;

  /* The top of the stack contains the value that should remain on the stack.
     We pass that to the perform_hook call so it is returned and stays on
     the top of the stack. That's why we say there are 4 args. */
  stack_push(arg);
  stack_push(meth);
  stack_push(recv);

  cpu_send(state, c, vm, SYM("perform_hook"), 4, Qnil);
}
/* Layer 4: High level method calling. */

/* Layer 4: direct activation. Used for calling a method that's already
   been looked up. */
static inline void cpu_activate_method(STATE, cpu c, struct message *msg) {
  OBJECT ctx;

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  ctx = cpu_create_context(state, c, msg);

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
}

static inline void cpu_perform(STATE, cpu c, const struct message *msg) {
  OBJECT ctx;

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
  cpu_yield_debugger_check(state, c);
}
static inline void cpu_patch_mono(struct message *msg);

static inline void cpu_patch_missing(struct message *msg);

static void _cpu_ss_basic(struct message *msg) {
  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If it's not method_missing, cache the details of msg in the send_site */
  if(!msg->missing) {
    cpu_patch_mono(msg);
  } else {
    cpu_patch_missing(msg);
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_initialize_sendsite(STATE, struct send_site *ss) {
  ss->lookup = _cpu_ss_basic;
}

static void _cpu_ss_disabled(struct message *msg) {
  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If the method was missing, push the name so method_missing knows what was called */
  if(msg->missing) {
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_patch_disabled(struct message *msg, struct send_site *ss) {
  ss->data1 = ss->data2 = ss->data3 = Qnil;
  ss->lookup = _cpu_ss_disabled;

  _cpu_ss_disabled(msg);
}
#define SS_DISABLE_THRESHOLD 10000
#define SS_MISSES(ss) if(++ss->misses > SS_DISABLE_THRESHOLD) { cpu_patch_disabled(msg, ss); } else

/* Send Site specialization 1: execute a primitive directly. */

#define CHECK_CLASS(msg) (_real_class(msg->state, msg->recv) != SENDSITE(msg->send_site)->data1)
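/* Send-site specialization: each send_site carries a +lookup+ function pointer
 * that is patched to one of the _cpu_ss_* handlers below. The specialized
 * handlers cache the receiver class in data1 (checked by CHECK_CLASS), the
 * method in data2 and its module in data3; data4/c_data hold the primitive
 * index or C function pointer for the primitive/FFI variants. When the class
 * check fails, SS_MISSES either falls back to the generic slow path or, past
 * SS_DISABLE_THRESHOLD misses, disables the site for good. */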
static void _cpu_ss_mono_prim(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);
  prim_func func;
  OBJECT *_orig_sp_ptr;
  cpu c = msg->c;

  if(CHECK_CLASS(msg)) {
    /* Receiver class changed: count a miss and fall back to the generic
       lookup (fallback target presumed). */
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  _orig_sp_ptr = c->sp_ptr;

  func = (prim_func)ss->c_data;

  msg->method = ss->data2;
  msg->module = ss->data3;

  c->in_primitive = ss->data4;

  if(!func(msg->state, msg->c, msg)) {
    /* The fast primitive failed: undo its stack effects and fall back to
       the normal (slow) activation path. */
    c->in_primitive = 0;
    c->sp_ptr = _orig_sp_ptr;

    cpu_perform(msg->state, msg->c, msg);
  } else {
    c->in_primitive = 0;
  }
}
/* Called before a primitive is run the slow way, allowing the send_site to be patched
 * to call the primitive directly. */
void cpu_patch_primitive(STATE, const struct message *msg, prim_func func, int prim) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  /* If this sendsite is disabled, leave it disabled. */
  if(ss->lookup == _cpu_ss_disabled) return;

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->data4 = prim;
  ss->c_data = (void*)func;
  ss->lookup = _cpu_ss_mono_prim;
}
/* Send Site specialization 2: Run an ffi function directly. */
static void _cpu_ss_mono_ffi(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  ffi_call(msg->state, msg->c, nfunc_get_data(ss->data2));
}

/* Called before an FFI function is run the slow way, allowing the send_site to be patched
 * to call the function directly. */
void cpu_patch_ffi(STATE, const struct message *msg) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->c_data = *DATA_STRUCT(nfunc_get_data(msg->method), void**);
  ss->lookup = _cpu_ss_mono_ffi;

  return;

  /* Unreachable below: what appears to be an earlier variant of this patch
     routine, left in place. */
  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);
  SET_STRUCT_FIELD(msg->send_site, ss->data1, msg->recv);
  SET_STRUCT_FIELD(msg->send_site, ss->data2, nfunc_get_data(msg->method));
  ss->lookup = _cpu_ss_mono_ffi;
}
/* Send Site specialization 3: simple monomorphic last-implementation cache. */
static void _cpu_ss_mono(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  msg->method = ss->data2;
  msg->module = ss->data3;

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
}
/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_mono in +ss+, so
 * that the next time +ss+ is used, it will try the cached details. */
static inline void cpu_patch_mono(struct message *msg) {
  const STATE = msg->state;

  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_mono;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}
static void _cpu_ss_missing(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  msg->method = ss->data2;
  msg->module = ss->data3;

  stack_push(msg->name);

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
}

/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_missing in +ss+, so
 * that the next time +ss+ is used, it will try the cached details. */
static inline void cpu_patch_missing(struct message *msg) {
  const STATE = msg->state;
  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_missing;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}
static void _cpu_on_no_method(STATE, cpu c, const struct message *msg) {
  OBJECT exc;
  char str[1024];

  exc = rbs_const_get(state, BASIC_CLASS(object), "RuntimeError");

  snprintf(str, 1024, "Unable to find any version of '%s' to run", _inspect(msg->name));

  cpu_raise_exception(state, c, cpu_new_exception(state, c, exc, str));
}
/* Layer 4: send. Primary method calling function. */
inline void cpu_send_message(STATE, cpu c, struct message *msg) {
  struct send_site *ss;

  uint64_t start = measure_cpu_time();

  ss = SENDSITE(msg->send_site);

  msg->state = state;
  msg->c = c;
  msg->name = ss->name;

  /* Dispatch through whichever specialized lookup handler is currently
     installed on the send site. */
  ss->lookup(msg);

  state->lookup_time += (measure_cpu_time() - start);
}
void cpu_send_message_external(STATE, cpu c, struct message *msg) {
  OBJECT ctx;

  if(!cpu_locate_method(state, c, msg)) {
    _cpu_on_no_method(state, c, msg);
    return;
  }

  if(msg->missing) {
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
}

/* A version used when there is no send_site. */
void cpu_send(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {
  struct message msg;

  msg.recv = recv;
  msg.name = sym;
  msg.args = args;
  msg.block = block;
  msg.klass = _real_class(state, recv);
  msg.priv = c->call_flags;
  msg.missing = 0;
  msg.send_site = Qnil;
  msg.method = Qnil;
  msg.module = Qnil;

  cpu_send_message_external(state, c, &msg);
}
void cpu_raise_exception(STATE, cpu c, OBJECT exc) {
  OBJECT ctx, table, ent;
  int cur, total, target, idx, l, r;

  c->exception = exc; /* presumed: record the exception being raised */
  ctx = c->active_context;

  cpu_save_registers(state, c, 0);

  /* NOTE: using return_to_sender worries me a little because it can
     switch to a different task if you try to return off the top
     of a task. */

  while(!NIL_P(ctx)) {
    if(c->type == FASTCTX_NMC) goto skip;

    table = cmethod_get_exceptions(cpu_current_method(state, c));

    if(!table || NIL_P(table)) goto skip;

    cur = c->ip;
    total = NUM_FIELDS(table);
    for(idx = 0; idx < total; idx++) {
      ent = tuple_at(state, table, idx);
      l = N2I(tuple_at(state, ent, 0));
      r = N2I(tuple_at(state, ent, 1));
      if(cur >= l && cur <= r) {
        target = N2I(tuple_at(state, ent, 2));
        /* Found a handler covering the current ip: resume there. */
        c->ip = target;
        return;
      }
    }

skip:
    /* unwind returns FALSE if we can't unwind anymore. */
    if(!cpu_unwind(state, c)) break;
    ctx = c->active_context;
  }

  /* Reset it because it can get overridden in the return_to_senders. */
  c->exception = exc;

  // printf("Unable to find exception handler, i'm confused.\n");
}
void cpu_yield_debugger(STATE, cpu c) {
  /* Ensure the DEBUG_ON_CTXT_CHANGE flag is cleared so we don't try
     to yield more than once */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    TASK_CLEAR_FLAG(c, TASK_DEBUG_ON_CTXT_CHANGE);
    struct cpu_task *task = (struct cpu_task*)BYTES_OF(c->current_task);
    TASK_CLEAR_FLAG(task, TASK_DEBUG_ON_CTXT_CHANGE);
  }

  methctx_reference(state, c->active_context);

  OBJECT dbg = c->debug_channel;
  if(dbg == Qnil) {
    /* No debug channel on the task, so use the VM default one (if any) */
    OBJECT mod, vm;
    mod = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
    if(!NIL_P(mod)) {
      vm = rbs_const_get(state, mod, "VM");
      if(!NIL_P(vm)) {
        dbg = object_get_ivar(state, vm, SYM("@debug_channel"));
      }
    }
  }

  if(dbg != Qnil) {
    if(c->control_channel == Qnil) {
      /* No control channel on the task, so create one */
      c->control_channel = cpu_channel_new(state);
    }

    sassert(cpu_channel_has_readers_p(state, dbg));
    cpu_channel_send(state, c, dbg, c->current_thread);
    /* This is so when this task is reactivated, the sent value won't be placed
       on the stack, keeping the stack clean. */
    TASK_SET_FLAG(c, TASK_NO_STACK);
    cpu_channel_receive(state, c, c->control_channel, c->current_thread);
  } else {
    cpu_raise_arg_error_generic(state, c, "Attempted to switch to debugger, no debugger installed");
  }
}
const char *cpu_op_to_name(STATE, char op) {
#include "shotgun/lib/instruction_names.h"
  return get_instruction_name(op);
}

void state_collect(STATE, cpu c);
void state_major_collect(STATE, cpu c);
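/* The "firesuit" below is the VM's last-ditch fault recovery: cpu_run saves a
 * context with getcontext(), and low-level code that detects a fatal condition
 * (bad field access, null reference, failed assertion, bad type, stack
 * overflow) jumps back here (presumably via setcontext) with
 * g_access_violation set, so the fault can be surfaced as an ordinary Ruby
 * exception instead of killing the process. */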
void cpu_run(STATE, cpu c, int setup) {
  IP_TYPE *ip_ptr = NULL;
  const char *firesuit_arg;
  struct rubinius_globals *global = state->global;

  c->ip_ptr = &ip_ptr;

  /* recache ip_ptr to make it valid. */
  cpu_cache_ip(c);

  current_machine->g_use_firesuit = 1;
  current_machine->g_access_violation = 0;
  getcontext(&current_machine->g_firesuit);

  /* Ok, we jumped back here because something went south. */
  if(current_machine->g_access_violation) {
    /* (the specific FIRE_* case labels are presumed from each message) */
    switch(current_machine->g_access_violation) {
    case FIRE_ACCESS:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Accessed outside bounds of object"));
      break;
    case FIRE_NULL:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Attempted to access field of non-reference (null pointer)"));
      break;
    case FIRE_ASSERT:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "VMAssertion"),
          "An error has occurred within the VM"));
      break;
    case FIRE_TYPE:
      object_type_to_type(current_machine->g_firesuit_arg, firesuit_arg);
      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Invalid type encountered %s: %s",
          current_machine->g_firesuit_message, firesuit_arg));
      free(current_machine->g_firesuit_message);
      break;
    case FIRE_STACK:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "SystemStackError"),
          "Maximum amount of stack space used"));
      break;
    default:
      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Unknown firesuit reason: %d", current_machine->g_access_violation));
      break;
    }
  }

  while(c->active_context != Qnil) {

#if DIRECT_THREADED
    if(EXCESSIVE_TRACING) {
      printf("%-15s: => %p\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        (void*)*ip_ptr);
    }

#include "shotgun/lib/instruction_dt.gen"

#else
    IP_TYPE op;
    op = *ip_ptr++;

    if(EXCESSIVE_TRACING) {
      printf("%-15s: OP: %s (%d/%d/%d)\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        cpu_op_to_name(state, op), op, c->ip, c->sp);
    }

#include "shotgun/lib/instructions.gen"
#endif

    if(state->om->collect_now) {
      if(RUBINIUS_GC_BEGIN_ENABLED()) {
        dtrace_gc_begin(state);
      }

      int cm = state->om->collect_now;

      /* Collect the first generation. */
      if(cm & OMCollectYoung) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting young objects. ]]\n");
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
        }
        state_collect(state, c);
        if(EXCESSIVE_TRACING) {
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
          printf("[[ Finished collect. ]]\n");
        }
      }

      /* Collect the old generation. */
      if(cm & OMCollectMature) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting old objects. ]]\n");
        }
        state_major_collect(state, c);
        // printf("Done with major collection.\n");
      }

      /* If someone is reading the ON_GC channel, write to it to notify them. */
      if(cpu_channel_has_readers_p(state, state->global->on_gc_channel)) {
        cpu_channel_send(state, c, state->global->on_gc_channel, Qtrue);
      }

      state->om->collect_now = 0;

      if(RUBINIUS_GC_END_ENABLED()) {
        dtrace_gc_end(state);
      }
    }

    if(state->check_events) {
      state->check_events = 0;
      if(state->pending_events) cpu_event_runonce(state);
      if(state->pending_threads) cpu_thread_preempt(state, c);
    }
  }
}
void cpu_run_script(STATE, cpu c, OBJECT meth) {
  OBJECT name;

  name = string_to_sym(state, string_new(state, "__script__"));
  cpu_goto_method(state, c, c->main, meth, 0, name, Qnil);
}