Change soft-fail to use the config, rather than env
[rbx.git] / shotgun / lib / cpu_instructions.c
blob c71ddb1e610de2fa3fadbd7afcb89248458d1ac3
1 #include <string.h>
2 #include <errno.h>
4 #include "shotgun/lib/shotgun.h"
5 #include "shotgun/lib/cpu.h"
6 #include "shotgun/lib/tuple.h"
7 #include "shotgun/lib/module.h"
8 #include "shotgun/lib/class.h"
9 #include "shotgun/lib/hash.h"
10 #include "shotgun/lib/lookuptable.h"
11 #include "shotgun/lib/methctx.h"
12 #include "shotgun/lib/array.h"
13 #include "shotgun/lib/string.h"
14 #include "shotgun/lib/symbol.h"
15 #include "shotgun/lib/machine.h"
16 #include "shotgun/lib/bytearray.h"
17 #include "shotgun/lib/fixnum.h"
18 #include "shotgun/lib/primitive_util.h"
19 #include "shotgun/lib/sendsite.h"
20 #include "shotgun/lib/subtend/ffi.h"
21 #include "shotgun/lib/subtend/nmc.h"
23 #if CONFIG_ENABLE_DTRACE
24 #include "shotgun/lib/dtrace_probes.h"
25 #endif
27 #include <sys/time.h>
29 #if TIME_LOOKUP
31 #include <stdint.h>
32 #include <time.h>
33 #include <mach/mach.h>
34 #include <mach/mach_time.h>
36 #define measure_cpu_time mach_absolute_time
38 void cpu_show_lookup_time(STATE) {
39 struct mach_timebase_info timeinfo;
40 uint64_t nano;
41 double seconds, total;
43 mach_timebase_info(&timeinfo);
45 nano = (state->lookup_time * timeinfo.numer / timeinfo.denom);
47 seconds = (double)(nano / (double)1000000000);
49 nano = ((mach_absolute_time() - state->system_start) * timeinfo.numer / timeinfo.denom);
51 total = (double)(nano / (double)1000000000);
53 printf("Total time: % 3.3f\n", total);
54 printf("Lookup time: % 3.3f\n", seconds);
55 printf("Percent: % 3.3f\n", (seconds / total) * 100);
58 #endif
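/* A minimal sketch, not from the original file: on platforms without the Mach
 * timebase APIs, a TIME_LOOKUP port could take its samples from the POSIX
 * monotonic clock instead (assumes clock_gettime/CLOCK_MONOTONIC is available).
 * Note that the Mach path above returns raw ticks and converts them to
 * nanoseconds later in cpu_show_lookup_time; this variant returns nanoseconds
 * directly, so that conversion would have to be dropped. */
#if 0
#include <time.h>
static uint64_t measure_cpu_time_posix(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}
#endif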
60 #define RISA(obj,cls) (REFERENCE_P(obj) && ISA(obj,BASIC_CLASS(cls)))
62 #define next_int_into(val) val = *ip_ptr++;
63 #define next_int next_int_into(_int);
65 #if DIRECT_THREADED
66 #include "shotgun/lib/instruction_funcs.gen"
67 DT_ADDRESSES;
69 #ifdef SHOW_OPS
70 #define NEXT_OP printf(" => %p\n", *ip_ptr); sassert(*ip_ptr); goto **ip_ptr++
71 #else
72 #define NEXT_OP goto **ip_ptr++
73 #endif
75 #endif
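/* A minimal sketch, not from this file, of the direct-threading idea NEXT_OP
 * relies on: the "compiled" instruction stream holds the addresses of handler
 * labels (GCC's labels-as-values extension), so dispatch is a single computed
 * goto rather than a switch on an opcode. */
#if 0
static int direct_threading_sketch(void) {
  void *prog[2];
  void **ip_ptr;
  prog[0] = &&op_push_nil;     /* each slot is a handler address */
  prog[1] = &&op_halt;
  ip_ptr = prog;
  goto **ip_ptr++;             /* this is what NEXT_OP expands to */
op_push_nil:
  /* ... do the work for this instruction ... */
  goto **ip_ptr++;             /* dispatch the next instruction */
op_halt:
  return 0;
}
#endif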
77 #define next_literal_into(val) next_int; val = fast_fetch(cpu_current_literals(state, c), _int)
78 #define next_literal next_literal_into(_lit)
80 OBJECT cpu_open_class(STATE, cpu c, OBJECT under, OBJECT sup, OBJECT sym, int *created) {
81 OBJECT val, s1, s2, s3, s4, sup_itr;
83 *created = FALSE;
85 /* Evil people could do A = 12; class A::B; end */
86 if(!ISA(under, BASIC_CLASS(module))) {
87 cpu_raise_exception(state, c,
88 cpu_new_exception(state, c, state->global->exc_type, "Nesting constant is not a Module"));
89 return Qundef;
92 val = module_const_get(state, under, sym);
93 if(RTEST(val)) {
94 if(AUTOLOAD_P(val)) { return val; }
95 if(ISA(val, BASIC_CLASS(class))) {
96 if(!NIL_P(sup) && class_superclass(state, val) != sup) {
97 cpu_raise_exception(state, c,
98 cpu_new_exception(state, c, state->global->exc_type, "superclass mismatch"));
99 return Qundef;
101 } else {
102 cpu_raise_exception(state, c,
103 cpu_new_exception(state, c, state->global->exc_type, "constant is not a class"));
104 return Qundef;
107 return val;
108 } else {
109 val = class_constitute(state, sup, under);
110 if(NIL_P(val)) {
111 cpu_raise_exception(state, c,
112 cpu_new_exception(state, c, state->global->exc_arg, "Invalid superclass"));
113 return Qundef;
116 *created = TRUE;
119 printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));
121 if(under != state->global->object) {
122 s1 = symbol_to_string(state, module_get_name(under));
123 s2 = symbol_to_string(state, sym);
124 s3 = string_dup(state, s1);
125 string_append(state, s3, string_new(state, "::"));
126 string_append(state, s3, s2);
127 s4 = string_to_sym(state, s3);
128 module_set_name(val, s4);
129 } else {
130 module_set_name(val, sym);
132 module_const_set(state, under, sym, val);
133 sup_itr = sup;
135 return val;
138 /* Return the module object corresponding to the name or, if
139 * the module does not exist yet, create a new one first.
141 OBJECT cpu_open_module(STATE, cpu c, OBJECT under, OBJECT sym) {
142 OBJECT module, s1;
144 module = module_const_get(state, under, sym);
145 if(!RTEST(module)) {
146 module = module_allocate_mature(state, 0);
147 module_setup_fields(state, module);
150 printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));
152 if(under != BASIC_CLASS(object)) {
153 s1 = symbol_to_string(state, module_get_name(under));
154 s1 = string_dup(state, s1);
155 string_append(state, s1, string_new(state, "::"));
156 string_append(state, s1, symbol_to_string(state, sym));
157 module_set_name(module, string_to_sym(state, s1));
158 } else {
159 module_set_name(module, sym);
162 module_const_set(state, under, sym, module);
163 module_setup_fields(state, object_metaclass(state, module));
164 module_set_encloser(module, under);
165 module_set_encloser(object_metaclass(state, module), under);
168 return module;
171 /* Locate the method object for calling method +name+ on an instance of +klass+.
172 +mod+ is updated to point to the Module that holds the method.
173 The method is then looked for in the hash tables up the superclass chain.
174 returns TRUE if we found a method object that should be considered
175 returns FALSE if we need to keep looking 'up' for the method
177 static inline int cpu_check_for_method(STATE, cpu c, OBJECT tbl, struct message *msg) {
178 OBJECT vis, obj;
180 msg->method = lookuptable_fetch(state, tbl, msg->name);
182 if(NIL_P(msg->method)) return FALSE;
184 /* A 'false' method means to terminate method lookup. (e.g. undef_method) */
185 if(FALSE_P(msg->method)) return TRUE;
187 if(msg->priv) {
188 if(TUPLE_P(msg->method)) {
189 obj = tuple_at(state, msg->method, 1);
190 /* nil means that the actual method object is 'up' from here */
191 if(NIL_P(obj)) return FALSE;
192 } /* otherwise, bypass all visibility checks */
193 return TRUE;
196 /* Check that we are allowed to call this method */
197 if(TUPLE_P(msg->method)) {
198 vis = tuple_at(state, msg->method, 0);
199 if(vis == state->global->sym_private) {
200 /* We stop on private methods. */
201 msg->method = Qfalse;
202 return TRUE;
203 } else if(vis == state->global->sym_protected) {
204 /* If it's protected, bail if the receiver isn't the same
205 class as self. */
206 if(!object_kind_of_p(state, c->self, msg->module)) {
207 msg->method = Qfalse;
208 return TRUE;
212 obj = tuple_at(state, msg->method, 1);
213 if(NIL_P(obj)) {
214 /* The method was callable, but we need to keep looking
215 * for the implementation, so make the invocation bypass all further
216 * visibility checks */
217 msg->priv = TRUE;
218 return FALSE;
223 return TRUE;
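/* For reference: a method table entry fetched above is either the method
 * object itself, or a Tuple of [visibility, method] -- field 0 is one of
 * sym_public / sym_protected / sym_private and field 1 is the method (nil
 * meaning the real implementation lives further up the superclass chain).
 * UNVIS_METHOD below strips that visibility wrapper. */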
226 #define UNVIS_METHOD(var) if(TUPLE_P(var)) { var = tuple_at(state, var, 1); }
228 static inline int cpu_find_method(STATE, cpu c, struct message *msg) {
229 OBJECT tbl, klass;
230 struct method_cache *ent;
232 #if USE_GLOBAL_CACHING
233 ent = state->method_cache + CPU_CACHE_HASH(msg->klass, msg->name);
234 /* We hit a hole. Stop. */
235 if(ent->name == msg->name && ent->klass == msg->klass) {
237 /* TODO does this need to check for protected? */
238 if(msg->priv || ent->is_public) {
239 msg->method = ent->method;
240 msg->module = ent->module;
242 #if TRACK_STATS
243 state->cache_hits++;
244 #endif
245 return TRUE;
249 #if TRACK_STATS
250 if(ent->name) {
251 state->cache_collisions++;
253 state->cache_misses++;
254 #endif
255 #endif
257 klass = msg->klass;
259 do {
261 /* Validate that klass is even a valid class. */
262 if(NUM_FIELDS(klass) <= CLASS_f_SUPERCLASS) {
263 printf("Warning: encountered invalid class (not big enough).\n");
264 sassert(0);
265 return FALSE;
268 tbl = module_get_method_table(klass);
270 /* Ok, rather than assert, I'm going to just bail. Makes the error
271 a little strange, but handleable in ruby land. */
273 if(!LOOKUPTABLE_P(tbl)) {
274 printf("Warning: encountered invalid module (methods not a LookupTable).\n");
275 sassert(0);
276 return FALSE;
279 msg->module = klass;
280 if(cpu_check_for_method(state, c, tbl, msg)) {
281 goto cache;
284 klass = class_get_superclass(klass);
285 if(NIL_P(klass)) break;
287 } while(1);
289 cache:
291 if(!RTEST(msg->method)) return FALSE;
293 #if USE_GLOBAL_CACHING
294 /* Update the cache. */
295 if(RTEST(msg->method)) {
296 ent->klass = msg->klass;
297 ent->name = msg->name;
298 ent->module = klass;
300 if(TUPLE_P(msg->method)) {
301 ent->method = NTH_FIELD(msg->method, 1);
302 if(NTH_FIELD(msg->method, 0) == state->global->sym_public) {
303 ent->is_public = TRUE;
304 } else {
305 ent->is_public = FALSE;
308 msg->method = ent->method;
309 } else {
310 ent->method = msg->method;
311 ent->is_public = TRUE;
314 #else
315 if(RTEST(msg->method)) {
316 UNVIS_METHOD(msg->method);
318 #endif
320 return TRUE;
323 OBJECT exported_cpu_find_method(STATE, cpu c, OBJECT klass, OBJECT name, OBJECT *mod) {
324 struct message msg;
326 msg.name = name;
327 msg.klass = klass;
328 msg.recv = Qnil;
329 msg.priv = TRUE;
330 msg.module = Qnil;
331 msg.method = Qnil;
333 if(!cpu_find_method(state, c, &msg)) {
334 *mod = Qnil;
335 return Qnil;
338 *mod = msg.module;
339 return msg.method;
342 OBJECT cpu_locate_method_on(STATE, cpu c, OBJECT obj, OBJECT sym, OBJECT include_private) {
343 struct message msg;
345 msg.recv = obj;
346 msg.name = sym;
347 msg.klass = _real_class(state, obj);
348 msg.priv = TRUE_P(include_private);
349 msg.method = Qnil;
350 msg.module = Qnil;
352 if(cpu_find_method(state, c, &msg)) {
353 if(RTEST(msg.method)) {
354 return tuple_new2(state, 2, msg.method, msg.module);
358 return Qnil;
361 static inline int cpu_locate_method(STATE, cpu c, struct message *msg) {
362 int ret;
363 struct message missing;
365 #if ENABLE_DTRACE
366 if(RUBINIUS_VM_LOOKUP_BEGIN_ENABLED()) {
367 RUBINIUS_VM_LOOKUP_BEGIN();
369 #endif
371 ret = TRUE;
373 if(cpu_find_method(state, c, msg)) goto done;
375 missing = *msg;
376 missing.priv = TRUE;
377 missing.name = state->global->method_missing;
379 /* If we couldn't even find method_missing. bad. */
380 if(!cpu_find_method(state, c, &missing)) { ret = FALSE; goto done; }
382 msg->method = missing.method;
383 msg->module = missing.module;
384 msg->missing = TRUE;
386 done:
387 #if ENABLE_DTRACE
388 if(RUBINIUS_VM_LOOKUP_END_ENABLED()) {
389 RUBINIUS_VM_LOOKUP_END();
391 #endif
392 // printf("Found method: %p\n", mo);
394 return ret;
397 static inline OBJECT cpu_check_serial(STATE, cpu c, OBJECT obj, OBJECT sym, int serial) {
398 struct message msg;
400 msg.name = sym;
401 msg.recv = obj;
402 msg.klass = _real_class(state, obj);
403 msg.priv = TRUE;
405 if(!cpu_find_method(state, c, &msg)) {
406 return Qfalse;
409 if(N2I(fast_fetch(msg.method, CMETHOD_f_SERIAL)) == serial) {
410 return Qtrue;
413 return Qfalse;
416 OBJECT cpu_compile_method(STATE, OBJECT cm) {
417 OBJECT ba, bc;
418 int target_size;
420 ba = cmethod_get_compiled(cm);
421 bc = cmethod_get_bytecodes(cm);
423 /* If we're direct threaded, the compiled version is an array of the pointer
424 * size. */
425 #if DIRECT_THREADED
426 target_size = (BYTEARRAY_SIZE(bc) / sizeof(uint32_t)) * sizeof(uintptr_t);
427 #else
428 target_size = BYTEARRAY_SIZE(bc);
429 #endif
431 if(NIL_P(ba) || BYTEARRAY_SIZE(ba) < target_size) {
432 /* First time this method has been compiled, or size of current
433 bytearray is insufficient to hold revised bytecode */
434 ba = bytearray_new(state, target_size);
437 cpu_compile_instructions(state, bc, ba);
438 cmethod_set_compiled(cm, ba);
440 return ba;
443 void cpu_compile_instructions(STATE, OBJECT bc, OBJECT comp) {
444 /* If this is not a big endian platform, we need to adjust
445 the iseq to have the right order */
446 #if !CONFIG_BIG_ENDIAN && !DIRECT_THREADED
447 iseq_flip(state, bc, comp);
448 #elif DIRECT_THREADED
449 /* If we're compiled with direct threading, then translate
450 the compiled version into addresses. */
451 calculate_into_gotos(state, bc, comp, _dt_addresses, _dt_size);
452 #endif
455 static inline OBJECT _allocate_context(STATE, cpu c, OBJECT meth, int locals) {
456 OBJECT ctx, ins;
457 struct fast_context *fc;
458 int i;
460 ctx = object_memory_new_context(state->om, locals);
461 if(ctx >= state->om->context_last) {
462 state->om->collect_now |= OMCollectYoung;
465 /* TODO this code only works if ctx is allocated in the context stack
466 * or young area. If it's allocated mature off the bat, the write
467 * barrier won't be run and we're screwed. */
469 ins = fast_fetch(meth, CMETHOD_f_COMPILED);
471 if(NIL_P(ins)) {
472 ins = cpu_compile_method(state, meth);
475 CLEAR_FLAGS(ctx);
476 ctx->klass = Qnil;
477 ctx->field_count = FASTCTX_FIELDS;
479 fc = FASTCTX(ctx);
480 fc->flags = 0;
481 fc->sender = c->active_context;
483 fc->method = meth;
484 fc->custom_iseq = Qnil;
485 fc->data = bytearray_byte_address(state, ins);
486 fc->literals = fast_fetch(meth, CMETHOD_f_LITERALS);
488 if(locals > 0) {
489 fc->locals = object_memory_context_locals(ctx);
490 CLEAR_FLAGS(fc->locals);
491 fc->locals->gc_zone = 0;
492 fc->locals->klass = BASIC_CLASS(tuple);
493 SET_NUM_FIELDS(fc->locals, locals);
495 for(i = 0; i < locals; i++) {
496 SET_FIELD_DIRECT(fc->locals, i, Qnil);
499 } else {
500 fc->locals = Qnil;
502 // printf("Locals for %p at %p (%d, %d)\n", ctx, fc->locals, num_lcls, FASTCTX(ctx)->size);
504 return ctx;
507 static inline OBJECT cpu_create_context(STATE, cpu c, const struct message *msg) {
508 OBJECT ctx;
509 struct fast_context *fc;
511 ctx = _allocate_context(state, c, msg->method, N2I(cmethod_get_local_count(msg->method)));
512 fc = FASTCTX(ctx);
514 fc->ip = 0;
515 cpu_flush_sp(c);
516 fc->sp = c->sp;
517 /* fp points to the location on the stack as the context
518 was being created. */
519 fc->fp = c->sp;
521 fc->block = msg->block;
522 fc->self = msg->recv;
523 fc->argcount = msg->args;
524 fc->name = msg->name;
525 fc->method_module = msg->module;
526 fc->type = FASTCTX_NORMAL;
528 #if ENABLE_DTRACE
529 if (RUBINIUS_FUNCTION_ENTRY_ENABLED()) {
530 dtrace_function_entry(state, c, msg);
532 #endif
534 return ctx;
537 OBJECT cpu_create_block_context(STATE, cpu c, OBJECT env, int sp) {
538 OBJECT ctx;
539 struct fast_context *fc;
541 ctx = _allocate_context(state, c, blokenv_get_method(env),
542 N2I(blokenv_get_local_count(env)));
543 fc = FASTCTX(ctx);
545 fc->ip = N2I(blokenv_get_initial_ip(env));
546 fc->sp = sp;
548 fc->block = Qnil;
549 fc->self = Qnil;
550 fc->argcount = 0;
552 /* env lives here */
553 fc->name = env;
555 fc->method_module = Qnil;
556 fc->type = FASTCTX_BLOCK;
558 return ctx;
562 void cpu_raise_from_errno(STATE, cpu c, const char *msg) {
563 OBJECT cls;
564 char buf[32];
566 cls = lookuptable_fetch(state, state->global->errno_mapping, I2N(errno));
567 if(NIL_P(cls)) {
568 cls = state->global->exc_arg;
569 snprintf(buf, sizeof(buf), "Unknown errno %d", errno);
570 msg = buf;
573 cpu_raise_exception(state, c, cpu_new_exception(state, c, cls, msg));
576 void cpu_raise_arg_error_generic(STATE, cpu c, const char *msg) {
577 cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
580 void cpu_raise_arg_error(STATE, cpu c, int args, int req) {
581 char msg[1024];
582 snprintf(msg, 1024, "wrong number of arguments (got %d, required %d)", args, req);
583 cpu_flush_ip(c);
584 cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
587 void cpu_raise_primitive_failure(STATE, cpu c, int primitive_idx) {
588 char msg[1024];
589 OBJECT primitive_failure;
590 snprintf(msg, 1024, "Primitive with index (%d) failed", primitive_idx);
592 primitive_failure = cpu_new_exception(state, c, state->global->exc_primitive_failure, msg);
593 cpu_raise_exception(state, c, primitive_failure);
596 static int cpu_execute_primitive(STATE, cpu c, const struct message *msg, int prim) {
598 #if ENABLE_DTRACE
599 if (RUBINIUS_FUNCTION_PRIMITIVE_ENTRY_ENABLED()) {
600 dtrace_function_primitive_entry(state, c, msg);
602 #endif
604 c->in_primitive = prim;
605 if(cpu_perform_system_primitive(state, c, prim, msg)) {
606 /* Worked! */
607 c->in_primitive = 0;
609 if(EXCESSIVE_TRACING) {
610 printf("%05d: Called prim %s => %s on %s.\n", c->depth,
611 rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
612 rbs_symbol_to_cstring(state, msg->name), _inspect(msg->recv));
615 #if ENABLE_DTRACE
616 if (RUBINIUS_FUNCTION_PRIMITIVE_RETURN_ENABLED()) {
617 dtrace_function_primitive_return(state, c, msg);
619 #endif
621 return TRUE;
624 c->in_primitive = 0;
626 if(EXCESSIVE_TRACING) {
627 printf("[[ Primitive failed! -- %d ]]\n", prim);
630 return FALSE;
634 static inline int cpu_try_primitive(STATE, cpu c, const struct message *msg) {
635 int prim;
636 OBJECT prim_obj;
638 prim_obj = fast_fetch(msg->method, CMETHOD_f_PRIMITIVE);
640 if(NIL_P(prim_obj)) {
641 return FALSE;
642 } else if(!FIXNUM_P(prim_obj)) {
643 if(SYMBOL_P(prim_obj)) {
644 prim = calc_primitive_index(state, symbol_to_string(state, prim_obj));
645 } else {
646 prim = -1;
648 cmethod_set_primitive(msg->method, I2N(prim));
649 } else {
650 prim = N2I(prim_obj);
653 if(prim < 0) {
654 cmethod_set_primitive(msg->method, Qnil);
655 return FALSE;
658 return cpu_execute_primitive(state, c, msg, prim);
661 /* Lowest-level functions for moving into a method. Adjusts the registers. */
662 /* Stack offset is used to adjust sp when it's saved, so that when
663 this context is swapped back in, any arguments are automatically
664 removed from the stack */
665 inline void cpu_save_registers(STATE, cpu c, int offset) {
666 struct fast_context *fc;
668 cpu_flush_ip(c);
669 cpu_flush_sp(c);
670 fc = FASTCTX(c->active_context);
671 fc->sp = c->sp - offset;
672 fc->ip = c->ip;
676 inline void cpu_yield_debugger_check(STATE, cpu c) {
677 /* Yield to the debugger if flag is set */
678 if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
679 if(EXCESSIVE_TRACING) {
680 printf("Yielding to debugger due to context change\n");
682 cpu_yield_debugger(state, c);
687 inline void cpu_restore_context_with_home(STATE, cpu c, OBJECT ctx, OBJECT home) {
688 struct fast_context *fc;
690 /* Home is actually the main context here because it's the method
691 context that holds all the data. So if it's a fast context, we restore
692 its data, then if ctx != home, we restore a little more */
694 fc = FASTCTX(home);
695 CHECK_PTR(fc->self);
696 CHECK_PTR(fc->method);
698 c->argcount = fc->argcount;
699 c->self = fc->self;
701 /* Only happens if we're restoring a block. */
702 if(ctx != home) {
703 fc = FASTCTX(ctx);
706 c->data = fc->data;
707 c->type = fc->type;
709 c->locals = FASTCTX(home)->locals;
711 c->sender = fc->sender;
712 c->sp = fc->sp;
713 c->ip = fc->ip;
714 c->fp = fc->fp;
716 cpu_cache_ip(c);
717 cpu_cache_sp(c);
719 c->home_context = home;
720 c->active_context = ctx;
723 /* Layer 2 method movement: use lower level only. */
725 /* Used in debugging. Verifies that the expected depth is the actual depth. */
727 static void _verify_depth(cpu c) {
728 int count = 0;
729 OBJECT ctx = c->active_context;
731 while(!NIL_P(ctx)) {
732 count++;
733 ctx = FASTCTX(ctx)->sender;
736 assert(count == c->depth);
740 inline void cpu_activate_context(STATE, cpu c, OBJECT ctx, OBJECT home, int so) {
741 c->depth++;
743 if(c->active_context != Qnil) {
744 cpu_save_registers(state, c, so);
746 cpu_restore_context_with_home(state, c, ctx, home);
747 cpu_yield_debugger_check(state, c);
750 /* Layer 2.5: Uses lower layers to return to the calling context.
751 Returning ends here. */
753 void nmc_activate(STATE, cpu c, OBJECT nmc, OBJECT val, int reraise);
755 inline int cpu_simple_return(STATE, cpu c, OBJECT val) {
756 OBJECT current, destination, home;
758 #if ENABLE_DTRACE
759 if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
760 dtrace_function_return(state, c);
762 #endif
764 c->depth--;
766 current = c->active_context;
767 c->active_context = Qnil;
768 destination = cpu_current_sender(c);
770 // printf("Rtrnng frm %p (%d)\n", current, FASTCTX(current)->size);
772 if(destination == Qnil) {
773 object_memory_retire_context(state->om, current);
775 /* Thread exiting, reschedule.. */
776 if(c->current_thread != c->main_thread) {
777 THDEBUG("%d: thread reached end, dead.\n", getpid());
778 cpu_thread_exited(state, c);
779 return FALSE;
780 /* Switch back to the main task... */
781 } else if(c->current_task != c->main_task) {
782 cpu_task_select(state, c, c->main_task);
783 return FALSE;
785 /* The return value of the script is passed on the stack. */
786 stack_push(val);
787 } else {
788 /* retire this one context. */
789 object_memory_retire_context(state->om, current);
791 /* Now, figure out if the destination is a block, so we pass the correct
792 home to restore_context */
793 if(block_context_p(state, destination)) {
794 home = blokctx_home(state, destination);
795 } else {
796 home = destination;
800 if(EXCESSIVE_TRACING) {
801 if(stack_context_p(destination)) {
802 printf("Returning to a stack context %d / %d (%s).\n", (int)c->active_context, (int)destination, c->active_context - destination == CTX_SIZE ? "stack" : "REMOTE");
803 } else {
804 printf("Returning to %s.\n", _inspect(destination));
808 if(FASTCTX(home)->type == FASTCTX_NMC) {
809 nmc_activate(state, c, home, val, FALSE);
810 /* We return because nmc_activate will set up the cpu to do whatever
811 it needs to next. */
812 return TRUE;
813 } else {
814 cpu_restore_context_with_home(state, c, destination, home);
815 stack_push(val);
819 return TRUE;
822 /* Used by raise_exception to restore the previous context. */
823 int cpu_unwind(STATE, cpu c) {
824 OBJECT current, destination, home;
825 current = c->active_context;
826 c->active_context = Qnil;
827 destination = cpu_current_sender(c);
829 #if ENABLE_DTRACE
830 if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
831 dtrace_function_return(state, c);
833 #endif
835 c->depth--;
837 if(destination == Qnil) {
838 object_memory_retire_context(state->om, current);
840 /* Thread exiting, reschedule.. */
841 if(c->current_thread != c->main_thread) {
842 THDEBUG("%d: thread reached end, dead.\n", getpid());
843 cpu_thread_exited(state, c);
844 return FALSE;
845 /* Switch back to the main task... */
846 } else if(c->current_task != c->main_task) {
847 cpu_task_select(state, c, c->main_task);
848 return FALSE;
850 stack_push(Qnil);
851 return FALSE;
853 } else {
854 /* retire this one context. */
855 object_memory_retire_context(state->om, current);
857 /* Now, figure out if the destination is a block, so we pass the correct
858 home to restore_context */
859 if(block_context_p(state, destination)) {
860 home = blokctx_home(state, destination);
861 } else {
862 home = destination;
865 /* Commenting out 02.01.08 - Caleb Tennis.
866 I don't know the purpose of this code, but if an exception is thrown from
867 a rb_funcall in subtend, this causes an endless loop in cpu_return_to_sender.
869 Commenting it out fixes that for now.
870 Hopefully someone smarter than me knows a better fix for the future.
872 Skip over NMCs for now.
874 if(exception && FASTCTX(destination)->type == FASTCTX_NMC) {
875 c->active_context = destination;
876 return cpu_return_to_sender(state, c, val, FALSE, TRUE);
881 /* Ok, reason we'd be restoring a native context:
882 1) the native context used rb_funcall and we need to return
883 it the result of the call.
885 cpu_restore_context_with_home(state, c, destination, home);
888 return TRUE;
891 /* Layer 3: goto. Basically jumps directly into the specified method;
892 no lookup required. */
894 inline void cpu_goto_method(STATE, cpu c, OBJECT recv, OBJECT meth,
895 int count, OBJECT name, OBJECT block) {
896 OBJECT ctx;
897 struct message msg;
899 msg.recv = recv;
900 msg.name = name;
901 msg.method = meth;
902 msg.module = Qnil;
903 msg.block = block;
904 msg.args = count;
906 if(cpu_try_primitive(state, c, &msg)) return;
908 OBJECT scope = cmethod_get_staticscope(meth);
909 if(NIL_P(scope)) {
910 cmethod_set_staticscope(meth, state->global->top_scope);
913 ctx = cpu_create_context(state, c, &msg);
914 cpu_activate_context(state, c, ctx, ctx, 0);
917 /* Layer 3: hook. Shortcut for running hook methods. */
919 inline void cpu_perform_hook(STATE, cpu c, OBJECT recv, OBJECT meth, OBJECT arg) {
920 OBJECT rub, vm;
921 struct message msg;
923 msg.name = meth;
924 msg.recv = recv;
925 msg.klass = _real_class(state, recv);
926 msg.priv = TRUE;
928 if(!cpu_find_method(state, c, &msg)) return;
930 rub = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
931 if(NIL_P(rub)) return;
933 vm = rbs_const_get(state, rub, "VM");
934 if(NIL_P(vm)) return;
936 /* The top of the stack contains the value that should remain on the stack.
937 We pass that to the perform_hook call so it is returned and stays on
938 the top of the stack. That's why we say there are 4 args. */
940 stack_push(arg);
941 stack_push(meth);
942 stack_push(recv);
944 cpu_send(state, c, vm, SYM("perform_hook"), 4, Qnil);
947 /* Layer 4: High level method calling. */
949 /* Layer 4: direct activation. Used for calling a method that's already
950 been looked up. */
951 static inline void cpu_activate_method(STATE, cpu c, struct message *msg) {
952 OBJECT ctx;
954 c->depth++;
955 if(c->depth == CPU_MAX_DEPTH) {
956 machine_handle_fire(FIRE_STACK);
959 if(cpu_try_primitive(state, c, msg)) return;
961 ctx = cpu_create_context(state, c, msg);
963 cpu_save_registers(state, c, msg->args);
964 cpu_restore_context_with_home(state, c, ctx, ctx);
967 static inline void cpu_perform(STATE, cpu c, const struct message *msg) {
968 OBJECT ctx;
970 c->depth++;
971 if(c->depth == CPU_MAX_DEPTH) {
972 machine_handle_fire(FIRE_STACK);
975 ctx = cpu_create_context(state, c, msg);
977 /* If it was missing, set up some extra data in the MethodContext for
978 the method_missing method to check out, to see why it was missing. */
979 if(msg->missing && msg->priv) {
980 methctx_reference(state, ctx);
981 object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
984 cpu_save_registers(state, c, msg->args);
985 cpu_restore_context_with_home(state, c, ctx, ctx);
986 cpu_yield_debugger_check(state, c);
990 static inline void cpu_patch_mono(struct message *msg);
992 static inline void cpu_patch_missing(struct message *msg);
994 static void _cpu_ss_basic(struct message *msg) {
995 msg->missing = 0;
996 const STATE = msg->state;
997 const cpu c = msg->c;
999 sassert(cpu_locate_method(state, c, msg));
1001 /* If it's not method_missing, cache the details of msg in the send_site */
1002 if(!msg->missing) {
1003 cpu_patch_mono(msg);
1004 } else {
1005 cpu_patch_missing(msg);
1006 msg->args += 1;
1007 stack_push(msg->name);
1010 if(cpu_try_primitive(state, c, msg)) return;
1012 cpu_perform(state, c, msg);
1015 void cpu_initialize_sendsite(STATE, struct send_site *ss) {
1016 ss->lookup = _cpu_ss_basic;
1019 static void _cpu_ss_disabled(struct message *msg) {
1020 msg->missing = 0;
1021 const STATE = msg->state;
1022 const cpu c = msg->c;
1024 sassert(cpu_locate_method(state, c, msg));
1026 /* If it turned out to be method_missing, push the name as an extra argument */
1027 if(msg->missing) {
1028 msg->args += 1;
1029 stack_push(msg->name);
1032 if(cpu_try_primitive(state, c, msg)) return;
1034 cpu_perform(state, c, msg);
1037 void cpu_patch_disabled(struct message *msg, struct send_site *ss) {
1038 ss->data1 = ss->data2 = ss->data3 = Qnil;
1039 ss->data4 = 0;
1040 ss->c_data = NULL;
1041 ss->lookup = _cpu_ss_disabled;
1043 _cpu_ss_disabled(msg);
1046 #define SS_DISABLE_THRESHOLD 10000
1047 #define SS_MISSES(ss) if(++ss->misses > SS_DISABLE_THRESHOLD) { cpu_patch_disabled(msg, ss); } else
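/* Usage note (sketch): SS_MISSES ends in a bare `else`, so a call site such as
 *
 *   if(CHECK_CLASS(msg)) {
 *     SS_MISSES(ss) {
 *       _cpu_ss_basic(msg);    // cache miss: do the full lookup
 *     }
 *     return;
 *   }
 *
 * expands to: if the miss count has passed SS_DISABLE_THRESHOLD, patch the
 * send site over to the disabled (always-slow) lookup (cpu_patch_disabled
 * itself runs _cpu_ss_disabled on this message); otherwise run the full
 * lookup via _cpu_ss_basic. Either way the specialized handler returns
 * afterwards. */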
1049 /* Send Site specialization 1: execute a primitive directly. */
1051 #define CHECK_CLASS(msg) (_real_class(msg->state, msg->recv) != SENDSITE(msg->send_site)->data1)
1053 static void _cpu_ss_mono_prim(struct message *msg) {
1054 struct send_site *ss = SENDSITE(msg->send_site);
1055 prim_func func;
1056 int _orig_sp;
1057 OBJECT *_orig_sp_ptr;
1058 cpu c = msg->c;
1060 if(CHECK_CLASS(msg)) {
1061 SS_MISSES(ss) {
1062 _cpu_ss_basic(msg);
1064 return;
1067 ss->hits++;
1069 _orig_sp_ptr = c->sp_ptr;
1070 _orig_sp = c->sp;
1072 func = (prim_func)ss->c_data;
1074 msg->method = ss->data2;
1075 msg->module = ss->data3;
1077 c->in_primitive = ss->data4;
1079 if(!func(msg->state, msg->c, msg)) {
1080 c->in_primitive = 0;
1081 c->sp_ptr = _orig_sp_ptr;
1082 c->sp = _orig_sp;
1084 cpu_perform(msg->state, msg->c, msg);
1085 } else {
1086 c->in_primitive = 0;
1090 /* Called before a primitive is run the slow way, allowing the send_site to be patched
1091 * to call the primitive directly. */
1092 void cpu_patch_primitive(STATE, const struct message *msg, prim_func func, int prim) {
1093 struct send_site *ss;
1095 if(!REFERENCE_P(msg->send_site)) return;
1097 ss = SENDSITE(msg->send_site);
1099 /* If this sendsite is disabled, leave it disabled. */
1100 if(ss->lookup == _cpu_ss_disabled) return;
1102 SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
1103 SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
1104 SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
1106 ss->data4 = prim;
1107 ss->c_data = (void*)func;
1108 ss->lookup = _cpu_ss_mono_prim;
1111 /* Send Site specialization 2: Run an ffi function directly. */
1112 static void _cpu_ss_mono_ffi(struct message *msg) {
1113 struct send_site *ss = SENDSITE(msg->send_site);
1115 if(CHECK_CLASS(msg)) {
1116 SS_MISSES(ss) {
1117 _cpu_ss_basic(msg);
1119 return;
1122 ss->hits++;
1124 ffi_call(msg->state, msg->c, nfunc_get_data(ss->data2));
1127 /* Called before an FFI function is run the slow way, allowing the send_site to be patched
1128 * to call the function directly. */
1129 void cpu_patch_ffi(STATE, const struct message *msg) {
1130 struct send_site *ss;
1132 if(!REFERENCE_P(msg->send_site)) return;
1134 ss = SENDSITE(msg->send_site);
1136 SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
1137 SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
1138 SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
1140 ss->c_data = *DATA_STRUCT(nfunc_get_data(msg->method), void**);
1141 ss->lookup = _cpu_ss_mono_ffi;
1143 return;
1146 if(!REFERENCE_P(msg->send_site)) return;
1148 ss = SENDSITE(msg->send_site);
1149 SET_STRUCT_FIELD(msg->send_site, ss->data1, msg->recv);
1150 SET_STRUCT_FIELD(msg->send_site, ss->data2, nfunc_get_data(msg->method));
1151 ss->lookup = _cpu_ss_mono_ffi;
1155 /* Send Site specialization 3: simple monomorphic last-implementation cache. */
1156 static void _cpu_ss_mono(struct message *msg) {
1157 struct send_site *ss = SENDSITE(msg->send_site);
1159 if(CHECK_CLASS(msg)) {
1160 SS_MISSES(ss) {
1161 _cpu_ss_basic(msg);
1163 return;
1166 ss->hits++;
1168 msg->method = ss->data2;
1169 msg->module = ss->data3;
1171 if(cpu_try_primitive(msg->state, msg->c, msg)) return;
1173 cpu_perform(msg->state, msg->c, msg);
1176 /* Saves the details of +msg+ in +ss+ and installs _cpu_ss_mono in +ss+, so
1177 * that the next time +ss+ is used, it will try the cached details. */
1178 static inline void cpu_patch_mono(struct message *msg) {
1179 STATE = msg->state;
1181 struct send_site *ss = SENDSITE(msg->send_site);
1183 ss->lookup = _cpu_ss_mono;
1184 SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
1185 SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
1186 SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
1189 static void _cpu_ss_missing(struct message *msg) {
1190 struct send_site *ss = SENDSITE(msg->send_site);
1191 cpu c = msg->c;
1193 if(CHECK_CLASS(msg)) {
1194 SS_MISSES(ss) {
1195 _cpu_ss_basic(msg);
1197 return;
1200 ss->hits++;
1202 msg->method = ss->data2;
1203 msg->module = ss->data3;
1205 msg->args += 1;
1206 stack_push(msg->name);
1208 if(cpu_try_primitive(msg->state, msg->c, msg)) return;
1210 cpu_perform(msg->state, msg->c, msg);
1213 /* Saves the details of +msg+ in +ss+ and installs _cpu_ss_missing in +ss+, so
1214 * that the next time +ss+ is used, it will try the cached details. */
1215 static inline void
1216 cpu_patch_missing(struct message *msg) {
1217 STATE = msg->state;
1218 struct send_site *ss = SENDSITE(msg->send_site);
1220 ss->lookup = _cpu_ss_missing;
1221 SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
1222 SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
1223 SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
1226 static void _cpu_on_no_method(STATE, cpu c, const struct message *msg) {
1227 char *str;
1228 OBJECT exc;
1230 exc = rbs_const_get(state, BASIC_CLASS(object), "RuntimeError");
1232 str = malloc(1024);
1233 snprintf(str, 1024, "Unable to find any version of '%s' to run", _inspect(msg->name));
1235 cpu_raise_exception(state, c, cpu_new_exception(state, c, exc, str));
1237 free(str);
1240 /* Layer 4: send. Primary method calling function. */
1241 inline void cpu_send_message(STATE, cpu c, struct message *msg) {
1242 struct send_site *ss;
1244 #ifdef TIME_LOOKUP
1245 uint64_t start = measure_cpu_time();
1246 #endif
1248 ss = SENDSITE(msg->send_site);
1249 msg->state = state;
1250 msg->c = c;
1251 msg->name = ss->name;
1252 ss->lookup(msg);
1254 #ifdef TIME_LOOKUP
1255 state->lookup_time += (measure_cpu_time() - start);
1256 #endif
1260 void cpu_send_message_external(STATE, cpu c, struct message *msg) {
1261 OBJECT ctx;
1263 if(!cpu_locate_method(state, c, msg)) {
1264 _cpu_on_no_method(state, c, msg);
1265 return;
1268 if(msg->missing) {
1269 msg->args += 1;
1270 stack_push(msg->name);
1271 } else {
1272 if(cpu_try_primitive(state, c, msg)) return;
1275 c->depth++;
1276 if(c->depth == CPU_MAX_DEPTH) {
1277 machine_handle_fire(FIRE_STACK);
1280 ctx = cpu_create_context(state, c, msg);
1282 /* If it was missing, set up some extra data in the MethodContext for
1283 the method_missing method to check out, to see why it was missing. */
1284 if(msg->missing && msg->priv) {
1285 methctx_reference(state, ctx);
1286 object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
1289 cpu_save_registers(state, c, msg->args);
1290 cpu_restore_context_with_home(state, c, ctx, ctx);
1294 /* A version used when there is no send_site. */
1295 void cpu_send(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {
1296 struct message msg;
1298 msg.recv = recv;
1299 msg.name = sym;
1300 msg.args = args;
1301 msg.block = block;
1302 msg.klass = _real_class(state, recv);
1303 msg.priv = c->call_flags;
1304 msg.missing = 0;
1305 msg.send_site = Qnil;
1307 c->call_flags = 0;
1309 cpu_send_message_external(state, c, &msg);
1312 /* A version used to call a method on the superclass */
1313 void cpu_send_super(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {
1314 struct message msg;
1316 msg.recv = recv;
1317 msg.name = sym;
1318 msg.args = args;
1319 msg.block = block;
1320 msg.klass = class_get_superclass(_real_class(state, recv));
1321 msg.priv = TRUE;
1322 msg.missing = 0;
1323 msg.send_site = Qnil;
1325 c->call_flags = 0;
1327 cpu_send_message_external(state, c, &msg);
1330 void cpu_raise_exception(STATE, cpu c, OBJECT exc) {
1331 OBJECT ctx, table, ent;
1332 int cur, total, target, idx, l, r;
1333 c->exception = exc;
1334 ctx = c->active_context;
1336 cpu_flush_ip(c);
1337 cpu_save_registers(state, c, 0);
1339 /* NOTE: using return_to_sender worries me a little because it can
1340 switch to a different task if you try to return off the top
1341 of a task.. */
1343 while(!NIL_P(ctx)) {
1344 if(c->type == FASTCTX_NMC) goto skip;
1346 table = cmethod_get_exceptions(cpu_current_method(state, c));
1348 if(!table || NIL_P(table)) goto skip;
1350 cur = c->ip;
1351 total = NUM_FIELDS(table);
1352 target = 0;
1353 for(idx=0; idx < total; idx++) {
1354 ent = tuple_at(state, table, idx);
1355 l = N2I(tuple_at(state, ent, 0));
1356 r = N2I(tuple_at(state, ent, 1));
1357 if(cur >= l && cur <= r) {
1358 target = N2I(tuple_at(state, ent, 2));
1359 c->ip = target;
1360 cpu_cache_ip(c);
1361 return;
1365 skip:
1366 /* unwind returns FALSE if we can't unwind anymore. */
1367 if(!cpu_unwind(state, c)) break;
1368 ctx = c->active_context;
1371 /* Reset it because it can get overridden in the return_to_senders. */
1372 c->exception = exc;
1374 // printf("Unable to find exception handler, i'm confused.\n");
1375 return;
1378 void cpu_yield_debugger(STATE, cpu c) {
1379 /* Ensure the DEBUG_ON_CTXT_CHANGE flag is cleared so we don't try
1380 to yield more than once */
1381 if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
1382 TASK_CLEAR_FLAG(c, TASK_DEBUG_ON_CTXT_CHANGE);
1383 struct cpu_task *task = (struct cpu_task*)BYTES_OF(c->current_task);
1384 TASK_CLEAR_FLAG(task, TASK_DEBUG_ON_CTXT_CHANGE);
1387 cpu_flush_sp(c);
1388 cpu_flush_ip(c);
1389 methctx_reference(state, c->active_context);
1391 OBJECT dbg = c->debug_channel;
1392 if(dbg == Qnil) {
1393 /* No debug channel on the task, so use the VM default one (if any) */
1394 OBJECT mod, vm;
1395 mod = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
1396 if(!NIL_P(mod)) {
1397 vm = rbs_const_get(state, mod, "VM");
1398 if(!NIL_P(vm)) {
1399 dbg = object_get_ivar(state, vm, SYM("@debug_channel"));
1404 if(dbg != Qnil) {
1405 if(c->control_channel == Qnil) {
1406 /* No control channel on the task, so create one */
1407 c->control_channel = cpu_channel_new(state);
1410 sassert(cpu_channel_has_readers_p(state, dbg));
1411 cpu_channel_send(state, c, dbg, c->current_thread);
1412 /* This is so when this task is reactivated, the sent value won't be placed
1413 on the stack, keeping the stack clean. */
1414 TASK_SET_FLAG(c, TASK_NO_STACK);
1415 cpu_channel_receive(state, c, c->control_channel, c->current_thread);
1416 } else {
1417 cpu_raise_arg_error_generic(state, c, "Attempted to switch to debugger, no debugger installed");
1421 const char *cpu_op_to_name(STATE, char op) {
1422 #include "shotgun/lib/instruction_names.h"
1423 return get_instruction_name(op);
1426 void state_collect(STATE, cpu c);
1427 void state_major_collect(STATE, cpu c);
1429 void cpu_run(STATE, cpu c, int setup) {
1430 IP_TYPE op;
1431 IP_TYPE *ip_ptr = NULL;
1432 const char *firesuit_arg;
1433 struct rubinius_globals *global = state->global;
1435 c->ip_ptr = &ip_ptr;
1437 if(setup) {
1438 (void)op;
1439 #if DIRECT_THREADED
1440 SETUP_DT_ADDRESSES;
1441 return;
1442 #else
1443 return;
1444 #endif
1447 /* recache ip_ptr to make it valid. */
1448 cpu_cache_ip(c);
1450 current_machine->g_use_firesuit = 1;
1451 current_machine->g_access_violation = 0;
1452 getcontext(&current_machine->g_firesuit);
1454 /* Ok, we jumped back here because something went south. */
1455 if(current_machine->g_access_violation) {
1456 switch(current_machine->g_access_violation) {
1457 case FIRE_ACCESS:
1458 cpu_raise_exception(state, c,
1459 cpu_new_exception(state, c, state->global->exc_arg,
1460 "Accessed outside bounds of object"));
1461 break;
1462 case FIRE_NULL:
1463 cpu_raise_exception(state, c,
1464 cpu_new_exception(state, c, state->global->exc_arg,
1465 "Attempted to access field of non-reference (null pointer)"));
1466 break;
1467 case FIRE_ASSERT:
1468 cpu_raise_exception(state, c,
1469 cpu_new_exception(state, c,
1470 rbs_const_get(state, BASIC_CLASS(object), "VMAssertion"),
1471 "An error has occured within the VM"));
1472 break;
1473 case FIRE_TYPE:
1474 object_type_to_type(current_machine->g_firesuit_arg, firesuit_arg);
1475 cpu_raise_exception(state, c,
1476 cpu_new_exception2(state, c, global->exc_type,
1477 "Invalid type encountered %s: %s",
1478 current_machine->g_firesuit_message, firesuit_arg));
1479 free(current_machine->g_firesuit_message);
1480 break;
1481 case FIRE_STACK:
1482 cpu_raise_exception(state, c,
1483 cpu_new_exception(state, c,
1484 rbs_const_get(state, BASIC_CLASS(object), "SystemStackError"),
1485 "Maximum amount of stack space used"));
1486 break;
1487 default:
1488 cpu_raise_exception(state, c,
1489 cpu_new_exception2(state, c, global->exc_type,
1490 "Unknown firesuit reason: %d", current_machine->g_access_violation));
1491 break;
1495 insn_start:
1496 while(c->active_context != Qnil) {
1498 #if DIRECT_THREADED
1499 if(EXCESSIVE_TRACING) {
1500 printf("%-15s: => %p\n",
1501 rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
1502 (void*)*ip_ptr);
1504 NEXT_OP;
1505 #include "shotgun/lib/instruction_dt.gen"
1506 #else
1508 next_op:
1509 op = *ip_ptr++;
1511 if(EXCESSIVE_TRACING) {
1512 cpu_flush_ip(c);
1513 cpu_flush_sp(c);
1514 printf("%-15s: OP: %s (%d/%d/%d)\n",
1515 rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
1516 cpu_op_to_name(state, op), op, c->ip, c->sp);
1519 #include "shotgun/lib/instructions.gen"
1521 #endif
1522 check_interrupts:
1523 if(state->om->collect_now) {
1525 #if ENABLE_DTRACE
1526 if (RUBINIUS_GC_BEGIN_ENABLED()) {
1527 dtrace_gc_begin(state);
1529 #endif
1530 int cm = state->om->collect_now;
1532 /* Collect the first generation. */
1533 if(cm & OMCollectYoung) {
1534 if(EXCESSIVE_TRACING) {
1535 printf("[[ Collecting young objects. ]]\n");
1536 printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
1538 state_collect(state, c);
1539 if(EXCESSIVE_TRACING) {
1540 printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
1541 printf("[[ Finished collect. ]]\n");
1545 /* Collect the old generation. */
1546 if(cm & OMCollectMature) {
1547 if(EXCESSIVE_TRACING) {
1548 printf("[[ Collecting old objects. ]\n");
1550 state_major_collect(state, c);
1551 // printf("Done with major collection.\n");
1554 /* If someone is reading the ON_GC channel, write to it to notify them. */
1555 if(cpu_channel_has_readers_p(state, state->global->on_gc_channel)) {
1556 cpu_channel_send(state, c, state->global->on_gc_channel, Qtrue);
1559 state->om->collect_now = 0;
1561 #if ENABLE_DTRACE
1562 if (RUBINIUS_GC_END_ENABLED()) {
1563 dtrace_gc_end(state);
1565 #endif
1568 if(state->check_events) {
1569 state->check_events = 0;
1570 if(state->pending_events) cpu_event_runonce(state);
1571 if(state->pending_threads) cpu_thread_preempt(state, c);
1576 void cpu_run_script(STATE, cpu c, OBJECT meth) {
1577 OBJECT name;
1578 name = string_to_sym(state, string_new(state, "__script__"));
1579 cpu_goto_method(state, c, c->main, meth, 0, name, Qnil);