/* shotgun/lib/cpu_instructions.c */
#include <string.h>
#include <errno.h>

#include "shotgun/lib/shotgun.h"
#include "shotgun/lib/cpu.h"
#include "shotgun/lib/tuple.h"
#include "shotgun/lib/module.h"
#include "shotgun/lib/class.h"
#include "shotgun/lib/hash.h"
#include "shotgun/lib/lookuptable.h"
#include "shotgun/lib/methctx.h"
#include "shotgun/lib/array.h"
#include "shotgun/lib/string.h"
#include "shotgun/lib/symbol.h"
#include "shotgun/lib/machine.h"
#include "shotgun/lib/bytearray.h"
#include "shotgun/lib/fixnum.h"
#include "shotgun/lib/primitive_util.h"
#include "shotgun/lib/sendsite.h"
#include "shotgun/lib/subtend/ffi.h"
#include "shotgun/lib/subtend/nmc.h"

#if CONFIG_ENABLE_DTRACE
#include "shotgun/lib/dtrace_probes.h"
#endif

#include <sys/time.h>

#if TIME_LOOKUP
#include <stdint.h>
#include <time.h>
#include <mach/mach.h>
#include <mach/mach_time.h>

#define measure_cpu_time mach_absolute_time

void cpu_show_lookup_time(STATE) {
  struct mach_timebase_info timeinfo;
  uint64_t nano;
  double seconds, total;

  mach_timebase_info(&timeinfo);

  nano = (state->lookup_time * timeinfo.numer / timeinfo.denom);

  seconds = (double)(nano / (double)1000000000);

  nano = ((mach_absolute_time() - state->system_start) * timeinfo.numer / timeinfo.denom);

  total = (double)(nano / (double)1000000000);

  printf("Total time: % 3.3f\n", total);
  printf("Lookup time: % 3.3f\n", seconds);
  printf("Percent: % 3.3f\n", (seconds / total) * 100);
}
#endif

#define RISA(obj,cls) (REFERENCE_P(obj) && ISA(obj,BASIC_CLASS(cls)))

#define next_int_into(val) val = *ip_ptr++;
#define next_int next_int_into(_int);

#if DIRECT_THREADED
#include "shotgun/lib/instruction_funcs.gen"
DT_ADDRESSES;

#ifdef SHOW_OPS
#define NEXT_OP printf(" => %p\n", *ip_ptr); sassert(*ip_ptr); goto **ip_ptr++
#else
#define NEXT_OP goto **ip_ptr++
#endif

#endif

#define next_literal_into(val) next_int; val = fast_fetch(cpu_current_literals(state, c), _int)
#define next_literal next_literal_into(_lit)

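/* Note: next_int pulls the next operand out of the instruction stream via
   ip_ptr; next_literal then uses that operand as an index into the running
   method's literals tuple. Both store into the _int/_lit temporaries, which
   are assumed to be declared by the generated instruction code that includes
   these macros. */
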
OBJECT cpu_open_class(STATE, cpu c, OBJECT under, OBJECT sup, OBJECT sym, int *created) {
  OBJECT val, s1, s2, s3, s4, sup_itr;

  *created = FALSE;

  /* Evil people could do A = 12; class A::B; end */
  if(!ISA(under, BASIC_CLASS(module))) {
    cpu_raise_exception(state, c,
      cpu_new_exception(state, c, state->global->exc_type, "Nesting constant is not a Module"));
    return Qundef;
  }

  val = module_const_get(state, under, sym);
  if(RTEST(val)) {
    if(AUTOLOAD_P(val)) { return val; }
    if(ISA(val, BASIC_CLASS(class))) {
      if(!NIL_P(sup) && class_superclass(state, val) != sup) {
        cpu_raise_exception(state, c,
          cpu_new_exception(state, c, state->global->exc_type, "superclass mismatch"));
        return Qundef;
      }
    } else {
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_type, "constant is not a class"));
      return Qundef;
    }

    return val;
  } else {
    val = class_constitute(state, sup, under);
    if(NIL_P(val)) {
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg, "Invalid superclass"));
      return Qundef;
    }

    *created = TRUE;

    /*
    printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));
    */

    if(under != state->global->object) {
      s1 = symbol_to_string(state, module_get_name(under));
      s2 = symbol_to_string(state, sym);
      s3 = string_dup(state, s1);
      string_append(state, s3, string_new(state, "::"));
      string_append(state, s3, s2);
      s4 = string_to_sym(state, s3);
      module_set_name(val, s4);
    } else {
      module_set_name(val, sym);
    }

    module_const_set(state, under, sym, val);
    sup_itr = sup;
  }

  return val;
}

/* Return the module object corresponding to the name or, if
 * the module does not exist yet, create a new one first.
 */
OBJECT cpu_open_module(STATE, cpu c, OBJECT under, OBJECT sym) {
  OBJECT module, s1;

  module = module_const_get(state, under, sym);
  if(!RTEST(module)) {
    module = module_allocate_mature(state, 0);
    module_setup_fields(state, module);

    /*
    printf("Defining %s under %s.\n", rbs_symbol_to_cstring(state, sym), _inspect(under));
    */

    if(under != BASIC_CLASS(object)) {
      s1 = symbol_to_string(state, module_get_name(under));
      s1 = string_dup(state, s1);
      string_append(state, s1, string_new(state, "::"));
      string_append(state, s1, symbol_to_string(state, sym));
      module_set_name(module, string_to_sym(state, s1));
    } else {
      module_set_name(module, sym);
    }

    module_const_set(state, under, sym, module);
    module_setup_fields(state, object_metaclass(state, module));
    module_set_encloser(module, under);
    module_set_encloser(object_metaclass(state, module), under);
  }

  return module;
}

/* Locate the method object for calling method +name+ on an instance of +klass+.
   +mod+ is updated to point to the Module that holds the method.
   The method is then looked for in the hash tables up the superclass chain.
   returns TRUE if we found a method object that should be considered
   returns FALSE if we need to keep looking 'up' for the method
*/
static inline int cpu_check_for_method(STATE, cpu c, OBJECT tbl, struct message *msg) {
  OBJECT vis, obj;

  msg->method = lookuptable_fetch(state, tbl, msg->name);

  if(NIL_P(msg->method)) return FALSE;

  /* A 'false' method means to terminate method lookup. (e.g. undef_method) */
  if(FALSE_P(msg->method)) return TRUE;

  if(msg->priv) {
    if(TUPLE_P(msg->method)) {
      obj = tuple_at(state, msg->method, 1);
      /* nil means that the actual method object is 'up' from here */
      if(NIL_P(obj)) return FALSE;
    } /* otherwise, bypass all visibility checks */
    return TRUE;
  }

  /* Check that we are allowed to call this method */
  if(TUPLE_P(msg->method)) {
    vis = tuple_at(state, msg->method, 0);
    if(vis == state->global->sym_private) {
      /* We stop on private methods. */
      msg->method = Qfalse;
      return TRUE;
    } else if(vis == state->global->sym_protected) {
      /* If it's protected, bail if the receiver isn't the same
         class as self. */
      if(!object_kind_of_p(state, c->self, msg->module)) {
        msg->method = Qfalse;
        return TRUE;
      }
    }

    obj = tuple_at(state, msg->method, 1);
    if(NIL_P(obj)) {
      /* The method was callable, but we need to keep looking
       * for the implementation, so make the invocation bypass all further
       * visibility checks */
      msg->priv = TRUE;
      return FALSE;
    }
  }

  return TRUE;
}

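/* A method-table entry is either the method object itself or a 2-tuple of
   (visibility symbol, method object); UNVIS_METHOD unwraps the tuple form
   so callers see just the method. */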
#define UNVIS_METHOD(var) if(TUPLE_P(var)) { var = tuple_at(state, var, 1); }

static inline int cpu_find_method(STATE, cpu c, struct message *msg) {
  OBJECT tbl, klass;
  struct method_cache *ent;

#if USE_GLOBAL_CACHING
  ent = state->method_cache + CPU_CACHE_HASH(msg->klass, msg->name);
  /* We hit a hole. Stop. */
  if(ent->name == msg->name && ent->klass == msg->klass) {

    /* TODO does this need to check for protected? */
    if(msg->priv || ent->is_public) {
      msg->method = ent->method;
      msg->module = ent->module;

#if TRACK_STATS
      state->cache_hits++;
#endif
      return TRUE;
    }
  }

#if TRACK_STATS
  if(ent->name) {
    state->cache_collisions++;
  }
  state->cache_misses++;
#endif
#endif

  klass = msg->klass;

  do {

    /* Validate klass is valid even. */
    if(NUM_FIELDS(klass) <= CLASS_f_SUPERCLASS) {
      printf("Warning: encountered invalid class (not big enough).\n");
      sassert(0);
      return FALSE;
    }

    tbl = module_get_method_table(klass);

    /* Ok, rather than assert, i'm going to just bail. Makes the error
       a little strange, but handle-able in ruby land. */

    if(!LOOKUPTABLE_P(tbl)) {
      printf("Warning: encountered invalid module (methods not a LookupTable).\n");
      sassert(0);
      return FALSE;
    }

    msg->module = klass;
    if(cpu_check_for_method(state, c, tbl, msg)) {
      goto cache;
    }

    klass = class_get_superclass(klass);
    if(NIL_P(klass)) break;

  } while(1);

cache:

  if(!RTEST(msg->method)) return FALSE;

#if USE_GLOBAL_CACHING
  /* Update the cache. */
  if(RTEST(msg->method)) {
    ent->klass = msg->klass;
    ent->name = msg->name;
    ent->module = klass;

    if(TUPLE_P(msg->method)) {
      ent->method = NTH_FIELD(msg->method, 1);
      if(NTH_FIELD(msg->method, 0) == state->global->sym_public) {
        ent->is_public = TRUE;
      } else {
        ent->is_public = FALSE;
      }

      msg->method = ent->method;
    } else {
      ent->method = msg->method;
      ent->is_public = TRUE;
    }
  }
#else
  if(RTEST(msg->method)) {
    UNVIS_METHOD(msg->method);
  }
#endif

  return TRUE;
}

OBJECT exported_cpu_find_method(STATE, cpu c, OBJECT klass, OBJECT name, OBJECT *mod) {
  struct message msg;

  msg.name = name;
  msg.klass = klass;
  msg.recv = Qnil;
  msg.priv = TRUE;
  msg.module = Qnil;
  msg.method = Qnil;

  if(!cpu_find_method(state, c, &msg)) {
    *mod = Qnil;
    return Qnil;
  }

  *mod = msg.module;
  return msg.method;
}

OBJECT cpu_locate_method_on(STATE, cpu c, OBJECT obj, OBJECT sym, OBJECT include_private) {
  struct message msg;

  msg.recv = obj;
  msg.name = sym;
  msg.klass = _real_class(state, obj);
  msg.priv = TRUE_P(include_private);
  msg.method = Qnil;
  msg.module = Qnil;

  if(cpu_find_method(state, c, &msg)) {
    if(RTEST(msg.method)) {
      return tuple_new2(state, 2, msg.method, msg.module);
    }
  }

  return Qnil;
}

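/* Like cpu_find_method, but falls back to method_missing: if the requested
   method can't be found, the message is rewritten to target method_missing
   (and msg->missing is set) so the caller can push the original name as an
   extra argument. Returns FALSE only if even method_missing is unavailable. */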
static inline int cpu_locate_method(STATE, cpu c, struct message *msg) {
  int ret;
  struct message missing;

#if ENABLE_DTRACE
  if(RUBINIUS_VM_LOOKUP_BEGIN_ENABLED()) {
    RUBINIUS_VM_LOOKUP_BEGIN();
  }
#endif

  ret = TRUE;

  if(cpu_find_method(state, c, msg)) goto done;

  missing = *msg;
  missing.priv = TRUE;
  missing.name = state->global->method_missing;

  /* If we couldn't even find method_missing. bad. */
  if(!cpu_find_method(state, c, &missing)) { ret = FALSE; goto done; }

  msg->method = missing.method;
  msg->module = missing.module;
  msg->missing = TRUE;

done:
#if ENABLE_DTRACE
  if(RUBINIUS_VM_LOOKUP_END_ENABLED()) {
    RUBINIUS_VM_LOOKUP_END();
  }
#endif
  // printf("Found method: %p\n", mo);

  return ret;
}

static inline OBJECT cpu_check_serial(STATE, cpu c, OBJECT obj, OBJECT sym, int serial) {
  struct message msg;

  msg.name = sym;
  msg.recv = obj;
  msg.klass = _real_class(state, obj);
  msg.priv = TRUE;

  if(!cpu_find_method(state, c, &msg)) {
    return Qfalse;
  }

  if(N2I(fast_fetch(msg.method, CMETHOD_f_SERIAL)) == serial) {
    return Qtrue;
  }

  return Qfalse;
}

OBJECT cpu_compile_method(STATE, OBJECT cm) {
  OBJECT ba, bc;
  int target_size;

  ba = cmethod_get_compiled(cm);
  bc = cmethod_get_bytecodes(cm);

  /* If we're direct threaded, the compiled version is an array of the pointer
   * size. */
#if DIRECT_THREADED
  target_size = (BYTEARRAY_SIZE(bc) / sizeof(uint32_t)) * sizeof(uintptr_t);
#else
  target_size = BYTEARRAY_SIZE(bc);
#endif

  if(NIL_P(ba) || BYTEARRAY_SIZE(ba) < target_size) {
    /* First time this method has been compiled, or size of current
       bytearray is insufficient to hold revised bytecode */
    ba = bytearray_new(state, target_size);
  }

  cpu_compile_instructions(state, bc, ba);
  cmethod_set_compiled(cm, ba);

  return ba;
}

void cpu_compile_instructions(STATE, OBJECT bc, OBJECT comp) {
  /* If this is not a big endian platform, we need to adjust
     the iseq to have the right order */
#if !CONFIG_BIG_ENDIAN && !DIRECT_THREADED
  iseq_flip(state, bc, comp);
#elif DIRECT_THREADED
  /* If we're compiled with direct threading, then translate
     the compiled version into addresses. */
  calculate_into_gotos(state, bc, comp, _dt_addresses, _dt_size);
#endif
}

static inline OBJECT _allocate_context(STATE, cpu c, OBJECT meth, int locals) {
  OBJECT ctx, ins;
  struct fast_context *fc;
  int i;

  ctx = object_memory_new_context(state->om, locals);
  if(ctx >= state->om->context_last) {
    state->om->collect_now |= OMCollectYoung;
  }

  ins = fast_fetch(meth, CMETHOD_f_COMPILED);

  if(NIL_P(ins)) {
    ins = cpu_compile_method(state, meth);
  }

  CLEAR_FLAGS(ctx);
  ctx->klass = Qnil;
  ctx->field_count = FASTCTX_FIELDS;

  fc = FASTCTX(ctx);
  fc->flags = 0;
  fc->sender = c->active_context;

  fc->method = meth;
  fc->custom_iseq = Qnil;
  fc->data = bytearray_byte_address(state, ins);
  fc->literals = fast_fetch(meth, CMETHOD_f_LITERALS);

  if(locals > 0) {
    fc->locals = object_memory_context_locals(ctx);
    CLEAR_FLAGS(fc->locals);
    fc->locals->gc_zone = 0;
    fc->locals->klass = BASIC_CLASS(tuple);
    SET_NUM_FIELDS(fc->locals, locals);

    for(i = 0; i < locals; i++) {
      SET_FIELD_DIRECT(fc->locals, i, Qnil);
    }

  } else {
    fc->locals = Qnil;
  }
  // printf("Locals for %p at %p (%d, %d)\n", ctx, fc->locals, num_lcls, FASTCTX(ctx)->size);

  return ctx;
}

static inline OBJECT cpu_create_context(STATE, cpu c, const struct message *msg) {
  OBJECT ctx;
  struct fast_context *fc;

  ctx = _allocate_context(state, c, msg->method, N2I(cmethod_get_local_count(msg->method)));
  fc = FASTCTX(ctx);

  fc->ip = 0;
  cpu_flush_sp(c);
  fc->sp = c->sp;
  /* fp points to the location on the stack as the context
     was being created. */
  fc->fp = c->sp;

  fc->block = msg->block;
  fc->self = msg->recv;
  fc->argcount = msg->args;
  fc->name = msg->name;
  fc->method_module = msg->module;
  fc->type = FASTCTX_NORMAL;

#if ENABLE_DTRACE
  if (RUBINIUS_FUNCTION_ENTRY_ENABLED()) {
    dtrace_function_entry(state, c, msg);
  }
#endif

  return ctx;
}

OBJECT cpu_create_block_context(STATE, cpu c, OBJECT env, int sp) {
  OBJECT ctx;
  struct fast_context *fc;

  ctx = _allocate_context(state, c, blokenv_get_method(env),
                          N2I(blokenv_get_local_count(env)));
  fc = FASTCTX(ctx);

  fc->ip = N2I(blokenv_get_initial_ip(env));
  fc->sp = sp;

  fc->block = Qnil;
  fc->self = Qnil;
  fc->argcount = 0;

  /* env lives here */
  fc->name = env;

  fc->method_module = Qnil;
  fc->type = FASTCTX_BLOCK;

  return ctx;
}

void cpu_raise_from_errno(STATE, cpu c, const char *msg) {
  OBJECT cls;
  char buf[32];

  cls = lookuptable_fetch(state, state->global->errno_mapping, I2N(errno));
  if(NIL_P(cls)) {
    cls = state->global->exc_arg;
    snprintf(buf, sizeof(buf), "Unknown errno %d", errno);
    msg = buf;
  }

  cpu_raise_exception(state, c, cpu_new_exception(state, c, cls, msg));
}

void cpu_raise_arg_error_generic(STATE, cpu c, const char *msg) {
  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}

void cpu_raise_arg_error(STATE, cpu c, int args, int req) {
  char msg[1024];
  snprintf(msg, 1024, "wrong number of arguments (got %d, required %d)", args, req);
  cpu_flush_ip(c);
  cpu_raise_exception(state, c, cpu_new_exception(state, c, state->global->exc_arg, msg));
}

void cpu_raise_primitive_failure(STATE, cpu c, int primitive_idx) {
  char msg[1024];
  OBJECT primitive_failure;
  snprintf(msg, 1024, "Primitive with index (%d) failed", primitive_idx);

  primitive_failure = cpu_new_exception(state, c, state->global->exc_primitive_failure, msg);
  cpu_raise_exception(state, c, primitive_failure);
}

static int cpu_execute_primitive(STATE, cpu c, const struct message *msg, int prim) {

#if ENABLE_DTRACE
  if (RUBINIUS_FUNCTION_PRIMITIVE_ENTRY_ENABLED()) {
    dtrace_function_primitive_entry(state, c, msg);
  }
#endif

  c->in_primitive = prim;
  if(cpu_perform_system_primitive(state, c, prim, msg)) {
    /* Worked! */
    c->in_primitive = 0;

    if(EXCESSIVE_TRACING) {
      printf("%05d: Called prim %s => %s on %s.\n", c->depth,
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        rbs_symbol_to_cstring(state, msg->name), _inspect(msg->recv));
    }

#if ENABLE_DTRACE
    if (RUBINIUS_FUNCTION_PRIMITIVE_RETURN_ENABLED()) {
      dtrace_function_primitive_return(state, c, msg);
    }
#endif

    return TRUE;
  }

  c->in_primitive = 0;

  if(EXCESSIVE_TRACING) {
    printf("[[ Primitive failed! -- %d ]]\n", prim);
  }

  return FALSE;
}

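/* Resolve and run a method's primitive, if it has one. The primitive field
   starts out as a symbol naming the primitive; on first use it is converted
   to a fixnum index (or cleared to nil if the name doesn't resolve), so later
   calls can dispatch without re-resolving. Returns TRUE only if the primitive
   ran and succeeded. */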
static inline int cpu_try_primitive(STATE, cpu c, const struct message *msg) {
  int prim;
  OBJECT prim_obj;

  prim_obj = fast_fetch(msg->method, CMETHOD_f_PRIMITIVE);

  if(NIL_P(prim_obj)) {
    return FALSE;
  } else if(!FIXNUM_P(prim_obj)) {
    if(SYMBOL_P(prim_obj)) {
      prim = calc_primitive_index(state, symbol_to_string(state, prim_obj));
    } else {
      prim = -1;
    }
    cmethod_set_primitive(msg->method, I2N(prim));
  } else {
    prim = N2I(prim_obj);
  }

  if(prim < 0) {
    cmethod_set_primitive(msg->method, Qnil);
    return FALSE;
  }

  return cpu_execute_primitive(state, c, msg, prim);
}

/* Lowest-level ("raw") functions for moving in and out of a method; they
   adjust the cpu registers directly. */
/* Stack offset is used to adjust sp when it's saved so when
   this context is swapped back in, any arguments are automatically
   removed from the stack */
inline void cpu_save_registers(STATE, cpu c, int offset) {
  struct fast_context *fc;

  cpu_flush_ip(c);
  cpu_flush_sp(c);
  fc = FASTCTX(c->active_context);
  fc->sp = c->sp - offset;
  fc->ip = c->ip;
}

inline void cpu_yield_debugger_check(STATE, cpu c) {
  /* Yield to the debugger if flag is set */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    if(EXCESSIVE_TRACING) {
      printf("Yielding to debugger due to context change\n");
    }
    cpu_yield_debugger(state, c);
  }
}

inline void cpu_restore_context_with_home(STATE, cpu c, OBJECT ctx, OBJECT home) {
  struct fast_context *fc;

  /* Home is actually the main context here because it's the method
     context that holds all the data. So if it's a fast context, we restore
     its data, then if ctx != home, we restore a little more */

  fc = FASTCTX(home);
  CHECK_PTR(fc->self);
  CHECK_PTR(fc->method);

  c->argcount = fc->argcount;
  c->self = fc->self;

  /* Only happens if we're restoring a block. */
  if(ctx != home) {
    fc = FASTCTX(ctx);
  }

  c->data = fc->data;
  c->type = fc->type;

  c->locals = FASTCTX(home)->locals;

  c->sender = fc->sender;
  c->sp = fc->sp;
  c->ip = fc->ip;
  c->fp = fc->fp;

  cpu_cache_ip(c);
  cpu_cache_sp(c);

  c->home_context = home;
  c->active_context = ctx;
}

/* Layer 2 method movement: use lower level only. */

/* Used in debugging. Verifies that the expected depth is the actual depth. */
static void _verify_depth(cpu c) {
  int count = 0;
  OBJECT ctx = c->active_context;

  while(!NIL_P(ctx)) {
    count++;
    ctx = FASTCTX(ctx)->sender;
  }

  assert(count == c->depth);
}

inline void cpu_activate_context(STATE, cpu c, OBJECT ctx, OBJECT home, int so) {
  c->depth++;

  if(c->active_context != Qnil) {
    cpu_save_registers(state, c, so);
  }

  cpu_restore_context_with_home(state, c, ctx, home);
  cpu_yield_debugger_check(state, c);
}

/* Layer 2.5: Uses lower layers to return to the calling context.
   Returning ends here. */

void nmc_activate(STATE, cpu c, OBJECT nmc, OBJECT val, int reraise);

inline int cpu_simple_return(STATE, cpu c, OBJECT val) {
  OBJECT current, destination, home;

#if ENABLE_DTRACE
  if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
    dtrace_function_return(state, c);
  }
#endif

  c->depth--;

  current = c->active_context;
  c->active_context = Qnil;
  destination = cpu_current_sender(c);

  // printf("Rtrnng frm %p (%d)\n", current, FASTCTX(current)->size);

  if(destination == Qnil) {
    object_memory_retire_context(state->om, current);

    /* Thread exiting, reschedule.. */
    if(c->current_thread != c->main_thread) {
      THDEBUG("%d: thread reached end, dead.\n", getpid());
      cpu_thread_exited(state, c);
      return FALSE;
    /* Switch back to the main task... */
    } else if(c->current_task != c->main_task) {
      cpu_task_select(state, c, c->main_task);
      return FALSE;
    }

    /* The return value of the script is passed on the stack. */
    stack_push(val);
  } else {
    /* retire this one context. */
    object_memory_retire_context(state->om, current);

    /* Now, figure out if the destination is a block, so we pass the correct
       home to restore_context */
    if(block_context_p(state, destination)) {
      home = blokctx_home(state, destination);
    } else {
      home = destination;
    }

    if(EXCESSIVE_TRACING) {
      if(stack_context_p(destination)) {
        printf("Returning to a stack context %d / %d (%s).\n", (int)c->active_context, (int)destination, c->active_context - destination == CTX_SIZE ? "stack" : "REMOTE");
      } else {
        printf("Returning to %s.\n", _inspect(destination));
      }
    }

    if(FASTCTX(home)->type == FASTCTX_NMC) {
      nmc_activate(state, c, home, val, FALSE);
      /* We return because nmc_activate will setup the cpu to do whatever
         it needs to next. */
      return TRUE;
    } else {
      cpu_restore_context_with_home(state, c, destination, home);
      stack_push(val);
    }
  }

  return TRUE;
}

/* Used by raise_exception to restore the previous context. */
int cpu_unwind(STATE, cpu c) {
  OBJECT current, destination, home;
  current = c->active_context;
  c->active_context = Qnil;
  destination = cpu_current_sender(c);

#if ENABLE_DTRACE
  if (RUBINIUS_FUNCTION_RETURN_ENABLED()) {
    dtrace_function_return(state, c);
  }
#endif

  c->depth--;

  if(destination == Qnil) {
    object_memory_retire_context(state->om, current);

    /* Thread exiting, reschedule.. */
    if(c->current_thread != c->main_thread) {
      THDEBUG("%d: thread reached end, dead.\n", getpid());
      cpu_thread_exited(state, c);
      return FALSE;
    /* Switch back to the main task... */
    } else if(c->current_task != c->main_task) {
      cpu_task_select(state, c, c->main_task);
      return FALSE;
    }

    stack_push(Qnil);
    return FALSE;

  } else {
    /* retire this one context. */
    object_memory_retire_context(state->om, current);

    /* Now, figure out if the destination is a block, so we pass the correct
       home to restore_context */
    if(block_context_p(state, destination)) {
      home = blokctx_home(state, destination);
    } else {
      home = destination;
    }

    /* Commenting out 02.01.08 - Caleb Tennis.
       I don't know the purpose of this code, but if an exception is thrown from
       a rb_funcall in subtend, this causes an endless loop in cpu_return_to_sender.

       Commenting it out fixes that for now.
       Hopefully someone smarter than me knows a better fix for the future.

       Skip over NMCs for now.

    if(exception && FASTCTX(destination)->type == FASTCTX_NMC) {
      c->active_context = destination;
      return cpu_return_to_sender(state, c, val, FALSE, TRUE);
    }
    */

    /* Ok, reason we'd be restoring a native context:
       1) the native context used rb_funcall and we need to return
          it the result of the call.
    */
    cpu_restore_context_with_home(state, c, destination, home);
  }

  return TRUE;
}

/* Layer 3: goto. Basically jumps directly into the specified method.
   no lookup required. */

inline void cpu_goto_method(STATE, cpu c, OBJECT recv, OBJECT meth,
                            int count, OBJECT name, OBJECT block) {
  OBJECT ctx;
  struct message msg;

  msg.recv = recv;
  msg.name = name;
  msg.method = meth;
  msg.module = Qnil;
  msg.block = block;
  msg.args = count;

  if(cpu_try_primitive(state, c, &msg)) return;

  OBJECT scope = cmethod_get_staticscope(meth);
  if(NIL_P(scope)) {
    cmethod_set_staticscope(meth, state->global->top_scope);
  }

  ctx = cpu_create_context(state, c, &msg);
  cpu_activate_context(state, c, ctx, ctx, 0);
}

/* Layer 3: hook. Shortcut for running hook methods. */

inline void cpu_perform_hook(STATE, cpu c, OBJECT recv, OBJECT meth, OBJECT arg) {
  OBJECT rub, vm;
  struct message msg;

  msg.name = meth;
  msg.recv = recv;
  msg.klass = _real_class(state, recv);
  msg.priv = TRUE;

  if(!cpu_find_method(state, c, &msg)) return;

  rub = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
  if(NIL_P(rub)) return;

  vm = rbs_const_get(state, rub, "VM");
  if(NIL_P(vm)) return;

  /* The top of the stack contains the value that should remain on the stack.
     We pass that to the perform_hook call so it is returned and stays on
     the top of the stack. That's why we say there are 4 args. */

  stack_push(arg);
  stack_push(meth);
  stack_push(recv);

  cpu_send(state, c, vm, SYM("perform_hook"), 4, Qnil);
}

/* Layer 4: High level method calling. */

/* Layer 4: direct activation. Used for calling a method that's already
   been looked up. */
static inline void cpu_activate_method(STATE, cpu c, struct message *msg) {
  OBJECT ctx;

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  ctx = cpu_create_context(state, c, msg);

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
}

static inline void cpu_perform(STATE, cpu c, const struct message *msg) {
  OBJECT ctx;

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
  cpu_yield_debugger_check(state, c);
}

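/* Send-site specialization overview: every send site starts out with the
   generic _cpu_ss_basic lookup installed (see cpu_initialize_sendsite).
   After a successful lookup the site is patched to a specialized handler:
   _cpu_ss_mono caches the last class/method pair, _cpu_ss_missing caches a
   method_missing target, and _cpu_ss_mono_prim / _cpu_ss_mono_ffi dispatch
   straight into a primitive or FFI stub. Sites that keep missing are
   eventually disabled via cpu_patch_disabled. */
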
static inline void cpu_patch_mono(struct message *msg);

static inline void cpu_patch_missing(struct message *msg);

static void _cpu_ss_basic(struct message *msg) {
  msg->missing = 0;
  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If it's not method_missing, cache the details of msg in the send_site */
  if(!msg->missing) {
    cpu_patch_mono(msg);
  } else {
    cpu_patch_missing(msg);
    msg->args += 1;
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_initialize_sendsite(STATE, struct send_site *ss) {
  ss->lookup = _cpu_ss_basic;
}

static void _cpu_ss_disabled(struct message *msg) {
  msg->missing = 0;
  const STATE = msg->state;
  const cpu c = msg->c;

  sassert(cpu_locate_method(state, c, msg));

  /* If the lookup resolved to method_missing, push the name as an extra argument */
  if(msg->missing) {
    msg->args += 1;
    stack_push(msg->name);
  }

  if(cpu_try_primitive(state, c, msg)) return;

  cpu_perform(state, c, msg);
}

void cpu_patch_disabled(struct message *msg, struct send_site *ss) {
  ss->data1 = ss->data2 = ss->data3 = Qnil;
  ss->data4 = 0;
  ss->c_data = NULL;
  ss->lookup = _cpu_ss_disabled;

  _cpu_ss_disabled(msg);
}

#define SS_DISABLE_THRESHOLD 10000
#define SS_MISSES(ss) if(++ss->misses > SS_DISABLE_THRESHOLD) { cpu_patch_disabled(msg, ss); } else

/* Send Site specialization 1: execute a primitive directly. */

#define CHECK_CLASS(msg) (_real_class(msg->state, msg->recv) != SENDSITE(msg->send_site)->data1)

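/* Each specialized handler below first verifies the cached receiver class
   with CHECK_CLASS. On a mismatch, SS_MISSES bumps the site's miss counter
   and falls back to the generic _cpu_ss_basic lookup; once the counter
   crosses SS_DISABLE_THRESHOLD the site is patched over to _cpu_ss_disabled
   permanently. */
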
static void _cpu_ss_mono_prim(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);
  prim_func func;
  int _orig_sp;
  OBJECT *_orig_sp_ptr;
  cpu c = msg->c;

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  ss->hits++;

  _orig_sp_ptr = c->sp_ptr;
  _orig_sp = c->sp;

  func = (prim_func)ss->c_data;

  msg->method = ss->data2;
  msg->module = ss->data3;

  c->in_primitive = ss->data4;

  if(!func(msg->state, msg->c, msg)) {
    c->in_primitive = 0;
    c->sp_ptr = _orig_sp_ptr;
    c->sp = _orig_sp;

    cpu_perform(msg->state, msg->c, msg);
  } else {
    c->in_primitive = 0;
  }
}

/* Called before a primitive is run the slow way, allowing the send_site to be patched
 * to call the primitive directly. */
void cpu_patch_primitive(STATE, const struct message *msg, prim_func func, int prim) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  /* If this sendsite is disabled, leave it disabled. */
  if(ss->lookup == _cpu_ss_disabled) return;

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->data4 = prim;
  ss->c_data = (void*)func;
  ss->lookup = _cpu_ss_mono_prim;
}

/* Send Site specialization 2: Run an ffi function directly. */
static void _cpu_ss_mono_ffi(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  ss->hits++;

  ffi_call(msg->state, msg->c, nfunc_get_data(ss->data2));
}

/* Called before an FFI function is run the slow way, allowing the send_site to be patched
 * to call the function directly. */
void cpu_patch_ffi(STATE, const struct message *msg) {
  struct send_site *ss;

  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);

  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);

  ss->c_data = *DATA_STRUCT(nfunc_get_data(msg->method), void**);
  ss->lookup = _cpu_ss_mono_ffi;

  return;
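
  /* NOTE: everything past the return above is unreachable; it appears to be
     an older variant of this patch routine that was left in place. */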
  if(!REFERENCE_P(msg->send_site)) return;

  ss = SENDSITE(msg->send_site);
  SET_STRUCT_FIELD(msg->send_site, ss->data1, msg->recv);
  SET_STRUCT_FIELD(msg->send_site, ss->data2, nfunc_get_data(msg->method));
  ss->lookup = _cpu_ss_mono_ffi;
}

/* Send Site specialization 3: simple monomorphic last-implementation cache. */
static void _cpu_ss_mono(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  ss->hits++;

  msg->method = ss->data2;
  msg->module = ss->data3;

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
}

/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_mono in +ss+, so
 * that the next time +ss+ is used, it will reuse the cached details. */
static inline void cpu_patch_mono(struct message *msg) {
  STATE = msg->state;

  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_mono;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}

static void _cpu_ss_missing(struct message *msg) {
  struct send_site *ss = SENDSITE(msg->send_site);
  cpu c = msg->c;

  if(CHECK_CLASS(msg)) {
    SS_MISSES(ss) {
      _cpu_ss_basic(msg);
    }
    return;
  }

  ss->hits++;

  msg->method = ss->data2;
  msg->module = ss->data3;

  msg->args += 1;
  stack_push(msg->name);

  if(cpu_try_primitive(msg->state, msg->c, msg)) return;

  cpu_perform(msg->state, msg->c, msg);
}

/* Saves the details of +msg+ in +ss+ and installs _cpu_ss_missing in +ss+, so
 * that the next time +ss+ is used, it will reuse the cached details. */
static inline void
cpu_patch_missing(struct message *msg) {
  STATE = msg->state;
  struct send_site *ss = SENDSITE(msg->send_site);

  ss->lookup = _cpu_ss_missing;
  SET_STRUCT_FIELD(msg->send_site, ss->data1, _real_class(state, msg->recv));
  SET_STRUCT_FIELD(msg->send_site, ss->data2, msg->method);
  SET_STRUCT_FIELD(msg->send_site, ss->data3, msg->module);
}

static void _cpu_on_no_method(STATE, cpu c, const struct message *msg) {
  char *str;
  OBJECT exc;

  exc = rbs_const_get(state, BASIC_CLASS(object), "RuntimeError");

  str = malloc(1024);
  snprintf(str, 1024, "Unable to find any version of '%s' to run", _inspect(msg->name));

  cpu_raise_exception(state, c, cpu_new_exception(state, c, exc, str));

  free(str);
}

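/* When TIME_LOOKUP is enabled, cpu_send_message below accumulates the time
   spent in send-site dispatch into state->lookup_time, which is reported by
   cpu_show_lookup_time at the top of this file. */
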
/* Layer 4: send. Primary method calling function. */
inline void cpu_send_message(STATE, cpu c, struct message *msg) {
  struct send_site *ss;

#ifdef TIME_LOOKUP
  uint64_t start = measure_cpu_time();
#endif

  ss = SENDSITE(msg->send_site);
  msg->state = state;
  msg->c = c;
  msg->name = ss->name;
  ss->lookup(msg);

#ifdef TIME_LOOKUP
  state->lookup_time += (measure_cpu_time() - start);
#endif
}

void cpu_send_message_external(STATE, cpu c, struct message *msg) {
  OBJECT ctx;

  if(!cpu_locate_method(state, c, msg)) {
    _cpu_on_no_method(state, c, msg);
    return;
  }

  if(msg->missing) {
    msg->args += 1;
    stack_push(msg->name);
  } else {
    if(cpu_try_primitive(state, c, msg)) return;
  }

  c->depth++;
  if(c->depth == CPU_MAX_DEPTH) {
    machine_handle_fire(FIRE_STACK);
  }

  ctx = cpu_create_context(state, c, msg);

  /* If it was missing, setup some extra data in the MethodContext for
     the method_missing method to check out, to see why it was missing. */
  if(msg->missing && msg->priv) {
    methctx_reference(state, ctx);
    object_set_ivar(state, ctx, SYM("@send_private"), Qtrue);
  }

  cpu_save_registers(state, c, msg->args);
  cpu_restore_context_with_home(state, c, ctx, ctx);
}

/* A version used when there is no send_site. */
void cpu_send(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block) {
  struct message msg;

  msg.recv = recv;
  msg.name = sym;
  msg.args = args;
  msg.block = block;
  msg.klass = _real_class(state, recv);
  msg.priv = c->call_flags;
  msg.missing = 0;
  msg.send_site = Qnil;

  c->call_flags = 0;

  cpu_send_message_external(state, c, &msg);
}

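/* Exception dispatch: each CompiledMethod carries an exception table whose
   entries are tuples of (start_ip, end_ip, handler_ip). cpu_raise_exception
   walks the chain of active contexts and, in each one, scans that table for
   a range covering the current ip; if none matches it unwinds the context
   and keeps looking. */
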
void cpu_raise_exception(STATE, cpu c, OBJECT exc) {
  OBJECT ctx, table, ent;
  int cur, total, target, idx, l, r;
  c->exception = exc;
  ctx = c->active_context;

  cpu_flush_ip(c);
  cpu_save_registers(state, c, 0);

  /* NOTE: using return_to_sender worries me a little because it can
     switch to a different task if you try to return off the top
     of a task.. */

  while(!NIL_P(ctx)) {
    if(c->type == FASTCTX_NMC) goto skip;

    table = cmethod_get_exceptions(cpu_current_method(state, c));

    if(!table || NIL_P(table)) goto skip;

    cur = c->ip;
    total = NUM_FIELDS(table);
    target = 0;
    for(idx=0; idx < total; idx++) {
      ent = tuple_at(state, table, idx);
      l = N2I(tuple_at(state, ent, 0));
      r = N2I(tuple_at(state, ent, 1));
      if(cur >= l && cur <= r) {
        target = N2I(tuple_at(state, ent, 2));
        c->ip = target;
        cpu_cache_ip(c);
        return;
      }
    }

skip:
    /* unwind returns FALSE if we can't unwind anymore. */
    if(!cpu_unwind(state, c)) break;
    ctx = c->active_context;
  }

  /* Reset it because it can get overridden in the return_to_senders. */
  c->exception = exc;

  // printf("Unable to find exception handler, i'm confused.\n");
  return;
}

void cpu_yield_debugger(STATE, cpu c) {
  /* Ensure the DEBUG_ON_CTXT_CHANGE flag is cleared so we don't try
     to yield more than once */
  if(TASK_FLAG_P(c, TASK_DEBUG_ON_CTXT_CHANGE)) {
    TASK_CLEAR_FLAG(c, TASK_DEBUG_ON_CTXT_CHANGE);
    struct cpu_task *task = (struct cpu_task*)BYTES_OF(c->current_task);
    TASK_CLEAR_FLAG(task, TASK_DEBUG_ON_CTXT_CHANGE);
  }

  cpu_flush_sp(c);
  cpu_flush_ip(c);
  methctx_reference(state, c->active_context);

  OBJECT dbg = c->debug_channel;
  if(dbg == Qnil) {
    /* No debug channel on the task, so use the VM default one (if any) */
    OBJECT mod, vm;
    mod = rbs_const_get(state, BASIC_CLASS(object), "Rubinius");
    if(!NIL_P(mod)) {
      vm = rbs_const_get(state, mod, "VM");
      if(!NIL_P(vm)) {
        dbg = object_get_ivar(state, vm, SYM("@debug_channel"));
      }
    }
  }

  if(dbg != Qnil) {
    if(c->control_channel == Qnil) {
      /* No control channel on the task, so create one */
      c->control_channel = cpu_channel_new(state);
    }

    sassert(cpu_channel_has_readers_p(state, dbg));
    cpu_channel_send(state, c, dbg, c->current_thread);
    /* This is so when this task is reactivated, the sent value won't be placed
       on the stack, keeping the stack clean. */
    TASK_SET_FLAG(c, TASK_NO_STACK);
    cpu_channel_receive(state, c, c->control_channel, c->current_thread);
  } else {
    cpu_raise_arg_error_generic(state, c, "Attempted to switch to debugger, no debugger installed");
  }
}

const char *cpu_op_to_name(STATE, char op) {
#include "shotgun/lib/instruction_names.h"
  return get_instruction_name(op);
}

void state_collect(STATE, cpu c);
void state_major_collect(STATE, cpu c);

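/* Main interpreter loop. With DIRECT_THREADED, the code included from
   instruction_dt.gen is entered via computed gotos (NEXT_OP); otherwise each
   opcode is fetched here and dispatched through the code included from
   instructions.gen. The check_interrupts label handles GC requests and
   pending events/threads between instructions. */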
void cpu_run(STATE, cpu c, int setup) {
  IP_TYPE op;
  IP_TYPE *ip_ptr = NULL;
  const char *firesuit_arg;
  struct rubinius_globals *global = state->global;

  c->ip_ptr = &ip_ptr;

  if(setup) {
    (void)op;
#if DIRECT_THREADED
    SETUP_DT_ADDRESSES;
    return;
#else
    return;
#endif
  }

  /* recache ip_ptr to make it valid. */
  cpu_cache_ip(c);

  current_machine->g_use_firesuit = 1;
  current_machine->g_access_violation = 0;
  getcontext(&current_machine->g_firesuit);

  /* Ok, we jumped back here because something went south. */
  if(current_machine->g_access_violation) {
    switch(current_machine->g_access_violation) {
    case FIRE_ACCESS:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Accessed outside bounds of object"));
      break;
    case FIRE_NULL:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c, state->global->exc_arg,
          "Attempted to access field of non-reference (null pointer)"));
      break;
    case FIRE_ASSERT:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "VMAssertion"),
          "An error has occurred within the VM"));
      break;
    case FIRE_TYPE:
      object_type_to_type(current_machine->g_firesuit_arg, firesuit_arg);
      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Invalid type encountered %s: %s",
          current_machine->g_firesuit_message, firesuit_arg));
      free(current_machine->g_firesuit_message);
      break;
    case FIRE_STACK:
      cpu_raise_exception(state, c,
        cpu_new_exception(state, c,
          rbs_const_get(state, BASIC_CLASS(object), "SystemStackError"),
          "Maximum amount of stack space used"));
      break;
    default:
      cpu_raise_exception(state, c,
        cpu_new_exception2(state, c, global->exc_type,
          "Unknown firesuit reason: %d", current_machine->g_access_violation));
      break;
    }
  }

insn_start:
  while(c->active_context != Qnil) {

#if DIRECT_THREADED
    if(EXCESSIVE_TRACING) {
      printf("%-15s: => %p\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        (void*)*ip_ptr);
    }
    NEXT_OP;
    #include "shotgun/lib/instruction_dt.gen"
#else

next_op:
    op = *ip_ptr++;

    if(EXCESSIVE_TRACING) {
      cpu_flush_ip(c);
      cpu_flush_sp(c);
      printf("%-15s: OP: %s (%d/%d/%d)\n",
        rbs_symbol_to_cstring(state, cmethod_get_name(cpu_current_method(state, c))),
        cpu_op_to_name(state, op), op, c->ip, c->sp);
    }

    #include "shotgun/lib/instructions.gen"

#endif
check_interrupts:
    if(state->om->collect_now) {

#if ENABLE_DTRACE
      if (RUBINIUS_GC_BEGIN_ENABLED()) {
        dtrace_gc_begin(state);
      }
#endif
      int cm = state->om->collect_now;

      /* Collect the first generation. */
      if(cm & OMCollectYoung) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting young objects. ]]\n");
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
        }
        state_collect(state, c);
        if(EXCESSIVE_TRACING) {
          printf("[[ ctx=%p, data=%p, ip_ptr=%p, ip=%d, op=%d ]]\n", (void*)c->active_context, cpu_current_data(c), ip_ptr, c->ip, *ip_ptr);
          printf("[[ Finished collect. ]]\n");
        }
      }

      /* Collect the old generation. */
      if(cm & OMCollectMature) {
        if(EXCESSIVE_TRACING) {
          printf("[[ Collecting old objects. ]]\n");
        }
        state_major_collect(state, c);
        // printf("Done with major collection.\n");
      }

      /* If someone is reading the ON_GC channel, write to it to notify them. */
      if(cpu_channel_has_readers_p(state, state->global->on_gc_channel)) {
        cpu_channel_send(state, c, state->global->on_gc_channel, Qtrue);
      }

      state->om->collect_now = 0;

#if ENABLE_DTRACE
      if (RUBINIUS_GC_END_ENABLED()) {
        dtrace_gc_end(state);
      }
#endif
    }

    if(state->check_events) {
      state->check_events = 0;
      if(state->pending_events) cpu_event_runonce(state);
      if(state->pending_threads) cpu_thread_preempt(state, c);
    }
  }
}

void cpu_run_script(STATE, cpu c, OBJECT meth) {
  OBJECT name;
  name = string_to_sym(state, string_new(state, "__script__"));
  cpu_goto_method(state, c, c->main, meth, 0, name, Qnil);
}