/*
 * shotgun/lib/cpu.h — Shotgun VM interpreter core: context/task register
 * layouts, stack access macros, and the CPU/task/thread/event/FFI API.
 */
1 #ifndef RBS_CPU_H
2 #define RBS_CPU_H
4 #include <bstrlib.h>
6 /* Configuration macros. */
8 /* Enable direct threading */
9 #if CONFIG_ENABLE_DT
10 #define DIRECT_THREADED 1
11 #else
12 #define DIRECT_THREADED 0
13 #endif
15 /* Whether or not to do runtime tracing support */
16 #define EXCESSIVE_TRACING state->excessive_tracing
17 // #define EXCESSIVE_TRACING 0
19 /* For profiling of code */
20 #define USE_GLOBAL_CACHING 1
21 #define USE_INLINE_CACHING 1
23 /* number of contexts deep to allow */
24 #define CPU_MAX_DEPTH 6000
26 #define IP_TYPE uint32_t
27 #define BS_JUMP 2
29 #define CTX_FLAG_NO_LONG_RETURN (1<<0)
/*
 * Layout of the per-context registers (see CPU_REGISTERS below):
 *
 * literals: literals frame
 *   The literal frame stores items that cannot be referenced by
 *   bytecodes, such as class variables, most literal constants
 *   (numbers, strings, symbols, method call sites) and message
 *   selectors other than the special ones.
 *   TODO: describe those specials in Rubinius.
 *
 * locals: local variables frame
 *   The local variables frame stores the compiled method's
 *   local variables.
 *
 * argcount: method's arity
 *   TODO: investigate and document negative arities.
 *
 * self: message receiver
 * ip  : instruction pointer, position of the next bytecode to be
 *       executed in the CompiledMethod
 * sp  : stack pointer, current position on the stack
 * fp  : execution frame (current context) pointer
 *
 * method_module: module or class that holds the method
 */
/* Per-context register set. Expanded inside context-shaped structs
   (see struct fast_context below) so the field layout is shared.
   Field order is part of the in-memory layout — do not reorder. */
#define CPU_REGISTERS OBJECT sender; \
  OBJECT block; \
  OBJECT method; \
  OBJECT literals; \
  OBJECT locals; \
  unsigned short argcount; \
  OBJECT name; \
  OBJECT method_module; \
  void *opaque_data; \
  OBJECT self; \
  OBJECT custom_iseq; \
  IP_TYPE *data; \
  unsigned char type; \
  unsigned char flags; \
  unsigned int ip; \
  unsigned int sp; \
  unsigned int fp;
72 /* optimisation: context with direct access to size */
73 struct fast_context {
74 CPU_REGISTERS
75 unsigned int size;
77 /* fast context treats OBJECT as series of bytes instead of normal object */
78 #define FASTCTX(ctx) ((struct fast_context*)BYTES_OF(ctx))
/* Initial operand stack: 262144 OBJECT slots (1MiB with 4-byte OBJECTs). */
#define InitialStackSize 262144

/* Task flag bits. */
#define TASK_NO_STACK 1
#define TASK_DEBUG_ON_CTXT_CHANGE 2

#define TASK_FLAG_P(task, flag) (((task)->flags & (flag)) == (flag))
#define TASK_SET_FLAG(task, flag) ((task)->flags |= (flag))
/* Was `flags ^= flag`, which *toggles*: clearing an already-clear flag
   would set it. Using &= ~ makes clearing idempotent. */
#define TASK_CLEAR_FLAG(task, flag) ((task)->flags &= ~(flag))
/*
 * Per-task register set (see CPU_TASK_REGISTERS below):
 *
 * stack_slave: when tasks are duplicated they share the same stack
 *              (TODO: clarify)
 * cache_index: deprecated after the SendSite introduction
 * exception
 * stack_top: pointer to the top of the stack
 * stack_size: the size of the stack
 * enclosing_class
 * active_context: represents the current state of the interpreter
 * home_context: the home context of a BlockContext is the MethodContext
 *               whose CompiledMethod contained the block
 * main
 * paths
 * depth: stack depth
 * current_scope: current visibility scope
 * ip_ptr: instruction pointer, position of the next bytecode to be
 *         executed in the CompiledMethod
 * sp_ptr: stack pointer, current position on the stack
 * call_flags
 * debug_channel
 * flags
 * control_channel
 * blockargs: arguments passed to the block of the executed method
 */
/* Per-task register set. Expanded into struct cpu_task and the tail of
   struct rubinius_cpu so a task's state can be block-copied in and out
   on task switch (see CPU_TASKS_LOCATION). Field order is part of the
   layout — `args` must stay first; do not reorder. */
#define CPU_TASK_REGISTERS long int args; \
  unsigned long int stack_slave; \
  long int cache_index; \
  OBJECT *stack_top; \
  unsigned long int stack_size; \
  OBJECT exception; \
  OBJECT enclosing_class; \
  OBJECT active_context, home_context, main; \
  ptr_array paths; \
  int depth; \
  OBJECT current_scope; \
  IP_TYPE **ip_ptr; \
  OBJECT *sp_ptr; \
  int call_flags; \
  OBJECT debug_channel; \
  OBJECT control_channel; \
  unsigned long int flags; \
  unsigned int blockargs;
129 struct cpu_task_shared {
130 CPU_TASK_REGISTERS;
/*
 * Each Shotgun task maintains an operand stack (Shotgun is a
 * stack-based VM, though it uses a so-called spaghetti stack)
 * and a reference to the current execution context.
 * Tasks are very similar to threads, but lack pre-emption or
 * scheduling. In practice, they are similar to Ruby 1.9
 * fibres, although unlike fibres, there is currently no way
 * to co-operatively multi-task (or yield to a co-routine)
 * using Rubinius tasks.
 *
 * active     : true for active tasks
 * saved_errno: error code saved when the trouble strikes
 */
146 struct cpu_task {
147 CPU_TASK_REGISTERS;
148 unsigned int active;
149 int saved_errno;
/*
 * Normal registers are saved and restored per new method call.
 * Task registers are saved and restored when tasks are switched.
 *
 * self  : current object which the self pseudo-variable points to
 * sender: message sender
 * locals: list of local variables of the scope
 * ip    : instruction pointer
 * sp    : stack pointer
 * fp    : frame pointer
 *
 * These point to different locations on the stack;
 * see http://en.wikipedia.org/wiki/Call_stack for more details.
 */
167 struct rubinius_cpu {
168 OBJECT self, sender;
169 OBJECT locals;
170 IP_TYPE *data;
171 unsigned short type;
172 unsigned short argcount;
173 unsigned int ip;
174 unsigned int sp;
175 unsigned int fp;
177 // CPU_REGISTERS;
179 OBJECT current_task, main_task;
180 OBJECT current_thread, main_thread;
181 int in_primitive;
183 CPU_TASK_REGISTERS;
186 #define CPU_TASKS_LOCATION(cp) (((char*)cp) + offsetof(struct rubinius_cpu, args))
188 typedef struct rubinius_cpu *cpu;
189 typedef OBJECT (*cpu_sampler_collect_cb)(STATE, void*, OBJECT);
190 typedef OBJECT (*cpu_event_each_channel_cb)(STATE, void*, OBJECT);
192 #define cpu_stack_empty_p(state, cpu) (cpu->sp_ptr <= cpu->stack_top)
193 #define cpu_local_get(state, cpu, idx) (NTH_FIELD(cpu->locals, idx))
194 #define cpu_local_set(state, cpu, idx, obj) (SET_FIELD(cpu->locals, idx, obj))
/* Operand stack access; all of these assume a local variable `c` of type
   cpu is in scope at the call site. */
#define SP_PTR c->sp_ptr
#define stack_push(obj) (*++SP_PTR = obj)
#define stack_pop() (*SP_PTR--)
#define stack_top() (*SP_PTR)
#define stack_back(idx) (SP_PTR[-(idx)])
#define stack_clear(idx) (SP_PTR -= (idx))
/* NOTE: stack_pop_2/stack_pop_3 read the top values WITHOUT moving the
   stack pointer; callers pair them with stack_clear(). Wrapped in
   do/while(0) so they act as a single statement in unbraced if/else. */
#define stack_pop_2(v1, v2) do { v1 = stack_back(0); v2 = stack_back(1); } while(0)
/* Bug fix: was `#define stack_pop_3 (v1, v2, v3) ...` — the space after
   the name made it an object-like macro, so stack_pop_3(a,b,c) expanded
   to garbage referencing undefined v1/v2/v3. */
#define stack_pop_3(v1, v2, v3) do { v1 = stack_back(0); v2 = stack_back(1); v3 = stack_back(2); } while(0)
#define stack_set_top(val) (*SP_PTR = (val))
/* Accessors for the active/home context of the current task. Note some
   read the home context (locals, name, module) while others read the
   active context (method, literals). */
#define cpu_current_block(state, cpu) (FASTCTX(cpu->home_context)->block)
#define cpu_current_method(state, cpu) (FASTCTX(cpu->active_context)->method)
#define cpu_current_literals(state, cpu) (FASTCTX(cpu->active_context)->literals)
#define cpu_current_locals(state, cpu) (FASTCTX(cpu->home_context)->locals)
#define cpu_set_locals(state, cpu, obj) (FASTCTX(cpu->home_context)->locals = obj)
#define cpu_current_name(state, cpu) (FASTCTX(cpu->home_context)->name)
#define cpu_current_module(state, cpu) (FASTCTX(cpu->home_context)->method_module)
#define cpu_current_data(cpu) (FASTCTX(cpu->home_context)->data)
#define cpu_current_argcount(cpu) (cpu->argcount)
#define cpu_current_sender(cpu) (cpu->sender)
#define cpu_current_scope(state, cpu) cmethod_get_staticscope(FASTCTX(cpu->home_context)->method)

/* Write the flat ip/sp registers back from the cached pointers... */
#define cpu_flush_ip(cpu) (cpu->ip = (unsigned int)(*cpu->ip_ptr - cpu->data))
#define cpu_flush_sp(cpu) (cpu->sp = (unsigned int)(cpu->sp_ptr - cpu->stack_top))

/* ...and re-derive the cached pointers from the flat registers. */
#define cpu_cache_ip(cpu) (*(cpu->ip_ptr) = (cpu->data + cpu->ip))
#define cpu_cache_sp(cpu) (cpu->sp_ptr = (cpu->stack_top + cpu->sp))
224 cpu cpu_new(STATE);
225 void cpu_destroy(cpu c);
226 void cpu_initialize(STATE, cpu c);
227 void cpu_setup_top_scope(STATE, cpu c);
228 void cpu_initialize_context(STATE, cpu c);
229 void cpu_update_roots(STATE, cpu c, ptr_array roots, int start);
230 void cpu_activate_context(STATE, cpu c, OBJECT ctx, OBJECT home, int so);
231 int cpu_return_to_sender(STATE, cpu c, OBJECT val, int consider_block, int exception);
232 int cpu_simple_return(STATE, cpu c, OBJECT val);
233 void cpu_save_registers(STATE, cpu c, int offset);
234 void cpu_yield_debugger_check(STATE, cpu c);
236 OBJECT cpu_const_get_in_context(STATE, cpu c, OBJECT sym);
237 OBJECT cpu_const_get_from(STATE, cpu c, OBJECT sym, OBJECT under);
239 OBJECT cpu_const_get(STATE, cpu c, OBJECT sym, OBJECT under);
240 OBJECT cpu_const_set(STATE, cpu c, OBJECT sym, OBJECT val, OBJECT under);
241 void cpu_run(STATE, cpu c, int setup);
242 int cpu_dispatch(STATE, cpu c);
243 void cpu_compile_instructions(STATE, OBJECT bc, OBJECT ba);
244 OBJECT cpu_compile_method(STATE, OBJECT cm);
245 OBJECT cpu_create_block_context(STATE, cpu c, OBJECT env, int sp);
247 void cpu_set_encloser_path(STATE, cpu c, OBJECT cls);
248 void cpu_push_encloser(STATE, cpu c);
249 void cpu_add_method(STATE, cpu c, OBJECT target, OBJECT sym, OBJECT method);
250 void cpu_attach_method(STATE, cpu c, OBJECT target, OBJECT sym, OBJECT method);
251 void cpu_raise_exception(STATE, cpu c, OBJECT exc);
252 void cpu_raise_arg_error(STATE, cpu c, int args, int req);
253 void cpu_raise_arg_error_generic(STATE, cpu c, const char *msg);
254 void cpu_raise_from_errno(STATE, cpu c, const char *msg);
255 OBJECT cpu_new_exception(STATE, cpu c, OBJECT klass, const char *msg);
256 OBJECT cpu_new_exception2(STATE, cpu c, OBJECT klass, const char *msg, ...);
257 void cpu_perform_hook(STATE, cpu c, OBJECT recv, OBJECT meth, OBJECT arg);
259 void cpu_goto_method(STATE, cpu c, OBJECT recv, OBJECT meth,
260 int count, OBJECT name, OBJECT block);
262 void cpu_send(STATE, cpu c, OBJECT recv, OBJECT sym, int args, OBJECT block);
263 OBJECT cpu_locate_method_on(STATE, cpu c, OBJECT obj, OBJECT sym, OBJECT include_private);
264 void cpu_restore_context_with_home(STATE, cpu c, OBJECT ctx, OBJECT home);
265 void cpu_yield_debugger(STATE, cpu c);
267 void cpu_run_script(STATE, cpu c, OBJECT meth);
269 OBJECT exported_cpu_find_method(STATE, cpu c, OBJECT recv, OBJECT name, OBJECT *mod);
271 OBJECT cpu_unmarshal(STATE, uint8_t *str, int len, int version);
272 OBJECT cpu_marshal(STATE, OBJECT obj, int version);
273 OBJECT cpu_unmarshal_file(STATE, const char *path, int version);
274 bstring cpu_marshal_to_bstring(STATE, OBJECT obj, int version);
275 OBJECT cpu_marshal_to_file(STATE, OBJECT obj, char *path, int version);
277 void cpu_bootstrap(STATE);
278 void cpu_add_roots(STATE, cpu c, ptr_array roots);
279 void cpu_update_roots(STATE, cpu c, ptr_array roots, int start);
280 int cpu_ip2line(STATE, OBJECT meth, int ip);
283 /* Method cache functions */
284 void cpu_clear_cache(STATE, cpu c);
285 void cpu_clear_cache_for_method(STATE, cpu c, OBJECT meth, int full);
286 void cpu_clear_cache_for_class(STATE, cpu c, OBJECT klass);
288 void cpu_task_flush(STATE, cpu c);
289 OBJECT cpu_task_dup(STATE, cpu c, OBJECT cur);
290 int cpu_task_select(STATE, cpu c, OBJECT self);
291 OBJECT cpu_task_associate(STATE, cpu c, OBJECT self, OBJECT be);
292 void cpu_task_set_debugging(STATE, OBJECT self, OBJECT dc, OBJECT cc);
293 OBJECT cpu_channel_new(STATE);
294 OBJECT cpu_channel_send(STATE, cpu c, OBJECT self, OBJECT obj);
295 void cpu_channel_receive(STATE, cpu c, OBJECT self, OBJECT cur_task);
296 int cpu_channel_has_readers_p(STATE, OBJECT self);
297 void cpu_event_clear_channel(STATE, OBJECT chan);
299 OBJECT cpu_thread_new(STATE, cpu c);
300 void cpu_thread_dequeue(STATE, OBJECT thr);
301 void cpu_thread_switch(STATE, cpu c, OBJECT thr);
302 OBJECT cpu_thread_get_task(STATE, OBJECT self);
303 void cpu_thread_preempt(STATE, cpu c);
304 void cpu_thread_schedule(STATE, OBJECT self);
305 void cpu_thread_run_best(STATE, cpu c);
306 void cpu_thread_suspend_current(STATE, cpu c);
307 void cpu_thread_force_run(STATE, cpu c, OBJECT thr);
308 void cpu_thread_exited(STATE, cpu c);
309 int cpu_thread_alive_p(STATE, OBJECT self);
311 void cpu_task_disable_preemption(STATE);
312 void cpu_task_configure_preemption(STATE);
314 void cpu_sampler_collect(STATE, cpu_sampler_collect_cb, void *cb_data);
316 #define cpu_event_outstanding_p(state) (state->thread_infos != NULL)
317 #define cpu_event_update(state) if(cpu_event_outstanding_p(state)) cpu_event_runonce(state)
318 void cpu_event_runonce(STATE);
319 void cpu_event_init(STATE);
320 void cpu_event_run(STATE);
321 void cpu_event_each_channel(STATE, cpu_event_each_channel_cb, void *cb_data);
322 OBJECT cpu_event_wake_channel(STATE, cpu c, OBJECT channel, double seconds, OBJECT tag);
323 OBJECT cpu_event_wait_readable(STATE, cpu c, OBJECT channel, int fd, OBJECT buffer, int count);
324 OBJECT cpu_event_wait_writable(STATE, cpu c, OBJECT channel, int fd);
325 OBJECT cpu_event_wait_signal(STATE, cpu c, OBJECT channel, int sig);
326 OBJECT cpu_event_wait_child(STATE, cpu c, OBJECT channel, int pid, int flags);
327 int cpu_event_cancel_event(STATE, OBJECT oid);
328 void cpu_channel_register(STATE, cpu c, OBJECT self, OBJECT cur_thr);
329 void cpu_event_setup_children(STATE, cpu c);
330 void cpu_event_clear(STATE, int fd);
331 void cpu_find_waiters(STATE);
333 #define channel_set_waiting(obj, val) SET_FIELD(obj, 1, val)
334 #define channel_get_waiting(obj) NTH_FIELD(obj, 1)
335 #define channel_set_value(obj, val) SET_FIELD(obj, 2, val)
336 #define channel_get_value(obj) NTH_FIELD(obj, 2)
338 void cpu_sampler_init(STATE, cpu c);
339 void cpu_sampler_activate(STATE, int hz);
340 OBJECT cpu_sampler_disable(STATE);
/* Assert that `obj` is of the given object type, handing mismatches to
   machine_handle_type_error. Fixnums and Symbols are immediates (not
   heap references) and get dedicated checks. Uses a GCC statement
   expression; the closing `})` was restored (dropped in extraction). */
#define type_assert(obj, type, message) ({\
  if(type == FixnumType) {\
    if(!FIXNUM_P(obj)) machine_handle_type_error(obj, message); \
  } else if(type == SymbolType) { \
    if(!SYMBOL_P(obj)) machine_handle_type_error(obj, message); \
  } else {\
    if(!REFERENCE_P(obj) || obj->obj_type != type) \
      machine_handle_type_error(obj, message); \
  }\
})

/* Operand-stack access through an explicit cpu pointer. `check` is
   currently unused. The trailing semicolon was removed from
   cpu_stack_push so call sites don't expand to an extra empty
   statement (which breaks unbraced if/else). */
#define cpu_stack_push(state, c, oop, check) (*++(c)->sp_ptr = oop)
#define cpu_stack_pop(state, c) (*(c)->sp_ptr--)
#define cpu_stack_top(state, c) (*(c)->sp_ptr)
#define cpu_stack_set_top(state, c, oop) (*(c)->sp_ptr = oop)
358 #include "shotgun/lib/sendsite.h"
359 #include "shotgun/lib/selector.h"
361 void cpu_initialize_sendsite(STATE, struct send_site *ss);
362 typedef int (*prim_func)(STATE, cpu c, const struct message *msg);
363 void cpu_patch_primitive(STATE, const struct message *msg, prim_func func, int prim);
364 int cpu_perform_system_primitive(STATE, cpu c, int prim, const struct message *msg);
365 OBJECT cpu_populate_prim_names(STATE);
366 void cpu_patch_ffi(STATE, const struct message *msg);
367 void ffi_call_stub(STATE, cpu c, OBJECT ptr);
368 void ffi_call_libffi(STATE, cpu c, OBJECT ptr);
369 #define ffi_call ffi_call_libffi
370 void ffi_autorelease(OBJECT ptr, int ar);
371 OBJECT ffi_new_pointer(STATE, void *ptr);
373 #endif /* RBS_CPU_H */