typedef struct rubinius_object_t* OBJECT;
typedef void * xpointer;
typedef intptr_t native_int;
/*
 * [30 (or 62) bits of data | 2 bits of tag]
 * if tag == 00, the whole thing is a pointer to a memory location.
 * if tag == 01, the data is a fixnum
 * if tag == 10, the data is a literal
 * if tag == 11, the data is any data, using the DATA_* macros
 */
#define TAG_FIXNUM  0x1
/* literals are numbers, symbols, ranges, regexp */
#define TAG_LITERAL 0x2
#define TAG(v) (((intptr_t)v) & TAG_MASK)
#define APPLY_TAG(v, tag) ((OBJECT)(((intptr_t)v << TAG_SHIFT) | tag))
#define STRIP_TAG(v) (((intptr_t)v) >> TAG_SHIFT)
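
/* Editor's sketch, not part of the original header: round-tripping a small
 * integer through the fixnum tag, using only the macros above. */
static inline int tag_roundtrip_example(void) {
  OBJECT five = APPLY_TAG(5, TAG_FIXNUM);   /* (5 << TAG_SHIFT) | 0x1 */
  /* the low two bits carry the tag; shifting them off recovers the value */
  return TAG(five) == TAG_FIXNUM && STRIP_TAG(five) == 5;
}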
#define DATA_P(v) (TAG(v) == TAG_DATA)
#define FIXNUM_P(v) (TAG(v) == TAG_FIXNUM)
#define DATA_TAG_SYMBOL 0x3
#define DATA_TAG_CUSTOM 0x7

#define DATA_TAG(v) ((intptr_t)(v) & DATA_MASK)
#define DATA_APPLY_TAG(v, tag) (OBJECT)((v << DATA_SHIFT) | tag)
#define DATA_STRIP_TAG(v) (((intptr_t)v) >> DATA_SHIFT)

#define SYMBOL_P(v) (DATA_TAG(v) == DATA_TAG_SYMBOL)
#define CUSTOM_P(v) (DATA_TAG(v) == DATA_TAG_CUSTOM)
/* How many bits of data are available in fixnum, not including
   the sign. */
#define FIXNUM_WIDTH ((8 * sizeof(native_int)) - TAG_SHIFT - 2)
#define FIXNUM_MAX (((native_int)1 << FIXNUM_WIDTH) - 1)
#define FIXNUM_MIN (-(FIXNUM_MAX) - 2)
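
/* Worked example (editor's note, assuming TAG_SHIFT == 2 for the 2-bit tag):
 * with a 4-byte native_int, FIXNUM_WIDTH = 32 - 2 - 2 = 28, so
 *   FIXNUM_MAX =  (1 << 28) - 1   =  268435455
 *   FIXNUM_MIN = -FIXNUM_MAX - 2  = -268435457
 * With an 8-byte native_int the width is 60 and the range grows to
 * roughly +/- 1.15e18. */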
/* rubinius_object types, takes up 3 bits */
  LastObjectType   /* must remain at end */
#define object_type_to_type(object_type, type) do {\
  switch((object_type)) { \
      type = "MethodContext"; \
      type = "BlockContext"; \
    case MetaclassType: \
      type = "Metaclass"; \
      type = "MethodTable"; \
    case WrapsStructType: \
      type = "SubtendCStructure"; \
      type = "included Module"; \
      type = "MemoryPointer"; \
      type = "CompiledMethod"; \
      type = "NativeMethod"; \
    case LookupTableType: \
      type = "LookupTable"; \
/* rubinius_object gc zone, takes up two bits */
  MatureObjectZone = 1,
/* the sizeof(struct rubinius_object_t) must be a multiple of the platform
   pointer size, so that the bytes located directly after a
   struct rubinius_object_t can hold a pointer which can be
   dereferenced. (on 32 bit platforms, pointers must be aligned
   on 32bit (word) boundaries. on 64 bit platforms, pointers probably
   have to be aligned on 64bit (double word) boundaries) */
/* On a 32 bit platform, I expect rubinius_object to take up
   4 + 4 + 4 = 12 bytes.
   On a 64 bit platform, I expect it to take up
   4 + 4 + 8 = 16 bytes. */
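
/* Editor's sketch, not in the original header: with a C11 compiler the
 * alignment requirement above could be verified at compile time, placed
 * after the struct definition below:
 *
 *   _Static_assert(sizeof(struct rubinius_object_t) % sizeof(void*) == 0,
 *                  "object header must be a multiple of the pointer size");
 */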
struct rubinius_object_t {
  object_type obj_type         : 6;
  unsigned int copy_count      : 3;
  unsigned int Forwarded       : 1;
  unsigned int Remember        : 1;
  unsigned int Marked          : 1;
  unsigned int ForeverYoung    : 1;
  unsigned int CanStoreIvars   : 1;
  unsigned int StoresBytes     : 1;
  unsigned int RequiresCleanup : 1;
  unsigned int IsBlockContext  : 1;
  unsigned int IsMeta          : 1;
  unsigned int CTXFast         : 1;
  unsigned int IsTainted       : 1;
  unsigned int IsFrozen        : 1;
  unsigned int IsLittleEndian  : 1;
  unsigned int RefsAreWeak     : 1;

  uint32_t field_count;
/*
  A rubinius object can be followed by:
   - a series of fields, possibly including an ivar
   - a series of bytes (ByteArray)
   - a fast_context pointer
*/
/* Object access, lowest level. These read and set fields of an OBJECT
 * directly. Higher-level accessors are built on top of these to integrate
 * with the GC properly. */
#define CLASS_OBJECT(obj) (obj->klass)
#define SIZE_OF_OBJECT ((unsigned int)(sizeof(OBJECT)))
#define NUM_FIELDS(obj) (obj->field_count)
#define SET_NUM_FIELDS(obj, fel) (obj->field_count = fel)
/* size of rubinius_object_t plus size of fields (all in bytes) */
#define SIZE_IN_BYTES_FIELDS(fel) ((unsigned int)(sizeof(struct rubinius_object_t) + \
    fel*SIZE_OF_OBJECT))
/* size of rubinius_object_t and fields in words */
#define SIZE_IN_WORDS_FIELDS(fel) (sizeof(struct rubinius_object_t)/SIZE_OF_OBJECT + fel)
/* size of object in bytes */
#define SIZE_IN_BYTES(obj) SIZE_IN_BYTES_FIELDS(obj->field_count)
/* size of fields only */
#define SIZE_OF_BODY(obj) (obj->field_count * SIZE_OF_OBJECT)
#define ADDRESS_OF_FIELD(obj, fel) (&obj->field[fel])
#define NTH_FIELD_DIRECT(obj, fel) (obj->field[fel])
#define SET_FIELD_DIRECT(obj, fel, val) (obj->field[fel] = val)
#define BYTES_OF(obj) ((char*)obj->field)
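
/* Worked example (editor's note): using the 12-byte 32 bit header size
 * estimated above, where SIZE_OF_OBJECT == sizeof(OBJECT) == 4, an object
 * with 3 fields works out to
 *   SIZE_IN_BYTES_FIELDS(3) = 12 + 3*4 = 24 bytes
 *   SIZE_IN_WORDS_FIELDS(3) = 12/4 + 3 =  6 words
 *   SIZE_OF_BODY(obj)       = 3*4      = 12 bytes of field data */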
/* Standard Rubinius Representation

   Bit layout of special literals:

    6: false    110   & 6 = 6
   14: nil     1110   & 6 = 6
   10: true    1010   & 6 = 2
   18: undef  10010   & 6 = 2

   false and nil share the same base bit pattern, allowing RTEST
   to be a simple test for that bit pattern.
*/
#define Qfalse ((OBJECT)6L)
#define Qnil   ((OBJECT)14L)
#define Qtrue  ((OBJECT)10L)
#define Qundef ((OBJECT)18L)
/* returns TRUE when v is Ruby false, FALSE otherwise */
#define FALSE_P(v) ((OBJECT)(v) == (OBJECT)Qfalse)
/* returns TRUE when v is Ruby true, FALSE otherwise */
#define TRUE_P(v) ((OBJECT)(v) == (OBJECT)Qtrue)
/* returns TRUE if v is Ruby nil, FALSE otherwise */
#define NIL_P(v) ((OBJECT)(v) == (OBJECT)Qnil)
/* returns TRUE when value of v is undefined, FALSE otherwise */
#define UNDEF_P(v) ((OBJECT)(v) == (OBJECT)Qundef)
/* returns TRUE when v is neither false nor nil, FALSE otherwise */
#define RTEST(v) (((uintptr_t)(v) & 0x7) != 0x6)
/* returns TRUE when v is a reference, FALSE otherwise */
#define REFERENCE_P(v) (TAG(v) == TAG_REF)
/* same as REFERENCE_P, but checks that v is non-NULL first */
#define REFERENCE2_P(v) (v && REFERENCE_P(v))
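
/* Editor's sketch, not part of the original header: false (110) and nil
 * (1110) are the special literals whose low three bits are 0x6, which is
 * exactly the pattern RTEST rejects. */
static inline int rtest_example(void) {
  return !RTEST(Qfalse)   /*  6 & 0x7 == 0x6 -> falsey */
      && !RTEST(Qnil)     /* 14 & 0x7 == 0x6 -> falsey */
      &&  RTEST(Qtrue)    /* 10 & 0x7 == 0x2 -> truthy */
      &&  RTEST(Qundef);  /* 18 & 0x7 == 0x2 -> truthy */
}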
#define INDEXED(obj) (REFERENCE_P(obj) && !obj->StoresBytes)
/* copy flags not used by garbage collector */
static inline void object_copy_nongc_flags(OBJECT target, OBJECT source)
{
  target->obj_type        = source->obj_type;
  target->CanStoreIvars   = source->CanStoreIvars;
  target->StoresBytes     = source->StoresBytes;
  target->RequiresCleanup = source->RequiresCleanup;
  target->IsBlockContext  = source->IsBlockContext;
  target->IsMeta          = source->IsMeta;
}
#define CLEAR_FLAGS(obj) (obj)->all_flags = 0
/* stack context has GC zone unspecified */
#define stack_context_p(obj) ((obj)->gc_zone == UnspecifiedZone)
/* use this to check forwarded pointer on object:
 * when object is copied from space to space by GC,
 * a forwarding pointer is left in the old location */
#define SET_FORWARDED(obj) (obj)->Forwarded = TRUE
#define FORWARDED_P(obj) ((obj)->Forwarded)
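
/* Editor's sketch, not from the original header: a copying collector would
 * typically combine these as below; the helper names are hypothetical.
 *
 *   OBJECT copy = copy_object_to_new_space(obj);   // hypothetical
 *   SET_FORWARDED(obj);                            // old copy becomes a tombstone
 *   store_forwarding_address(obj, copy);           // hypothetical
 *   ...
 *   if(FORWARDED_P(obj)) obj = forwarding_address_of(obj);  // hypothetical
 */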
/* objects get older as they survive GC tracing */
#define AGE(obj) (obj->copy_count)
#define CLEAR_AGE(obj) (obj->copy_count = 0)
#define INCREMENT_AGE(obj) (obj->copy_count++)
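
/* Editor's note, not from the original header: copy_count is a 3-bit field,
 * so AGE(obj) only counts up to 7. A generational collector would typically
 * use it roughly as below; the threshold and helper are illustrative.
 *
 *   INCREMENT_AGE(obj);
 *   if(AGE(obj) >= TENURE_THRESHOLD) promote_to_mature(obj);  // hypothetical
 */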
#define rbs_set_class(om, obj, cls) ({ \
  OBJECT _o = (obj), _c = (cls); \
  RUN_WB2(om, _o, _c); _o->klass = _c; })

#define SET_CLASS(o,v) rbs_set_class(state->om, o, v)
/* Accessing and assigning the fields of an object */
#define SET_FIELD(obj, fel, val) rbs_set_field(state->om, obj, fel, val)
#define NTH_FIELD(obj, fel) rbs_get_field(obj, fel)

#define RUN_WB(obj, val) RUN_WB2(state->om, obj, val)
#define RUN_WB2(om, obj, val) if(REFERENCE_P(val) && (obj->gc_zone < val->gc_zone)) object_memory_update_rs(om, obj, val)
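
/* Editor's note, not from the original header: the barrier fires only when a
 * reference is stored into an object whose gc_zone value is lower than that
 * of the stored object, and object_memory_update_rs (presumably a
 * remembered-set update) records the holding object. Assuming younger zones
 * carry larger zone numbers than MatureObjectZone:
 *
 *   SET_FIELD(mature_obj, 0, young_obj);   // old-to-young store: barrier records mature_obj
 *   SET_FIELD(young_obj, 0, mature_obj);   // young-to-old store: no entry needed
 */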
/* No bounds checking! Be careful! */
#define fast_fetch(obj, idx) NTH_FIELD_DIRECT(obj, idx)

/* Only ever call this with constant arguments! */
#define fast_set(obj, idx, val) ({ \
  SET_FIELD_DIRECT(obj, idx, val); \

#define fast_unsafe_set(obj, idx, val) SET_FIELD_DIRECT(obj, idx, val)

#define fast_set_int(obj, idx, int) fast_unsafe_set(obj, idx, I2N(int))
#define fast_inc(obj, idx) fast_unsafe_set(obj, idx, (void*)((uintptr_t)fast_fetch(obj, idx) + (1 << TAG_SHIFT)))
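
/* Editor's note, not from the original header: fast_inc relies on the fixnum
 * tag living in the low TAG_SHIFT bits, so adding (1 << TAG_SHIFT) bumps the
 * untagged value by exactly one without disturbing the tag. With TAG_SHIFT == 2:
 *   tagged 5 = (5 << 2) | TAG_FIXNUM = 0x15
 *   0x15 + (1 << 2) = 0x19 = (6 << 2) | TAG_FIXNUM = tagged 6 */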
#define EXTRA_PROTECTION 0
#define rbs_set_field(om, obj, fel, val) ({ \
  OBJECT _v = (val), _o = (obj); \
  RUN_WB2(om, _o, _v); \
  SET_FIELD_DIRECT(_o, fel, _v); })

/* alias macro for obtaining object field directly by position */
#define rbs_get_field(obj, fel) NTH_FIELD_DIRECT(obj, fel)
#else /*DISABLE_CHECKS*/
/* compared to _bad_reference, prints a more verbose error message */
static void _bad_reference2(OBJECT in, int fel) {
  printf("Attempted to access field %d in an object with %lu fields.\n",
         fel, (unsigned long)NUM_FIELDS(in));

  if(current_machine->g_use_firesuit) {
    machine_handle_fire(FIRE_ACCESS);
/* versions that check for reference */
static void _bad_reference(OBJECT in) {
  printf("Attempted to access field of non-reference.\n");
  /* handle segfault */
  if(current_machine->g_use_firesuit) {
    machine_handle_fire(FIRE_NULL);
#define rbs_set_field(om, obj, fel, val) ({ \
  OBJECT _v = (val), _o = (obj); \
  unsigned int _fel = (unsigned int)(fel); \
  if(!REFERENCE_P(_o)) _bad_reference(_o); \
  if(_fel >= _o->field_count) _bad_reference2(_o, _fel); \
  RUN_WB2(om, _o, _v); \
  SET_FIELD_DIRECT(_o, _fel, _v); })
#define rbs_get_field(i_in, i_fel) ({ \
  OBJECT in = (i_in); unsigned int fel = (unsigned int)(i_fel); \
  if(!REFERENCE_P(in)) _bad_reference(in); \
  if(fel >= in->field_count) _bad_reference2(in, fel); \
  NTH_FIELD_DIRECT(in, fel); })
#else /*EXTRA_PROTECTION*/
/* These are the typically used versions. They don't check for ref; they let
   the segfault handler do that. */
#define rbs_set_field(om, obj, fel, val) ({ \
  OBJECT _v = (val), _o = (obj); \
  unsigned int _fel = (unsigned int)(fel); \
  if(_fel >= _o->field_count) _bad_reference2(_o, _fel); \
  RUN_WB2(om, _o, _v); \
  SET_FIELD_DIRECT(_o, _fel, _v); })
#define rbs_get_field(obj, fel) ({ \
  OBJECT _o = (obj); \
  unsigned int _fel = (unsigned int)(fel); \
  if(_fel >= _o->field_count) _bad_reference2(_o, _fel); \
  NTH_FIELD_DIRECT(_o, _fel); })
#endif /*EXTRA_PROTECTION*/

#endif /*DISABLE_CHECKS*/
/* A kind of odd one. Use this when assigning an OBJECT into a 'managed'
 * struct, i.e. a struct that is contained in the rubinius heap. This
 * allows the GC to run the write barrier and keep things sane. */
#define SET_STRUCT_FIELD(obj, fel, val) ({ OBJECT _o = (obj), _tmp = (val); RUN_WB(_o, _tmp); fel = _tmp; _tmp; })
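
/* Editor's sketch, not from the original header: usage with a hypothetical
 * managed struct living in an object's byte area.
 *
 *   struct managed_pair { OBJECT left; OBJECT right; };            // hypothetical
 *   struct managed_pair *p = (struct managed_pair *)BYTES_OF(obj);
 *   SET_STRUCT_FIELD(obj, p->left, val);   // runs the write barrier on obj, then assigns
 */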
/* Return the OBJECT for the class of +obj+. Works for all OBJECTs. */
#define _real_class(state, obj) (REFERENCE_P(obj) ? obj->klass : object_class(state, obj))

#define RTYPE(obj,type) (REFERENCE_P(obj) && obj->obj_type == type)
/* shortcut, doing what Ruby's is_a? does */
#define RISA(obj,cls) (REFERENCE_P(obj) && ISA(obj,BASIC_CLASS(cls)))
/* Type predicates */
#define BIGNUM_P(obj) (RTYPE(obj, BignumType))
#define FLOAT_P(obj) (RTYPE(obj, FloatType))
#define COMPLEX_P(obj) (FALSE)

#define INTEGER_P(obj) (FIXNUM_P(obj) || BIGNUM_P(obj))
#define NUMERIC_P(obj) (FIXNUM_P(obj) || COMPLEX_P(obj) || BIGNUM_P(obj) || FLOAT_P(obj))

#define CLASS_P(obj) RTYPE(obj, ClassType)
#define TUPLE_P(obj) RTYPE(obj, TupleType)
#define IO_P(obj) RISA(obj, io)
#define STRING_P(obj) RTYPE(obj, StringType)
#define LOOKUPTABLE_P(obj) RISA(obj, lookuptable)
#define METHODTABLE_P(obj) RTYPE(obj, MTType)
#define ARRAY_P(obj) RTYPE(obj, ArrayType)
#define AUTOLOAD_P(obj) RTYPE(obj, AutoloadType)

#define STRING_OR_NIL_P(obj) (STRING_P(obj) || NIL_P(obj))

#define CMETHOD_P(obj) RTYPE(obj, CMethodType)
#define REGEXP_P(obj) RTYPE(obj, RegexpType)

#define CTX_P(obj) RISA(obj, fastctx)
#define BYTEARRAY_P(obj) RTYPE(obj, ByteArrayType)
/* iseq is instruction sequence */
#define ISEQ_P(obj) RTYPE(obj, ISeqType)
#define TASK_P(obj) RTYPE(obj, TaskType)
#define CHANNEL_P(obj) RTYPE(obj, ChannelType)
#define BLOCKENV_P(obj) RTYPE(obj, BlockEnvType)
#define THREAD_P(obj) RTYPE(obj, ThreadType)
#define SENDSITE_P(obj) RTYPE(obj, SendSiteType)
#define SELECTOR_P(obj) RTYPE(obj, SelectorType)
#define HASH_P(obj) (RISA(obj, hash))
#define MODULE_P(obj) (RTYPE(obj, ModuleType))
#define POINTER_P(obj) RTYPE(obj, MemPtrType)
/* HACK: refactor this to use the state_setup_type code path. */
struct wraps_struct {

#define MARK_WRAPPED_STRUCT(obj) do { \
  struct wraps_struct *s = (struct wraps_struct *)BYTES_OF(obj); \
  if(s->mark != NULL) { s->mark(s->ptr); } \

#define FREE_WRAPPED_STRUCT(obj) do { \
  struct wraps_struct *s = (struct wraps_struct *)BYTES_OF(obj); \
  if(s->free != NULL) { s->free(s->ptr); } \