/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/

#include "ruby/ruby.h"
#include "ruby/signal.h"
#include "ruby/st.h"
#include "ruby/node.h"
#include "ruby/re.h"
#include "ruby/io.h"
#include "ruby/util.h"
#include "eval_intern.h"
#include "vm_core.h"
#include "gc.h"
#include <stdio.h>
#include <setjmp.h>
#include <sys/types.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE(p, n)
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE(p, n)
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
#endif
int rb_io_fptr_finalize(struct rb_io_t*);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

/* Make alloca work the best possible way. */
#ifdef __GNUC__
# ifndef atarist
#  ifndef alloca
#   define alloca __builtin_alloca
#  endif
# endif /* atarist */
#else
# ifdef HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
#pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca();
#   endif
#  endif /* AIX */
# endif /* HAVE_ALLOCA_H */
#endif /* __GNUC__ */

#ifndef GC_MALLOC_LIMIT
#if defined(MSDOS) || defined(__human68k__)
#define GC_MALLOC_LIMIT 200000
#else
#define GC_MALLOC_LIMIT 8000000
#endif
#endif

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#define MARK_STACK_MAX 1024

int ruby_gc_debug_indent = 0;

#undef GC_DEBUG

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif
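/* Every heap-allocated Ruby object occupies exactly one RVALUE slot.
 * The union below overlays the C representations of all built-in types,
 * so sizeof(RVALUE) is the size of its largest member; a freed slot
 * reuses the same memory as a link in the freelist (the "free" member,
 * whose flags field is always 0). */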
typedef struct RVALUE {
    union {
        struct {
            VALUE flags;        /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RBasic basic;
        struct RObject object;
        struct RClass klass;
        struct RFloat flonum;
        struct RString string;
        struct RArray array;
        struct RRegexp regexp;
        struct RHash hash;
        struct RData data;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile file;
        struct RNode node;
        struct RMatch match;
        struct RRational rational;
        struct RComplex complex;
    } as;
#ifdef GC_DEBUG
    char *file;
    int line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

struct heaps_slot {
    void *membase;
    RVALUE *slot;
    int limit;
};

#define HEAP_MIN_SLOTS 10000
#define FREE_MIN 4096

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

#define CALC_EXACT_MALLOC_SIZE 0
typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
    } malloc_params;
    struct {
        size_t increment;
        struct heaps_slot *ptr;
        size_t length;
        size_t used;
        RVALUE *freelist;
        RVALUE *range[2];
        RVALUE *freed;
    } heap;
    struct {
        int dont_gc;
        int during_gc;
    } flags;
    struct {
        st_table *table;
        RVALUE *deferred;
    } final;
    struct {
        VALUE buffer[MARK_STACK_MAX];
        VALUE *ptr;
        int overflow;
    } markstack;
    struct gc_list *global_list;
    unsigned int count;
    int gc_stress;
} rb_objspace_t;

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define rb_objspace (*GET_VM()->objspace)
static int ruby_initial_gc_stress = 0;
int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#else
static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}, {HEAP_MIN_SLOTS}};
int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#endif
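/* The macros below are shorthands that expand to fields of the object
 * space designated by a local variable named "objspace"; every function
 * that uses them brings such a variable into scope first.  This keeps
 * one code path working both for the single static object space and for
 * the per-VM object space enabled by ENABLE_VM_OBJSPACE. */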
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define heap_slots objspace->heap.slots
#define heaps objspace->heap.ptr
#define heaps_length objspace->heap.length
#define heaps_used objspace->heap.used
#define freelist objspace->heap.freelist
#define lomem objspace->heap.range[0]
#define himem objspace->heap.range[1]
#define heaps_inc objspace->heap.increment
#define heaps_freed objspace->heap.freed
#define dont_gc objspace->flags.dont_gc
#define during_gc objspace->flags.during_gc
#define finalizer_table objspace->final.table
#define deferred_final_list objspace->final.deferred
#define mark_stack objspace->markstack.buffer
#define mark_stack_ptr objspace->markstack.ptr
#define mark_stack_overflow objspace->markstack.overflow
#define global_List objspace->global_list
#define ruby_gc_stress objspace->gc_stress

#define need_call_final (finalizer_table && finalizer_table->num_entries)

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
rb_objspace_t *
rb_objspace_alloc(void)
{
    rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
    memset(objspace, 0, sizeof(*objspace));
    malloc_limit = GC_MALLOC_LIMIT;
    ruby_gc_stress = ruby_initial_gc_stress;

    return objspace;
}
#endif

/* tiny heap size */
/* 32KB */
/*#define HEAP_SIZE 0x8000 */
/* 128KB */
/*#define HEAP_SIZE 0x20000 */
/* 64KB */
/*#define HEAP_SIZE 0x10000 */
/* 16KB */
#define HEAP_SIZE 0x4000
/* 8KB */
/*#define HEAP_SIZE 0x2000 */
/* 4KB */
/*#define HEAP_SIZE 0x1000 */
/* 2KB */
/*#define HEAP_SIZE 0x800 */

#define HEAP_OBJ_LIMIT (HEAP_SIZE / sizeof(struct RVALUE))
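/* With the 16KB HEAP_SIZE chosen above, one heap page holds
 * 16384 / sizeof(RVALUE) object slots: 819 when sizeof(RVALUE) is 20
 * (32-bit, 4-byte aligned double), 682 when it is 24, and 409 when it
 * is 40 (64-bit); see the size table in rb_obj_id() below. */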
extern st_table *rb_class_tbl;

int ruby_disable_gc_stress = 0;

static void run_final(rb_objspace_t *objspace, VALUE obj);
static int garbage_collect(rb_objspace_t *objspace);

void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}

void
rb_memerror(void)
{
    rb_thread_t *th = GET_THREAD();
    if (!nomem_error ||
        (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
        rb_thread_raised_clear(th);
        GET_THREAD()->errinfo = nomem_error;
        JUMP_TAG(TAG_RAISE);
    }
    rb_thread_raised_set(th, RAISED_NOMEMORY);
    rb_exc_raise(nomem_error);
}

/*
 *  call-seq:
 *     GC.stress    => true or false
 *
 *  Returns the current status of GC stress mode.
 */

static VALUE
gc_stress_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress ? Qtrue : Qfalse;
}

/*
 *  call-seq:
 *     GC.stress = bool          => bool
 *
 *  Updates the GC stress mode.
 *
 *  When GC.stress = true, GC is invoked at every GC opportunity:
 *  all memory and object allocations.
 *
 *  Since it makes Ruby very slow, it is only for debugging.
 */

static VALUE
gc_stress_set(VALUE self, VALUE bool)
{
    rb_objspace_t *objspace = &rb_objspace;
    rb_secure(2);
    ruby_gc_stress = RTEST(bool);
    return bool;
}
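/* Allocation strategy of vm_xmalloc()/vm_xrealloc() below: a collection
 * is triggered up front when stress mode is on or when the bytes
 * allocated since the last GC (malloc_increase) would exceed
 * malloc_limit; if the underlying malloc/realloc then fails, the
 * allocator collects once more and retries before finally giving up
 * with rb_memerror(). */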
static void *
vm_xmalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    if (size < 0) {
        rb_raise(rb_eNoMemError, "negative allocation size (or too big)");
    }
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
#endif

    if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
        (malloc_increase+size) > malloc_limit) {
        garbage_collect(objspace);
    }
    RUBY_CRITICAL(mem = malloc(size));
    if (!mem) {
        if (garbage_collect(objspace)) {
            RUBY_CRITICAL(mem = malloc(size));
        }
        if (!mem) {
            rb_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    objspace->malloc_params.allocations++;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void *
vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
{
    void *mem;

    if (size < 0) {
        rb_raise(rb_eArgError, "negative re-allocation size");
    }
    if (!ptr) return ruby_xmalloc(size);
    if (size == 0) size = 1;
    if (ruby_gc_stress && !ruby_disable_gc_stress) garbage_collect(objspace);

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(size_t);
    objspace->malloc_params.allocated_size -= size;
    ptr = (size_t *)ptr - 1;
#endif

    RUBY_CRITICAL(mem = realloc(ptr, size));
    if (!mem) {
        if (garbage_collect(objspace)) {
            RUBY_CRITICAL(mem = realloc(ptr, size));
        }
        if (!mem) {
            rb_memerror();
        }
    }
    malloc_increase += size;

#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += size;
    ((size_t *)mem)[0] = size;
    mem = (size_t *)mem + 1;
#endif

    return mem;
}

static void
vm_xfree(rb_objspace_t *objspace, void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    size_t size;
    ptr = ((size_t *)ptr) - 1;
    size = ((size_t*)ptr)[0];
    objspace->malloc_params.allocated_size -= size;
    objspace->malloc_params.allocations--;
#endif

    RUBY_CRITICAL(free(ptr));
}
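/* ruby_xmalloc2()/ruby_xrealloc2() below guard the n * size
 * multiplication against integer overflow: if the product wrapped
 * around, dividing it back by n cannot reproduce size, so the test
 * "n != 0 && size != len / n" rejects the request before an undersized
 * buffer is ever allocated. */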
void *
ruby_xmalloc(size_t size)
{
    return vm_xmalloc(&rb_objspace, size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
        rb_raise(rb_eArgError, "malloc: possible integer overflow");
    }
    return vm_xmalloc(&rb_objspace, len);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
    void *mem = ruby_xmalloc2(n, size);
    memset(mem, 0, n * size);

    return mem;
}

void *
ruby_xrealloc(void *ptr, size_t size)
{
    return vm_xrealloc(&rb_objspace, ptr, size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t size)
{
    size_t len = size * n;
    if (n != 0 && size != len / n) {
        rb_raise(rb_eArgError, "realloc: possible integer overflow");
    }
    return ruby_xrealloc(ptr, len);
}

void
ruby_xfree(void *x)
{
    if (x)
        vm_xfree(&rb_objspace, x);
}
/*
 *  call-seq:
 *     GC.enable    => true or false
 *
 *  Enables garbage collection, returning <code>true</code> if garbage
 *  collection was previously disabled.
 *
 *     GC.disable   #=> false
 *     GC.enable    #=> true
 *     GC.enable    #=> false
 *
 */

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = Qfalse;
    return old;
}

/*
 *  call-seq:
 *     GC.disable   => true or false
 *
 *  Disables garbage collection, returning <code>true</code> if garbage
 *  collection was already disabled.
 *
 *     GC.disable   #=> false
 *     GC.disable   #=> true
 *
 */

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    int old = dont_gc;

    dont_gc = Qtrue;
    return old;
}

VALUE rb_mGC;

void
rb_register_mark_object(VALUE obj)
{
    VALUE ary = GET_THREAD()->vm->mark_object_ary;
    rb_ary_push(ary, obj);
}

void
rb_gc_register_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp;

    tmp = ALLOC(struct gc_list);
    tmp->next = global_List;
    tmp->varptr = addr;
    global_List = tmp;
}

void
rb_gc_unregister_address(VALUE *addr)
{
    rb_objspace_t *objspace = &rb_objspace;
    struct gc_list *tmp = global_List;

    if (tmp->varptr == addr) {
        global_List = tmp->next;
        xfree(tmp);
        return;
    }
    while (tmp->next) {
        if (tmp->next->varptr == addr) {
            struct gc_list *t = tmp->next;

            tmp->next = tmp->next->next;
            xfree(t);
            break;
        }
        tmp = tmp->next;
    }
}
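/* Heap page management: allocate_heaps() (re)allocates the array of
 * heaps_slot descriptors, and assign_heap_slot() mallocs one HEAP_SIZE
 * page, aligns its first slot to sizeof(RVALUE), and inserts the page
 * into the descriptor array in address order so that
 * is_pointer_to_heap() can binary-search it. */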
static void
allocate_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct heaps_slot *p;
    size_t size;

    size = next_heaps_length*sizeof(struct heaps_slot);
    RUBY_CRITICAL(
        if (heaps_used > 0) {
            p = (struct heaps_slot *)realloc(heaps, size);
            if (p) heaps = p;
        }
        else {
            p = heaps = (struct heaps_slot *)malloc(size);
        });
    if (p == 0) rb_memerror();
    heaps_length = next_heaps_length;
}

static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    size_t hi, lo, mid;
    int objs;

    objs = HEAP_OBJ_LIMIT;
    RUBY_CRITICAL(p = (RVALUE*)malloc(HEAP_SIZE));
    if (p == 0)
        rb_memerror();

    membase = p;
    if ((VALUE)p % sizeof(RVALUE) != 0) {
        p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
        if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < ((char*)p - (char*)membase)) {
            objs--;
        }
    }

    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = heaps[mid].membase;
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, membase, (VALUE)mid);
        }
    }

    if (hi < heaps_used) {
        MEMMOVE(&heaps[hi+1], &heaps[hi], struct heaps_slot, heaps_used - hi);
    }
    heaps[hi].membase = membase;
    heaps[hi].slot = p;
    heaps[hi].limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        p->as.free.next = freelist;
        freelist = p;
        p++;
    }
}
static void
init_heap(rb_objspace_t *objspace)
{
    size_t add, i;

    add = HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT;

    if ((heaps_used + add) > heaps_length) {
        allocate_heaps(objspace, heaps_used + add);
    }

    for (i = 0; i < add; i++) {
        assign_heap_slot(objspace);
    }
    heaps_inc = 0;
}

static void
set_heaps_increment(rb_objspace_t *objspace)
{
    size_t next_heaps_length = heaps_used * 1.8;
    heaps_inc = next_heaps_length - heaps_used;

    if (next_heaps_length > heaps_length) {
        allocate_heaps(objspace, next_heaps_length);
    }
}

static int
heaps_increment(rb_objspace_t *objspace)
{
    if (heaps_inc > 0) {
        assign_heap_slot(objspace);
        heaps_inc--;
        return Qtrue;
    }
    return Qfalse;
}
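/* Object allocation fast path: rb_newobj_from_heap() below pops the
 * first free slot off the freelist.  Only when the freelist is empty
 * does it fall back to growing the heap or running a collection, and it
 * raises NoMemoryError only if both fail to produce a free slot. */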
#define RANY(o) ((RVALUE*)(o))

static VALUE
rb_newobj_from_heap(rb_objspace_t *objspace)
{
    VALUE obj;

    if ((ruby_gc_stress && !ruby_disable_gc_stress) || !freelist) {
        if (!heaps_increment(objspace) && !garbage_collect(objspace)) {
            rb_memerror();
        }
    }

    obj = (VALUE)freelist;
    freelist = freelist->as.free.next;

    MEMZERO((void*)obj, RVALUE, 1);
#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

    return obj;
}

#if USE_VALUE_CACHE
static VALUE
rb_fill_value_cache(rb_thread_t *th)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;
    VALUE rv;

    /* LOCK */
    for (i=0; i<RUBY_VM_VALUE_CACHE_SIZE; i++) {
        VALUE v = rb_newobj_from_heap(objspace);

        th->value_cache[i] = v;
        RBASIC(v)->flags = FL_MARK;
    }
    th->value_cache_ptr = &th->value_cache[0];
    rv = rb_newobj_from_heap(objspace);
    /* UNLOCK */
    return rv;
}
#endif

int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}

VALUE
rb_newobj(void)
{
#if USE_VALUE_CACHE || (defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE)
    rb_thread_t *th = GET_THREAD();
#endif
#if USE_VALUE_CACHE
    VALUE v = *th->value_cache_ptr;
#endif
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    rb_objspace_t *objspace = th->vm->objspace;
#else
    rb_objspace_t *objspace = &rb_objspace;
#endif

    if (during_gc) {
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

#if USE_VALUE_CACHE
    if (v) {
        RBASIC(v)->flags = 0;
        th->value_cache_ptr++;
    }
    else {
        v = rb_fill_value_cache(th);
    }

#if defined(GC_DEBUG)
    printf("cache index: %d, v: %p, th: %p\n",
           th->value_cache_ptr - th->value_cache, v, th);
#endif
    return v;
#else
    return rb_newobj_from_heap(objspace);
#endif
}
NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = (NODE*)rb_newobj();

    n->flags |= T_NODE;
    nd_set_type(n, type);

    n->u1.value = a0;
    n->u2.value = a1;
    n->u3.value = a2;

    return n;
}

VALUE
rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    NEWOBJ(data, struct RData);
    if (klass) Check_Type(klass, T_CLASS);
    OBJSETUP(data, klass, T_DATA);
    data->data = datap;
    data->dfree = dfree;
    data->dmark = dmark;

    return (VALUE)data;
}

#ifdef __ia64
#define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
#else
#define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
#endif
#define STACK_START (th->machine_stack_start)
#define STACK_END (th->machine_stack_end)
#define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))

#if STACK_GROW_DIRECTION < 0
# define STACK_LENGTH (STACK_START - STACK_END)
#elif STACK_GROW_DIRECTION > 0
# define STACK_LENGTH (STACK_END - STACK_START + 1)
#else
# define STACK_LENGTH ((STACK_END < STACK_START) ? STACK_START - STACK_END\
                                                 : STACK_END - STACK_START + 1)
#endif

#if !STACK_GROW_DIRECTION
int ruby_stack_grow_direction;
int
ruby_get_stack_grow_direction(VALUE *addr)
{
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;

    if (STACK_END > addr) return ruby_stack_grow_direction = 1;
    return ruby_stack_grow_direction = -1;
}
#endif

#define GC_WATER_MARK 512

size_t
ruby_stack_length(VALUE **p)
{
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
    return STACK_LENGTH;
}

int
ruby_stack_check(void)
{
    int ret;
    rb_thread_t *th = GET_THREAD();
    SET_STACK_END;
    ret = STACK_LENGTH > STACK_LEVEL_MAX - GC_WATER_MARK;
#ifdef __ia64
    if (!ret) {
        ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
              th->machine_register_stack_maxsize/sizeof(VALUE) - GC_WATER_MARK;
    }
#endif
    return ret;
}

static void
init_mark_stack(rb_objspace_t *objspace)
{
    mark_stack_overflow = 0;
    mark_stack_ptr = mark_stack;
}

#define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)

static void gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev);
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev);
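/* When gc_mark() has to abandon a branch because its recursion bound
 * was hit, the unfinished object is parked in the fixed-size mark stack
 * and completed later by gc_mark_rest().  Should that stack overflow
 * too, mark_stack_overflow makes the collector fall back to
 * gc_mark_all(), which rescans the whole heap for marked objects whose
 * children may not have been visited yet. */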
static void
gc_mark_all(rb_objspace_t *objspace)
{
    RVALUE *p, *pend;
    size_t i;

    init_mark_stack(objspace);
    for (i = 0; i < heaps_used; i++) {
        p = heaps[i].slot; pend = p + heaps[i].limit;
        while (p < pend) {
            if ((p->as.basic.flags & FL_MARK) &&
                (p->as.basic.flags != FL_MARK)) {
                gc_mark_children(objspace, (VALUE)p, 0);
            }
            p++;
        }
    }
}

static void
gc_mark_rest(rb_objspace_t *objspace)
{
    VALUE tmp_arry[MARK_STACK_MAX];
    VALUE *p;

    p = (mark_stack_ptr - mark_stack) + tmp_arry;
    MEMCPY(tmp_arry, mark_stack, VALUE, p - tmp_arry);

    init_mark_stack(objspace);
    while (p != tmp_arry) {
        p--;
        gc_mark_children(objspace, *p, 0);
    }
}

static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct heaps_slot *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return Qfalse;
    if ((VALUE)p % sizeof(RVALUE) != 0) return Qfalse;

    /* check if p looks like a pointer using bsearch */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        heap = &heaps[mid];
        if (heap->slot <= p) {
            if (p < heap->slot + heap->limit)
                return Qtrue;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return Qfalse;
}
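/* Conservative scanning: mark_locations_array() treats every word in
 * the given range as a potential VALUE and marks it if
 * is_pointer_to_heap() confirms it falls on a live, properly aligned
 * slot of some heap page.  False positives only over-retain memory;
 * they can never cause a live object to be collected. */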
static void
mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
{
    VALUE v;
    while (n--) {
        v = *x;
        VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
        if (is_pointer_to_heap(objspace, (void *)v)) {
            gc_mark(objspace, v, 0);
        }
        x++;
    }
}

static void
gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
{
    long n;

    if (end <= start) return;
    n = end - start;
    mark_locations_array(objspace, start, n);
}

void
rb_gc_mark_locations(VALUE *start, VALUE *end)
{
    gc_mark_locations(&rb_objspace, start, end);
}

#define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, start, end)

struct mark_tbl_arg {
    rb_objspace_t *objspace;
    int lev;
};

static int
mark_entry(ID key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, value, arg->lev);
    return ST_CONTINUE;
}

static void
mark_tbl(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_entry, (st_data_t)&arg);
}

void
rb_mark_tbl(st_table *tbl)
{
    mark_tbl(&rb_objspace, tbl, 0);
}

static int
mark_key(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key, arg->lev);
    return ST_CONTINUE;
}

static void
mark_set(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_key, (st_data_t)&arg);
}

void
rb_mark_set(st_table *tbl)
{
    mark_set(&rb_objspace, tbl, 0);
}

static int
mark_keyvalue(VALUE key, VALUE value, st_data_t data)
{
    struct mark_tbl_arg *arg = (void*)data;
    gc_mark(arg->objspace, key, arg->lev);
    gc_mark(arg->objspace, value, arg->lev);
    return ST_CONTINUE;
}

static void
mark_hash(rb_objspace_t *objspace, st_table *tbl, int lev)
{
    struct mark_tbl_arg arg;
    if (!tbl) return;
    arg.objspace = objspace;
    arg.lev = lev;
    st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
}

void
rb_mark_hash(st_table *tbl)
{
    mark_hash(&rb_objspace, tbl, 0);
}

void
rb_gc_mark_maybe(VALUE obj)
{
    if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
        gc_mark(&rb_objspace, obj, 0);
    }
}
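/* GC_LEVEL_MAX bounds the marking recursion at 250 frames; gc_mark()
 * additionally consults ruby_stack_check() on top-level calls (lev ==
 * 0) so that marking begun near the end of the machine stack spills to
 * the mark stack instead of overflowing. */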
#define GC_LEVEL_MAX 250

static void
gc_mark(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj;

    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return; /* free cell */
    if (obj->as.basic.flags & FL_MARK) return; /* already marked */
    obj->as.basic.flags |= FL_MARK;

    if (lev > GC_LEVEL_MAX || (lev == 0 && ruby_stack_check())) {
        if (!mark_stack_overflow) {
            if (mark_stack_ptr - mark_stack < MARK_STACK_MAX) {
                *mark_stack_ptr = ptr;
                mark_stack_ptr++;
            }
            else {
                mark_stack_overflow = 1;
            }
        }
        return;
    }
    gc_mark_children(objspace, ptr, lev+1);
}

void
rb_gc_mark(VALUE ptr)
{
    gc_mark(&rb_objspace, ptr, 0);
}
static void
gc_mark_children(rb_objspace_t *objspace, VALUE ptr, int lev)
{
    register RVALUE *obj = RANY(ptr);

    goto marking;               /* skip */

  again:
    obj = RANY(ptr);
    if (rb_special_const_p(ptr)) return; /* special const not marked */
    if (obj->as.basic.flags == 0) return; /* free cell */
    if (obj->as.basic.flags & FL_MARK) return; /* already marked */
    obj->as.basic.flags |= FL_MARK;

  marking:
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(ptr);
    }

    switch (obj->as.basic.flags & T_MASK) {
      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_IF:         /* 1,2,3 */
          case NODE_FOR:
          case NODE_ITER:
          case NODE_WHEN:
          case NODE_MASGN:
          case NODE_RESCUE:
          case NODE_RESBODY:
          case NODE_CLASS:
          case NODE_BLOCK_PASS:
            gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            /* fall through */
          case NODE_BLOCK:      /* 1,3 */
          case NODE_OPTBLOCK:
          case NODE_ARRAY:
          case NODE_DSTR:
          case NODE_DXSTR:
          case NODE_DREGX:
          case NODE_DREGX_ONCE:
          case NODE_ENSURE:
          case NODE_CALL:
          case NODE_DEFS:
          case NODE_OP_ASGN1:
          case NODE_ARGS:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_SUPER:      /* 3 */
          case NODE_FCALL:
          case NODE_DEFN:
          case NODE_ARGS_AUX:
            ptr = (VALUE)obj->as.node.u3.node;
            goto again;

          case NODE_METHOD:     /* 1,2 */
          case NODE_WHILE:
          case NODE_UNTIL:
          case NODE_AND:
          case NODE_OR:
          case NODE_CASE:
          case NODE_SCLASS:
          case NODE_DOT2:
          case NODE_DOT3:
          case NODE_FLIP2:
          case NODE_FLIP3:
          case NODE_MATCH2:
          case NODE_MATCH3:
          case NODE_OP_ASGN_OR:
          case NODE_OP_ASGN_AND:
          case NODE_MODULE:
          case NODE_ALIAS:
          case NODE_VALIAS:
          case NODE_ARGSCAT:
            gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            /* fall through */
          case NODE_FBODY:      /* 2 */
          case NODE_GASGN:
          case NODE_LASGN:
          case NODE_DASGN:
          case NODE_DASGN_CURR:
          case NODE_IASGN:
          case NODE_IASGN2:
          case NODE_CVASGN:
          case NODE_COLON3:
          case NODE_OPT_N:
          case NODE_EVSTR:
          case NODE_UNDEF:
          case NODE_POSTEXE:
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_HASH:       /* 1 */
          case NODE_LIT:
          case NODE_STR:
          case NODE_XSTR:
          case NODE_DEFINED:
          case NODE_MATCH:
          case NODE_RETURN:
          case NODE_BREAK:
          case NODE_NEXT:
          case NODE_YIELD:
          case NODE_COLON2:
          case NODE_SPLAT:
          case NODE_TO_ARY:
            ptr = (VALUE)obj->as.node.u1.node;
            goto again;

          case NODE_SCOPE:      /* 2,3 */
          case NODE_CDECL:
          case NODE_OPT_ARG:
            gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          case NODE_ZARRAY:     /* - */
          case NODE_ZSUPER:
          case NODE_CFUNC:
          case NODE_VCALL:
          case NODE_GVAR:
          case NODE_LVAR:
          case NODE_DVAR:
          case NODE_IVAR:
          case NODE_CVAR:
          case NODE_NTH_REF:
          case NODE_BACK_REF:
          case NODE_REDO:
          case NODE_RETRY:
          case NODE_SELF:
          case NODE_NIL:
          case NODE_TRUE:
          case NODE_FALSE:
          case NODE_ERRINFO:
          case NODE_ATTRSET:
          case NODE_BLOCK_ARG:
            break;
          case NODE_ALLOCA:
            mark_locations_array(objspace,
                                 (VALUE*)obj->as.node.u1.value,
                                 obj->as.node.u3.cnt);
            ptr = (VALUE)obj->as.node.u2.node;
            goto again;

          default:              /* unlisted NODE */
            if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u1.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u2.node, lev);
            }
            if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
                gc_mark(objspace, (VALUE)obj->as.node.u3.node, lev);
            }
        }
        return;                 /* no need to mark class. */
    }

    gc_mark(objspace, obj->as.basic.klass, lev);
    switch (obj->as.basic.flags & T_MASK) {
      case T_ICLASS:
      case T_CLASS:
      case T_MODULE:
        mark_tbl(objspace, RCLASS_M_TBL(obj), lev);
        mark_tbl(objspace, RCLASS_IV_TBL(obj), lev);
        ptr = RCLASS_SUPER(obj);
        goto again;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            ptr = obj->as.array.aux.shared;
            goto again;
        }
        else {
            long i, len = RARRAY_LEN(obj);
            VALUE *ptr = RARRAY_PTR(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj->as.hash.ntbl, lev);
        ptr = obj->as.hash.ifnone;
        goto again;

      case T_STRING:
#define STR_ASSOC FL_USER3      /* copied from string.c */
        if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
            ptr = obj->as.string.as.heap.aux.shared;
            goto again;
        }
        break;

      case T_DATA:
        if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
        break;

      case T_OBJECT:
        {
            long i, len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      case T_FILE:
        if (obj->as.file.fptr)
            gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing, lev);
        break;

      case T_REGEXP:
        gc_mark(objspace, obj->as.regexp.src, lev);
        break;

      case T_FLOAT:
      case T_BIGNUM:
        break;

      case T_MATCH:
        gc_mark(objspace, obj->as.match.regexp, lev);
        if (obj->as.match.str) {
            ptr = obj->as.match.str;
            goto again;
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, obj->as.rational.num, lev);
        gc_mark(objspace, obj->as.rational.den, lev);
        break;

      case T_COMPLEX:
        gc_mark(objspace, obj->as.complex.real, lev);
        gc_mark(objspace, obj->as.complex.image, lev);
        break;

      case T_STRUCT:
        {
            long len = RSTRUCT_LEN(obj);
            VALUE *ptr = RSTRUCT_PTR(obj);

            while (len--) {
                gc_mark(objspace, *ptr++, lev);
            }
        }
        break;

      default:
        rb_bug("rb_gc_mark(): unknown data type 0x%lx(%p) %s",
               obj->as.basic.flags & T_MASK, obj,
               is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
    }
}
static void obj_free(rb_objspace_t *, VALUE);

static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
    while (p) {
        RVALUE *tmp = p->as.free.next;
        run_final(objspace, (VALUE)p);
        if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
            VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
            p->as.free.flags = 0;
            p->as.free.next = freelist;
            freelist = p;
        }
        p = tmp;
    }
}

static void
free_unused_heaps(rb_objspace_t *objspace)
{
    size_t i, j;
    RVALUE *last = 0;

    for (i = j = 1; j < heaps_used; i++) {
        if (heaps[i].limit == 0) {
            if (!last) {
                last = heaps[i].membase;
            }
            else {
                free(heaps[i].membase);
            }
            heaps_used--;
        }
        else {
            if (i != j) {
                heaps[j] = heaps[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            free(heaps_freed);
            heaps_freed = last;
        }
        else {
            free(last);
        }
    }
}
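/* gc_sweep() threads every unmarked slot back onto the freelist, or
 * onto the deferred finalization list when a finalizer is registered.
 * Heuristics: a page whose slots are all free is released, but only
 * once more than 65% of all slots (do_heap_free) have been freed; and
 * if fewer than 20% of all slots (free_min, at least FREE_MIN) were
 * reclaimed, the heap is grown.  malloc_limit also adapts upward here
 * in proportion to the fraction of objects that survived. */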
static void
gc_sweep(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *final_list;
    size_t freed = 0;
    size_t i;
    size_t live = 0, free_min = 0, do_heap_free = 0;

    do_heap_free = (heaps_used * HEAP_OBJ_LIMIT) * 0.65;
    free_min = (heaps_used * HEAP_OBJ_LIMIT) * 0.2;
    if (free_min < FREE_MIN) {
        do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
        free_min = FREE_MIN;
    }

    freelist = 0;
    final_list = deferred_final_list;
    deferred_final_list = 0;
    for (i = 0; i < heaps_used; i++) {
        int n = 0;
        RVALUE *free = freelist;
        RVALUE *final = final_list;

        p = heaps[i].slot; pend = p + heaps[i].limit;
        while (p < pend) {
            if (!(p->as.basic.flags & FL_MARK)) {
                if (p->as.basic.flags) {
                    obj_free(objspace, (VALUE)p);
                }
                if (need_call_final && FL_TEST(p, FL_FINALIZE)) {
                    p->as.free.flags = FL_MARK; /* remain marked */
                    p->as.free.next = final_list;
                    final_list = p;
                }
                else {
                    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                    p->as.free.flags = 0;
                    p->as.free.next = freelist;
                    freelist = p;
                }
                n++;
            }
            else if (RBASIC(p)->flags == FL_MARK) {
                /* objects to be finalized */
                /* do nothing; remain marked */
            }
            else {
                RBASIC(p)->flags &= ~FL_MARK;
                live++;
            }
            p++;
        }
        if (n == heaps[i].limit && freed > do_heap_free) {
            RVALUE *pp;

            heaps[i].limit = 0;
            for (pp = final_list; pp != final; pp = pp->as.free.next) {
                pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
            }
            freelist = free;    /* cancel this page from freelist */
        }
        else {
            freed += n;
        }
    }
    if (malloc_increase > malloc_limit) {
        malloc_limit += (malloc_increase - malloc_limit) * (double)live / (live + freed);
        if (malloc_limit < GC_MALLOC_LIMIT) malloc_limit = GC_MALLOC_LIMIT;
    }
    malloc_increase = 0;
    if (freed < free_min) {
        set_heaps_increment(objspace);
        heaps_increment(objspace);
    }
    during_gc = 0;

    /* clear finalization list */
    if (final_list) {
        deferred_final_list = final_list;
        return;
    }
    free_unused_heaps(objspace);
}
void
rb_gc_force_recycle(VALUE p)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    RANY(p)->as.free.flags = 0;
    RANY(p)->as.free.next = freelist;
    freelist = RANY(p);
}

static void
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    switch (RANY(obj)->as.basic.flags & T_MASK) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
    }

    switch (RANY(obj)->as.basic.flags & T_MASK) {
      case T_OBJECT:
        if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
            RANY(obj)->as.object.as.heap.ivptr) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_clear_cache_by_class((VALUE)obj);
        st_free_table(RCLASS_M_TBL(obj));
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            st_free_table(RCLASS_IV_INDEX_TBL(obj));
        }
        xfree(RANY(obj)->as.klass.ptr);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
        if (RANY(obj)->as.hash.ntbl) {
            st_free_table(RANY(obj)->as.hash.ntbl);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            if ((long)RANY(obj)->as.data.dfree == -1) {
                xfree(DATA_PTR(obj));
            }
            else if (RANY(obj)->as.data.dfree) {
                (*RANY(obj)->as.data.dfree)(DATA_PTR(obj));
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            rb_io_fptr_finalize(RANY(obj)->as.file.fptr);
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_ICLASS:
        /* iClass shares table with the module */
        break;

      case T_FLOAT:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
            xfree(RBIGNUM_DIGITS(obj));
        }
        break;
      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_SCOPE:
            if (RANY(obj)->as.node.u1.tbl) {
                xfree(RANY(obj)->as.node.u1.tbl);
            }
            break;
          case NODE_ALLOCA:
            xfree(RANY(obj)->as.node.u1.node);
            break;
        }
        return;                 /* no need to free iv_tbl */

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RANY(obj)->as.rstruct.as.heap.ptr) {
            xfree(RANY(obj)->as.rstruct.as.heap.ptr);
        }
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%lx(%p)",
               RANY(obj)->as.basic.flags & T_MASK, (void*)obj);
    }
}
#ifdef __GNUC__
#if defined(__human68k__) || defined(DJGPP)
#undef rb_setjmp
#undef rb_jmp_buf
#if defined(__human68k__)
typedef unsigned long rb_jmp_buf[8];
__asm__ (".even\n\
_rb_setjmp:\n\
	move.l	4(sp),a0\n\
	movem.l	d3-d7/a3-a5,(a0)\n\
	moveq.l	#0,d0\n\
	rts");
#else
#if defined(DJGPP)
typedef unsigned long rb_jmp_buf[6];
__asm__ (".align 4\n\
_rb_setjmp:\n\
	pushl	%ebp\n\
	movl	%esp,%ebp\n\
	movl	8(%ebp),%ebp\n\
	movl	%eax,(%ebp)\n\
	movl	%ebx,4(%ebp)\n\
	movl	%ecx,8(%ebp)\n\
	movl	%edx,12(%ebp)\n\
	movl	%esi,16(%ebp)\n\
	movl	%edi,20(%ebp)\n\
	popl	%ebp\n\
	xorl	%eax,%eax\n\
	ret");
#endif
#endif
int rb_setjmp(rb_jmp_buf);
#endif /* __human68k__ or DJGPP */
#endif /* __GNUC__ */

#define GC_NOTIFY 0

void rb_vm_mark(void *ptr);
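/* mark_current_machine_context() turns the running thread's machine
 * context into GC roots: rb_setjmp() spills the CPU registers into
 * save_regs_gc_mark, which is then scanned conservatively together with
 * the machine stack itself (and, on ia64, the register backing store). */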
static void
mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
{
    rb_jmp_buf save_regs_gc_mark;
    VALUE *stack_start, *stack_end;

    SET_STACK_END;
#if STACK_GROW_DIRECTION < 0
    stack_start = th->machine_stack_end;
    stack_end = th->machine_stack_start;
#elif STACK_GROW_DIRECTION > 0
    stack_start = th->machine_stack_start;
    stack_end = th->machine_stack_end + 1;
#else
    if (th->machine_stack_end < th->machine_stack_start) {
        stack_start = th->machine_stack_end;
        stack_end = th->machine_stack_start;
    }
    else {
        stack_start = th->machine_stack_start;
        stack_end = th->machine_stack_end + 1;
    }
#endif

    FLUSH_REGISTER_WINDOWS;
    /* This assumes that all registers are saved into the jmp_buf (and stack) */
    rb_setjmp(save_regs_gc_mark);
    mark_locations_array(objspace,
                         (VALUE*)save_regs_gc_mark,
                         sizeof(save_regs_gc_mark) / sizeof(VALUE));

    rb_gc_mark_locations(stack_start, stack_end);
#ifdef __ia64
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
#if defined(__human68k__) || defined(__mc68000__)
    mark_locations_array((VALUE*)((char*)STACK_END + 2),
                         (STACK_START - STACK_END));
#endif
}
void rb_gc_mark_encodings(void);

static int
garbage_collect(rb_objspace_t *objspace)
{
    struct gc_list *list;
    rb_thread_t *th = GET_THREAD();

    if (GC_NOTIFY) printf("start garbage_collect()\n");

    if (!heaps) {
        return Qfalse;
    }

    if (dont_gc || during_gc) {
        if (!freelist) {
            if (!heaps_increment(objspace)) {
                set_heaps_increment(objspace);
                heaps_increment(objspace);
            }
        }
        return Qtrue;
    }
    during_gc++;
    objspace->count++;

    SET_STACK_END;

    init_mark_stack(objspace);

    th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);

    if (finalizer_table) {
        mark_tbl(objspace, finalizer_table, 0);
    }

    mark_current_machine_context(objspace, th);

    rb_gc_mark_threads();
    rb_gc_mark_symbols();
    rb_gc_mark_encodings();

    /* mark protected global variables */
    for (list = global_List; list; list = list->next) {
        rb_gc_mark_maybe(*list->varptr);
    }
    rb_mark_end_proc();
    rb_gc_mark_global_tbl();

    mark_tbl(objspace, rb_class_tbl, 0);
    rb_gc_mark_trap_list();

    /* mark generic instance variables for special constants */
    rb_mark_generic_ivar_tbl();

    rb_gc_mark_parser();

    /* gc_mark objects whose marking is not yet completed */
    while (!MARK_STACK_EMPTY) {
        if (mark_stack_overflow) {
            gc_mark_all(objspace);
        }
        else {
            gc_mark_rest(objspace);
        }
    }

    gc_sweep(objspace);

    if (GC_NOTIFY) printf("end garbage_collect()\n");
    return Qtrue;
}
int
rb_garbage_collect(void)
{
    return garbage_collect(&rb_objspace);
}

void
rb_gc_mark_machine_stack(rb_thread_t *th)
{
    rb_objspace_t *objspace = &rb_objspace;
#if STACK_GROW_DIRECTION < 0
    rb_gc_mark_locations(th->machine_stack_end, th->machine_stack_start);
#elif STACK_GROW_DIRECTION > 0
    rb_gc_mark_locations(th->machine_stack_start, th->machine_stack_end);
#else
    if (th->machine_stack_start < th->machine_stack_end) {
        rb_gc_mark_locations(th->machine_stack_start, th->machine_stack_end);
    }
    else {
        rb_gc_mark_locations(th->machine_stack_end, th->machine_stack_start);
    }
#endif
#ifdef __ia64
    rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
#endif
}

/*
 *  call-seq:
 *     GC.start                     => nil
 *     gc.garbage_collect           => nil
 *     ObjectSpace.garbage_collect  => nil
 *
 *  Initiates garbage collection, unless manually disabled.
 *
 */

VALUE
rb_gc_start(void)
{
    rb_gc();
    return Qnil;
}

#undef Init_stack

void
Init_stack(VALUE *addr)
{
    ruby_init_stack(addr);
}
/*
 *  Document-class: ObjectSpace
 *
 *  The <code>ObjectSpace</code> module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  <code>ObjectSpace</code> also provides support for object
 *  finalizers, procs that will be called when a specific object is
 *  about to be destroyed by garbage collection.
 *
 *     include ObjectSpace
 *
 *     a = "A"
 *     b = "B"
 *     c = "C"
 *
 *     define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
 *     define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
 *
 *  <em>produces:</em>
 *
 *     Finalizer three on 537763470
 *     Finalizer one on 537763480
 *     Finalizer two on 537763480
 *
 */

void
Init_heap(void)
{
    init_heap(&rb_objspace);
}
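/* os_obj_of() re-derives its heap index from the last membase it
 * visited on each pass of the outer loop: the block run by rb_yield()
 * may allocate, which can grow and reorder the heaps array mid-walk, so
 * a plain index could otherwise skip or revisit pages. */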
static VALUE
os_obj_of(rb_objspace_t *objspace, VALUE of)
{
    size_t i;
    size_t n = 0;
    RVALUE *membase = 0;
    RVALUE *p, *pend;
    volatile VALUE v;

    i = 0;
    while (i < heaps_used) {
        while (0 < i && (uintptr_t)membase < (uintptr_t)heaps[i-1].membase)
            i--;
        while (i < heaps_used && (uintptr_t)heaps[i].membase <= (uintptr_t)membase)
            i++;
        if (heaps_used <= i)
            break;
        membase = heaps[i].membase;

        p = heaps[i].slot; pend = p + heaps[i].limit;
        for (;p < pend; p++) {
            if (p->as.basic.flags) {
                switch (BUILTIN_TYPE(p)) {
                  case T_NONE:
                  case T_ICLASS:
                  case T_NODE:
                    continue;
                  case T_CLASS:
                    if (FL_TEST(p, FL_SINGLETON)) continue;
                  default:
                    if (!p->as.basic.klass) continue;
                    v = (VALUE)p;
                    if (!of || rb_obj_is_kind_of(v, of)) {
                        rb_yield(v);
                        n++;
                    }
                }
            }
        }
    }

    return SIZET2NUM(n);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } => fixnum
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, <code>each_object</code>
 *  returns both the numbers we defined and several constants defined in
 *  the <code>Math</code> module.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.71828182845905
 *     3.14159265358979
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 7
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    rb_secure(4);
    if (argc == 0) {
        of = 0;
    }
    else {
        rb_scan_args(argc, argv, "01", &of);
    }
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(&rb_objspace, of);
}
/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (finalizer_table) {
        st_delete(finalizer_table, (st_data_t*)&obj, 0);
    }
    return obj;
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed.
 *
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj, block, table;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    FL_SET(obj, FL_FINALIZE);

    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);

    if (!finalizer_table) {
        finalizer_table = st_init_numtable();
    }
    if (st_lookup(finalizer_table, obj, &table)) {
        rb_ary_push(table, block);
    }
    else {
        st_add_direct(finalizer_table, obj, rb_ary_new3(1, block));
    }
    return block;
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;

    if (!finalizer_table) return;
    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &table)) {
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}

static VALUE
run_single_final(VALUE arg)
{
    VALUE *args = (VALUE *)arg;
    rb_eval_cmd(args[0], args[1], (int)args[2]);
    return Qnil;
}

static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    long i;
    int status, critical_save = rb_thread_critical;
    VALUE args[3], table, objid;

    objid = rb_obj_id(obj);     /* make obj into id */
    rb_thread_critical = Qtrue;
    args[1] = 0;
    args[2] = (VALUE)rb_safe_level();
    if (finalizer_table && st_delete(finalizer_table, (st_data_t*)&obj, &table)) {
        if (!args[1] && RARRAY_LEN(table) > 0) {
            args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
        }
        for (i=0; i<RARRAY_LEN(table); i++) {
            VALUE final = RARRAY_PTR(table)[i];
            args[0] = RARRAY_PTR(final)[1];
            args[2] = FIX2INT(RARRAY_PTR(final)[0]);
            rb_protect(run_single_final, (VALUE)args, &status);
        }
    }
    rb_thread_critical = critical_save;
}
static void
gc_finalize_deferred(rb_objspace_t *objspace)
{
    RVALUE *p = deferred_final_list;

    deferred_final_list = 0;
    if (p) {
        finalize_list(objspace, p);
    }
    free_unused_heaps(objspace);
}

void
rb_gc_finalize_deferred(void)
{
    gc_finalize_deferred(&rb_objspace);
}

static int
chain_finalized_object(st_data_t key, st_data_t val, st_data_t arg)
{
    RVALUE *p = (RVALUE *)key, **final_list = (RVALUE **)arg;
    if (p->as.basic.flags & FL_FINALIZE) {
        p->as.free.flags = FL_MARK; /* remain marked */
        p->as.free.next = *final_list;
        *final_list = p;
    }
    return ST_CONTINUE;
}

void
rb_gc_call_finalizer_at_exit(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    RVALUE *p, *pend;
    size_t i;

    /* run finalizers */
    if (need_call_final) {
        do {
            p = deferred_final_list;
            deferred_final_list = 0;
            finalize_list(objspace, p);
            st_foreach(finalizer_table, chain_finalized_object,
                       (st_data_t)&deferred_final_list);
        } while (deferred_final_list);
    }
    /* finalizers are part of garbage collection */
    during_gc++;
    /* run data object's finalizers */
    for (i = 0; i < heaps_used; i++) {
        p = heaps[i].slot; pend = p + heaps[i].limit;
        while (p < pend) {
            if (BUILTIN_TYPE(p) == T_DATA &&
                DATA_PTR(p) && RANY(p)->as.data.dfree &&
                RANY(p)->as.basic.klass != rb_cThread) {
                p->as.free.flags = 0;
                if ((long)RANY(p)->as.data.dfree == -1) {
                    xfree(DATA_PTR(p));
                }
                else if (RANY(p)->as.data.dfree) {
                    (*RANY(p)->as.data.dfree)(DATA_PTR(p));
                }
                VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
            }
            else if (BUILTIN_TYPE(p) == T_FILE) {
                if (rb_io_fptr_finalize(RANY(p)->as.file.fptr)) {
                    p->as.free.flags = 0;
                    VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
                }
            }
            p++;
        }
    }
    during_gc = 0;
}

void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    garbage_collect(objspace);
    gc_finalize_deferred(objspace);
}

/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 */
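/* id2ref() inverts the encoding documented in rb_obj_id() below.  After
 * clearing FIXNUM_FLAG, a value whose residue modulo sizeof(RVALUE)
 * equals 16 (4 << 2) can only be an encoded symbol id, since real
 * object addresses are 0 mod sizeof(RVALUE); anything else must point
 * at a live, properly typed heap slot or a RangeError is raised. */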
static VALUE
id2ref(VALUE obj, VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    rb_objspace_t *objspace = &rb_objspace;
    VALUE ptr;
    void *p0;

    rb_secure(4);
    ptr = NUM2PTR(objid);
    p0 = (void *)ptr;

    if (ptr == Qtrue) return Qtrue;
    if (ptr == Qfalse) return Qfalse;
    if (ptr == Qnil) return Qnil;
    if (FIXNUM_P(ptr)) return (VALUE)ptr;
    ptr = objid ^ FIXNUM_FLAG;  /* unset FIXNUM_FLAG */

    if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
        ID symid = ptr / sizeof(RVALUE);
        if (rb_id2name(symid) == 0)
            rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
        return ID2SYM(symid);
    }

    if (!is_pointer_to_heap(objspace, (void *)ptr) ||
        BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
        rb_raise(rb_eRangeError, "%p is not id value", p0);
    }
    if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
        rb_raise(rb_eRangeError, "%p is recycled object", p0);
    }
    return (VALUE)ptr;
}

/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       => fixnum
 *     obj.object_id    => fixnum
 *
 *  Returns an integer identifier for <i>obj</i>. The same number will
 *  be returned on all calls to <code>id</code> for a given object, and
 *  no two active objects will share an id.
 *  <code>Object#object_id</code> is a different concept from the
 *  <code>:name</code> notation, which returns the symbol id of
 *  <code>name</code>. Replaces the deprecated <code>Object#id</code>.
 */

/*
 *  call-seq:
 *     obj.hash    => fixnum
 *
 *  Generates a <code>Fixnum</code> hash value for this object. This
 *  function must have the property that <code>a.eql?(b)</code> implies
 *  <code>a.hash == b.hash</code>. The hash value is used by class
 *  <code>Hash</code>. Any hash value that exceeds the capacity of a
 *  <code>Fixnum</code> will be truncated before being used.
 */

VALUE
rb_obj_id(VALUE obj)
{
    /*
     *                32-bit VALUE space
     *          MSB ------------------------ LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol  ssssssssssssssssssssssss00001110
     *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
     *  fixnum  fffffffffffffffffffffffffffffff1
     *
     *                object_id space
     *                                          LSB
     *  false   00000000000000000000000000000000
     *  true    00000000000000000000000000000010
     *  nil     00000000000000000000000000000100
     *  undef   00000000000000000000000000000110
     *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0       S...S % A = 4 (S...S = s...s * A + 4)
     *  object   oooooooooooooooooooooooooooooo0       o...o % A = 0
     *  fixnum  fffffffffffffffffffffffffffffff1       bignum if required
     *
     *  where A = sizeof(RVALUE)/4
     *
     *  sizeof(RVALUE) is
     *  20 if 32-bit, double is 4-byte aligned
     *  24 if 32-bit, double is 8-byte aligned
     *  40 if 64-bit
     */
    if (TYPE(obj) == T_SYMBOL) {
        return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
    }
    if (SPECIAL_CONST_P(obj)) {
        return LONG2NUM((SIGNED_VALUE)obj);
    }
    return (VALUE)((SIGNED_VALUE)obj|FIXNUM_FLAG);
}
static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE k = (VALUE)key;
    VALUE hash = (VALUE)arg;
    rb_hash_aset(hash, k, INT2FIX(0));
    return ST_CONTINUE;
}

/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts objects for each type.
 *
 *  It returns a hash such as:
 *     {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
 *
 *  If the optional argument, result_hash, is given, it is overwritten
 *  and returned.  This is intended to avoid the probe effect.
 *
 *  The contents of the returned hash are implementation defined and
 *  may change in the future.
 *
 *  This method is only expected to work on C Ruby.
 *
 */

static VALUE
count_objects(int argc, VALUE *argv, VALUE os)
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t counts[T_MASK+1];
    size_t freed = 0;
    size_t total = 0;
    size_t i;
    VALUE hash;

    if (rb_scan_args(argc, argv, "01", &hash) == 1) {
        if (TYPE(hash) != T_HASH)
            rb_raise(rb_eTypeError, "non-hash given");
    }

    for (i = 0; i <= T_MASK; i++) {
        counts[i] = 0;
    }

    for (i = 0; i < heaps_used; i++) {
        RVALUE *p, *pend;

        p = heaps[i].slot; pend = p + heaps[i].limit;
        for (;p < pend; p++) {
            if (p->as.basic.flags) {
                counts[BUILTIN_TYPE(p)]++;
            }
            else {
                freed++;
            }
        }
        total += heaps[i].limit;
    }

    if (hash == Qnil) {
        hash = rb_hash_new();
    }
    else if (!RHASH_EMPTY_P(hash)) {
        st_foreach(RHASH_TBL(hash), set_zero, hash);
    }
    rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
    rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
    for (i = 0; i <= T_MASK; i++) {
        VALUE type;
        switch (i) {
#define COUNT_TYPE(t) case t: type = ID2SYM(rb_intern(#t)); break;
            COUNT_TYPE(T_NONE);
            COUNT_TYPE(T_OBJECT);
            COUNT_TYPE(T_CLASS);
            COUNT_TYPE(T_MODULE);
            COUNT_TYPE(T_FLOAT);
            COUNT_TYPE(T_STRING);
            COUNT_TYPE(T_REGEXP);
            COUNT_TYPE(T_ARRAY);
            COUNT_TYPE(T_HASH);
            COUNT_TYPE(T_STRUCT);
            COUNT_TYPE(T_BIGNUM);
            COUNT_TYPE(T_FILE);
            COUNT_TYPE(T_DATA);
            COUNT_TYPE(T_MATCH);
            COUNT_TYPE(T_COMPLEX);
            COUNT_TYPE(T_RATIONAL);
            COUNT_TYPE(T_NIL);
            COUNT_TYPE(T_TRUE);
            COUNT_TYPE(T_FALSE);
            COUNT_TYPE(T_SYMBOL);
            COUNT_TYPE(T_FIXNUM);
            COUNT_TYPE(T_UNDEF);
            COUNT_TYPE(T_NODE);
            COUNT_TYPE(T_ICLASS);
#undef COUNT_TYPE
          default: type = INT2NUM(i); break;
        }
        if (counts[i])
            rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
    }

    return hash;
}

/*
 *  call-seq:
 *     GC.count -> Integer
 *
 *  The number of times GC has occurred.
 *
 *  It returns the number of times GC has occurred since the process started.
 *
 */

static VALUE
gc_count(VALUE self)
{
    return UINT2NUM((&rb_objspace)->count);
}

#if CALC_EXACT_MALLOC_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  The total size allocated by malloc().
 *
 *  It returns the total size allocated by malloc().
 */
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  The number of memory objects allocated by malloc().
 *
 *  It returns the number of memory objects allocated by malloc().
 */
static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM((&rb_objspace)->malloc_params.allocations);
}
#endif

/*
 *  The <code>GC</code> module provides an interface to Ruby's mark and
 *  sweep garbage collection mechanism. Some of the underlying methods
 *  are also available via the <code>ObjectSpace</code> module.
 */

void
Init_GC(void)
{
    VALUE rb_mObSpace;

    rb_mGC = rb_define_module("GC");
    rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
    rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
    rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
    rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
    rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
    rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
    rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);

    rb_mObSpace = rb_define_module("ObjectSpace");
    rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
    rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);

    rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);

    nomem_error = rb_exc_new3(rb_eNoMemError,
                              rb_obj_freeze(rb_str_new2("failed to allocate memory")));
    OBJ_TAINT(nomem_error);
    OBJ_FREEZE(nomem_error);

    rb_define_method(rb_mKernel, "hash", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);

#if CALC_EXACT_MALLOC_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif
}