/* [rbx.git] shotgun/lib/marksweep.c
 * blob a018738668b5193a57575bdbefc6760ae71eda21
 * (web-view residue; commit note: "Added spec:commit task to commit changes
 * to spec/ruby sources.")
 */
/*

 A mark/sweep GC.

 Starting at the roots, all objects that are
 reachable are marked, regardless if they are
 located in the mark/sweep or not (ie, they could
 be in another generation).

 Then, all objects allocated under the mark/sweep
 are traversed and any that aren't marked are
 placed back in the free_list.

 Finally, all objects in other generations have
 their mark cleared.

*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "shotgun/lib/shotgun.h"
#include "shotgun/lib/object.h"
#include "shotgun/lib/cpu.h"
#include "shotgun/lib/methctx.h"
#include "shotgun/lib/bytearray.h"
#include "shotgun/lib/tuple.h"
/* Translate between an object pointer and its mark/sweep bookkeeping header,
 * which sits immediately in front of the object body. */
#define to_header(obj) ((struct ms_header*)((uintptr_t)(obj) - sizeof(struct ms_header)))
#define to_object(hed) ((OBJECT)((uintptr_t)(hed) + sizeof(struct ms_header)))

#define FREE_OBJECT 0x10000
/* NOTE(review): C has no ** exponentiation operator; "2**SIZE_OF_OBJECT"
 * parses as 2 * (*SIZE_OF_OBJECT). The macro is unused in this file —
 * confirm the intended value before using it. */
#define BARRIER (2**SIZE_OF_OBJECT)

/* Number of ms_entry slots added each time a chunk is allocated. */
#define TABLE_INCS 4096

/* Flag value stamped on freed objects (see mark_sweep_free_object). */
#define FREE_FLAG 0xff

/* Debug toggles: track one object by address / poison instead of free. */
#define TRACK_REFERENCE 0
#define TRACK_DONT_FREE 0

#undef MS_COLLECTION_FREQUENCY
#define MS_COLLECTION_FREQUENCY 5000

// 10 Megs (the old comment said "2 Megs" but the value is 10485760)
#define MS_COLLECTION_BYTES 10485760
46 mark_sweep_gc mark_sweep_new() {
47 mark_sweep_gc ms;
49 ms = calloc(1, sizeof(struct _mark_sweep_gc));
50 ms->remember_set = ptr_array_new(8);
51 mark_sweep_add_chunk(ms);
52 ms->chunks = ms->current;
53 ms->enlarged = 0;
54 ms->seen_weak_refs = NULL;
55 ms->next_collection_objects = MS_COLLECTION_FREQUENCY;
56 ms->next_collection_bytes = MS_COLLECTION_BYTES;
57 ms->become_from = ms->become_to = Qnil;
58 #if TRACK_REFERENCE
59 if(getenv("MSTRACK")) {
60 ms->track = (OBJECT)atoi(getenv("MSTRACK"));
62 #endif
63 return ms;
66 void mark_sweep_destroy(mark_sweep_gc ms) {
67 int i, count;
68 struct ms_entry *ent;
69 ms_chunk *cur, *old;
71 cur = ms->chunks;
72 count = 0;
74 while(cur) {
76 for(i = 0; i < cur->num_entries; i++) {
77 ent = &(cur->entries[i]);
78 if(ent->bytes) {
79 free(ent->object);
83 old = cur;
84 cur = cur->next;
85 free(old->entries);
86 free(old);
89 free(ms);
92 void mark_sweep_add_chunk(mark_sweep_gc ms) {
93 ms_chunk *new;
94 struct ms_entry *ent;
95 int i;
97 ms->enlarged = 1;
99 new = calloc(1, sizeof(struct ms_chunk));
101 ms->num_chunks++;
102 new->num_entries = TABLE_INCS;
103 new->entries = calloc(new->num_entries, sizeof(struct ms_entry));
104 new->next = NULL;
105 new->next_entry = 0;
107 if(!ms->current) {
108 ms->current = new;
109 } else {
110 ms->current->next = new;
111 ms->current = new;
113 for(i = 0; i < TABLE_INCS; i++) {
114 ent = new->entries + i;
115 ent->object = (struct ms_header*)(ms->free_list);
116 ms->free_list = ent;
121 OBJECT mark_sweep_allocate(mark_sweep_gc ms, int obj_fields) {
122 int bytes;
123 struct ms_header *oh;
124 struct ms_entry *ent;
125 OBJECT ro;
127 bytes = sizeof(struct ms_header) + SIZE_IN_BYTES_FIELDS(obj_fields);
129 ent = ms->free_list;
131 if(!ent) {
132 mark_sweep_add_chunk(ms);
133 ent = ms->free_list;
136 ms->free_list = (struct ms_entry*)ent->object;
137 oh = (struct ms_header*)calloc(1, bytes);
138 ent->object = oh;
139 ent->fields = obj_fields;
140 ent->bytes = bytes;
141 ent->marked = 0;
143 ms->allocated_bytes += bytes;
144 ms->last_allocated += bytes;
145 ms->allocated_objects++;
146 // ms->next_collection_objects--;
147 ms->next_collection_bytes -= bytes;
149 if(obj_fields > 10000) {
150 // printf("[GC M allocating large object %d]\n", obj_fields);
153 if(!ms->enlarged) {
155 if(ms->next_collection_objects <= 0) {
156 // printf("[GC M Collecting based on objects]\n");
157 ms->enlarged = 1;
158 ms->next_collection_bytes = MS_COLLECTION_FREQUENCY;
159 } else if(ms->next_collection_bytes <= 0) {
160 // printf("[GC M Collecting based on bytes: %d]\n", ms->allocated_bytes);
161 ms->enlarged = 1;
167 if(ms->last_allocated_bytes) {
168 if(ms->last_allocated_bytes * 2 <= ms->allocated_bytes) {
169 printf("[GC M Collecting based on heap size (%d, %d)]\n", ms->last_allocated_bytes, ms->allocated_bytes);
170 ms->enlarged = 1;
172 } else if(ms->last_allocated >= ms->next_collection) {
173 curt = clock();
174 ms->last_allocated = 0;
175 ms->enlarged = 1;
176 printf("[GC M Collecting based on bytes (every %d bytes, %d total)]\n", MS_COLLECTION_BYTES, ms->allocated_bytes);
177 printf("[GC M last_clock=%d, cur=%d]\n", ms->last_clock, curt);
178 ms->last_clock = curt;
183 oh->entry = ent;
184 ro = to_object(oh);
185 // printf("Allocated %d\n", ro);
186 SET_NUM_FIELDS(ro, obj_fields);
188 #if TRACK_REFERENCE
189 if(ms->track == ro) {
190 printf("Allocated tracked object %p\n", ro);
192 #endif
195 return ro;
198 void mark_sweep_free_entry(STATE, mark_sweep_gc ms, struct ms_entry *ent) {
199 OBJECT obj;
201 obj = to_object(ent->object);
203 #if TRACK_REFERENCE
204 if(ms->track == obj) {
205 printf("Free'ing tracked object %p\n", obj);
207 #endif
209 if(obj->Remember) {
210 ptr_array_remove_fast(state->om->gc->remember_set, (xpointer)obj);
213 if(obj->RequiresCleanup) {
214 if(obj->obj_type == MemPtrType) {
215 void *addr = *DATA_STRUCT(obj, void**);
216 if(addr) free(addr);
217 obj->RequiresCleanup = 0;
218 } else {
219 state_run_cleanup(state, obj);
223 if(obj->obj_type == WrapsStructType) FREE_WRAPPED_STRUCT(obj);
225 #if TRACK_DONT_FREE
226 memset(obj, 0, SIZE_IN_BYTES(obj));
227 *((int*)(obj)) = 0xbaddecaf;
228 #else
229 free(ent->object);
230 #endif
232 ms->last_freed++;
234 ms->allocated_bytes -= ent->bytes;
235 ms->allocated_objects--;
237 ent->bytes = 0;
238 ent->marked = 0;
239 ent->fields = 0;
241 ent->object = (struct ms_header*)(ms->free_list);
242 ms->free_list = ent;
/* True when an object's flags carry the freed marker. */
#define mark_sweep_free_object(obj) ((obj)->flags == FREE_FLAG)
247 void mark_sweep_describe(mark_sweep_gc ms) {
248 printf("Last marked: %d\n", ms->last_marked);
251 #define BCM_P(obj) (ms->become_from == obj)
252 #define BCM_TO (ms->become_to)
254 int _object_stores_bytes(OBJECT self);
256 int mark_sweep_contains_p(mark_sweep_gc ms, OBJECT obj) {
257 return obj->gc_zone == MatureObjectZone;
261 static OBJECT mark_sweep_mark_object(STATE, mark_sweep_gc ms, OBJECT iobj) {
262 OBJECT cls, tmp;
263 int i;
264 struct ms_header *header;
266 #if TRACK_REFERENCE
267 if(ms->track == iobj) {
268 printf("Found tracked object %p\n", iobj);
270 #endif
272 if(iobj->gc_zone == MatureObjectZone) {
273 header = to_header(iobj);
275 assert(header->entry->object == header);
277 /* Already marked! */
278 if(header->entry->marked) return iobj;
279 header->entry->marked = 1;
281 if(iobj->obj_type == WrapsStructType) MARK_WRAPPED_STRUCT(iobj);
282 } else {
283 if(iobj->Marked) return iobj;
285 iobj->Marked = TRUE;
286 if(iobj->obj_type == WrapsStructType) MARK_WRAPPED_STRUCT(iobj);
289 // printf("Marked %d\n", iobj);
291 ms->last_marked++;
293 cls = CLASS_OBJECT(iobj);
294 if(REFERENCE_P(cls)) {
295 if(BCM_P(cls)) {
296 SET_CLASS(iobj, BCM_TO);
297 } else {
298 mark_sweep_mark_object(state, ms, cls);
302 if(iobj->RefsAreWeak) {
303 // printf("%p has weak refs.\n", (void*)iobj);
304 ptr_array_append(ms->seen_weak_refs, (xpointer)iobj);
305 return iobj;
308 if(!_object_stores_bytes(iobj)) {
309 for(i = 0; i < NUM_FIELDS(iobj); i++) {
310 tmp = NTH_FIELD(iobj, i);
311 if(!REFERENCE_P(tmp)) continue;
313 if(BCM_P(tmp)) {
314 SET_FIELD(iobj, i, BCM_TO);
315 } else {
316 mark_sweep_mark_object(state, ms, tmp);
319 } else {
320 #define fc_mutate(field) if(REFERENCE2_P(fc->field)) { \
321 if(BCM_P(fc->field)) { fc->field = BCM_TO; \
322 } else { mark_sweep_mark_object(state, ms, fc->field); } }
324 if(context_p(state, iobj)) {
325 struct fast_context *fc = FASTCTX(iobj);
326 fc_mutate(sender);
328 fc_mutate(block);
329 fc_mutate(method);
330 fc_mutate(literals);
331 fc_mutate(self);
332 fc_mutate(custom_iseq);
333 fc_mutate(locals);
334 fc_mutate(method_module);
335 fc_mutate(name);
337 if(!NIL_P(fc->method) && fc->method->obj_type == CMethodType) {
338 /* We cache the bytecode in a char*, so adjust it. */
339 OBJECT ba;
340 if(NIL_P(fc->custom_iseq)) {
341 ba = cmethod_get_compiled(fc->method);
342 fc->data = BYTEARRAY_ADDRESS(ba);
343 } else {
344 /* A context can be set to use a custom instruction sequence
345 by the debugger; if this is the case, we need to preserve
346 the context's use of that custom iseq. */
347 fc->data = BYTEARRAY_ADDRESS(fc->custom_iseq);
350 } else if(iobj->obj_type == TaskType) {
351 struct cpu_task *fc = (struct cpu_task*)BYTES_OF(iobj);
353 if(!fc->active) {
354 fc_mutate(exception);
355 fc_mutate(enclosing_class);
356 fc_mutate(active_context);
357 fc_mutate(home_context);
358 fc_mutate(main);
359 fc_mutate(debug_channel);
360 fc_mutate(control_channel);
361 fc_mutate(current_scope);
363 OBJECT *sp;
365 sp = fc->stack_top;
366 while(sp <= fc->sp_ptr) {
367 if(REFERENCE2_P(*sp)) {
368 if(BCM_P(*sp)) {
369 *sp = BCM_TO;
370 } else {
371 mark_sweep_mark_object(state, ms, *sp);
374 sp++;
377 int i;
378 for(i = 0; i < ptr_array_length(fc->paths); i++) {
379 tmp = (OBJECT)ptr_array_get_index(fc->paths, i);
380 if(BCM_P(tmp)) {
381 ptr_array_set_index(fc->paths,i,(xpointer)BCM_TO);
382 } else {
383 mark_sweep_mark_object(state, ms, tmp);
387 } else {
388 /* Handle the generic type_info prefix fields */
389 int fields = state->type_info[iobj->obj_type].object_fields;
390 for(i = 0; i < fields; i++) {
391 tmp = NTH_FIELD(iobj, i);
392 if(!REFERENCE_P(tmp)) continue;
394 if(BCM_P(tmp)) {
395 SET_FIELD(iobj, i, BCM_TO);
396 } else {
397 mark_sweep_mark_object(state, ms, tmp);
402 #undef fc_mutate
405 return iobj;
408 void mark_sweep_mark_context(STATE, mark_sweep_gc ms, OBJECT iobj) {
409 #define fc_mutate(field) if(fc->field && REFERENCE_P(fc->field)) mark_sweep_mark_object(state, ms, fc->field)
410 if (iobj->Marked) return;
412 iobj->Marked = TRUE;
414 struct fast_context *fc = FASTCTX(iobj);
416 if(REFERENCE_P(fc->sender) && om_in_heap(state->om, fc->sender)) {
417 fc_mutate(sender);
420 fc_mutate(method);
421 fc_mutate(block);
422 fc_mutate(literals);
423 fc_mutate(self);
424 fc_mutate(custom_iseq);
425 fc_mutate(locals);
426 fc_mutate(method_module);
427 fc_mutate(name);
428 fc_mutate(locals);
430 if(!NIL_P(fc->method) && fc->method->obj_type == CMethodType) {
431 /* We cache the bytecode in a char*, so adjust it. */
432 OBJECT ba;
433 if(NIL_P(fc->custom_iseq)) {
434 ba = cmethod_get_compiled(fc->method);
435 fc->data = BYTEARRAY_ADDRESS(ba);
436 } else {
437 /* A context can be set to use a custom instruction sequence
438 by the debugger; if this is the case, we need to preserve
439 the context's use of that custom iseq. */
440 fc->data = BYTEARRAY_ADDRESS(fc->custom_iseq);
443 #undef fc_mutate
446 void mark_sweep_clear_mark(STATE, OBJECT iobj) {
447 iobj->Marked = FALSE;
450 void mark_sweep_mark_phase(STATE, mark_sweep_gc ms, ptr_array roots) {
451 int i, sz;
452 OBJECT root, tmp;
454 if(!NIL_P(ms->become_to)) {
455 mark_sweep_mark_object(state, ms, ms->become_to);
458 if(!NIL_P(ms->become_from)) {
459 mark_sweep_mark_object(state, ms, ms->become_from);
462 sz = ptr_array_length(roots);
463 for(i = 0; i < sz; i++) {
464 root = (OBJECT)(ptr_array_get_index(roots, i));
465 if(!REFERENCE2_P(root)) { continue; }
466 if(BCM_P(root)) {
467 ptr_array_set_index(roots, i, (xpointer)BCM_TO);
468 } else {
469 mark_sweep_mark_object(state, ms, root);
473 sz = ptr_array_length(ms->remember_set);
474 for(i = 0; i < sz; i++) {
475 root = (OBJECT)(ptr_array_get_index(ms->remember_set, i));
476 if(!REFERENCE2_P(root)) { continue; }
477 if(BCM_P(root)) {
478 ptr_array_set_index(ms->remember_set, i, (xpointer)BCM_TO);
479 } else {
480 mark_sweep_mark_object(state, ms, root);
484 /* Now the stack. */
485 OBJECT *sp;
487 sp = state->current_stack;
488 while(sp <= state->current_sp) {
489 if(REFERENCE2_P(*sp)) {
490 if(BCM_P(*sp)) {
491 *sp = BCM_TO;
492 } else {
493 mark_sweep_mark_object(state, ms, *sp);
496 sp++;
499 /* Now the handle table. */
500 for(i = 0; i < state->handle_tbl->total; i++) {
501 if(state->handle_tbl->entries[i]) {
502 tmp = state->handle_tbl->entries[i]->object;
503 if(BCM_P(tmp)) {
504 state->handle_tbl->entries[i]->object = BCM_TO;
505 } else {
506 mark_sweep_mark_object(state, ms, tmp);
511 cpu_event_each_channel(state,
512 (cpu_event_each_channel_cb) mark_sweep_mark_object, ms);
513 cpu_sampler_collect(state,
514 (cpu_sampler_collect_cb) mark_sweep_mark_object, ms);
516 object_memory_mark_contexts(state, state->om);
519 void mark_sweep_sweep_phase(STATE, mark_sweep_gc ms) {
520 int i, count;
521 OBJECT obj;
522 struct ms_entry *ent;
523 ms_chunk *cur;
525 cur = ms->chunks;
526 count = 0;
528 while(cur) {
530 for(i = 0; i < cur->num_entries; i++) {
531 ent = &(cur->entries[i]);
532 count++;
533 if(ent->bytes) {
534 obj = to_object(ent->object);
535 #if TRACK_REFERENCE
536 if(ms->track == obj) {
537 printf("Detect tracked object in sweep %p\n", obj);
539 #endif
541 assert(ent->fields == NUM_FIELDS(obj));
542 if(!ent->marked) {
543 mark_sweep_free_entry(state, ms, ent);
544 } else {
545 obj->Marked = FALSE;
546 ent->marked = 0;
551 cur = cur->next;
556 void mark_sweep_collect(STATE, mark_sweep_gc ms, ptr_array roots) {
557 struct method_cache *end, *ent;
558 ms->enlarged = 0;
559 ms->last_freed = 0;
560 ms->last_marked = 0;
562 ms->seen_weak_refs = ptr_array_new(8);
563 mark_sweep_mark_phase(state, ms, roots);
565 /* We handle the method cache a little differently. We treat it like every
566 * ref is weak so that it doesn't cause objects to live longer than they should. */
568 ent = state->method_cache;
569 end = ent + CPU_CACHE_SIZE;
571 while(ent < end) {
572 if(ent->klass) {
573 if(ent->klass->gc_zone == MatureObjectZone) {
574 if(!to_header(ent->klass)->entry->marked) {
575 ent->klass = 0;
580 if(ent->module) {
581 if(ent->module->gc_zone == MatureObjectZone) {
582 if(!to_header(ent->module)->entry->marked) {
583 ent->module = 0;
588 if(ent->method) {
589 if(ent->method->gc_zone == MatureObjectZone) {
590 if(!to_header(ent->method)->entry->marked) {
591 ent->method = 0;
596 if(!ent->klass || !ent->module || !ent->method) {
597 ent->name = 0;
598 ent->klass = 0;
599 ent->module = 0;
600 ent->method = 0;
603 ent++;
606 /* We update the weakrefs BEFORE we sweep, since the sweep will free
607 * the data for the object, and we'll loose important info. */
608 int j, i;
609 OBJECT tmp, t2;
610 for(i = 0; i < ptr_array_length(ms->seen_weak_refs); i++) {
611 tmp = (OBJECT)ptr_array_get_index(ms->seen_weak_refs, i);
612 for(j = 0; j < NUM_FIELDS(tmp); j++) {
613 t2 = tuple_at(state, tmp, j);
614 if(REFERENCE_P(t2) && t2->gc_zone == MatureObjectZone) {
615 if(!to_header(t2)->entry->marked) {
616 tuple_put(state, tmp, j, Qnil);
622 mark_sweep_sweep_phase(state, ms);
624 ptr_array_free(ms->seen_weak_refs);
625 ms->seen_weak_refs = NULL;
627 // printf("[GC M compacted to %d bytes]\n", ms->allocated_bytes);
628 ms->next_collection_bytes = ms->allocated_bytes + MS_COLLECTION_BYTES;
631 void mark_sweep_collect_references(STATE, mark_sweep_gc ms, OBJECT mark, ptr_array refs) {
632 int i, count;
633 OBJECT obj;
634 struct ms_entry *ent;
635 ms_chunk *cur;
637 cur = ms->chunks;
638 count = 0;
640 while(cur) {
642 for(i = 0; i < cur->num_entries; i++) {
643 ent = &(cur->entries[i]);
644 count++;
645 if(ent->bytes) {
646 obj = to_object(ent->object);
648 assert(ent->fields == NUM_FIELDS(obj));
650 int j;
651 if(!_object_stores_bytes(obj)) {
652 for(j = 0; j < NUM_FIELDS(obj); j++) {
653 if(NTH_FIELD(obj, j) == mark) {
654 ptr_array_append(refs, (xpointer)obj);
661 cur = cur->next;
668 void mark_sweep_collect_references(STATE, mark_sweep_gc ms, OBJECT mark, ptr_array refs) {
669 ms_chunk *cur;
670 char *addr, *last;
671 OBJECT obj;
672 int osz, used, i;
674 cur = ms->chunks;
676 freed_objects = 0;
678 while(cur) {
680 addr = (char*)(cur->address);
681 last = (addr + cur->size) - (sizeof(struct rubinius_object));
683 used = 0;
684 while(addr < last) {
685 obj = (OBJECT)addr;
686 osz = SIZE_IN_BYTES(obj);
688 if(obj->flags != FREE_FLAG && !_object_stores_bytes(obj)) {
689 for(i = 0; i < NUM_FIELDS(obj); i++) {
690 if(NTH_FIELD(obj, i) == mark) {
691 ptr_array_append(refs, (xpointer)obj);
695 addr += osz;
698 cur = cur->next;