5 Starting at the roots, all objects that are
6 reachable are marked, regardless of whether they are
7 located in the mark/sweep or not (i.e., they could
8 be in another generation).
10 Then, all objects allocated under the mark/sweep
11 are traversed and any that aren't marked are
12 placed in the free_list.
14 Finally, all objects in other generations have
19 #include "shotgun/lib/shotgun.h"
20 #include "shotgun/lib/object.h"
21 #include "shotgun/lib/cpu.h"
22 #include "shotgun/lib/methctx.h"
23 #include "shotgun/lib/bytearray.h"
24 #include "shotgun/lib/tuple.h"
/* Every mature object is immediately preceded in memory by a
   struct ms_header. These two macros convert between the header and
   the object it fronts by stepping over sizeof(struct ms_header)
   bytes; they are exact inverses of each other. */
26 #define to_header(obj) ((struct ms_header*)((uintptr_t)(obj) - sizeof(struct ms_header)))
27 #define to_object(hed) ((OBJECT)((uintptr_t)(hed) + sizeof(struct ms_header)))
/* Sentinel flag values and collection-tuning constants. */
29 #define FREE_OBJECT 0x10000
/* NOTE(review): C has no ** (exponentiation) operator — this expands to
   2 * (*SIZE_OF_OBJECT), i.e. a multiply by a pointer dereference, which
   is almost certainly not the intended 2^SIZE_OF_OBJECT. No use of
   BARRIER is visible in this file; confirm whether it is dead code
   before fixing or deleting. */
30 #define BARRIER (2**SIZE_OF_OBJECT)
/* Number of ms_entry slots added per chunk (see mark_sweep_add_chunk). */
32 #define TABLE_INCS 4096
/* Value stored in an object's flags byte to mark its slot as free. */
34 #define FREE_FLAG 0xff
36 #define TRACK_REFERENCE 0
37 #define TRACK_DONT_FREE 0
/* Collect after this many object allocations ... */
39 #undef MS_COLLECTION_FREQUENCY
40 #define MS_COLLECTION_FREQUENCY 5000
/* ... or after this many bytes (10 MiB) have been allocated. */
43 #define MS_COLLECTION_BYTES 10485760
46 mark_sweep_gc
mark_sweep_new() {
49 ms
= calloc(1, sizeof(struct _mark_sweep_gc
));
50 ms
->remember_set
= ptr_array_new(8);
51 mark_sweep_add_chunk(ms
);
52 ms
->chunks
= ms
->current
;
54 ms
->seen_weak_refs
= NULL
;
55 ms
->next_collection_objects
= MS_COLLECTION_FREQUENCY
;
56 ms
->next_collection_bytes
= MS_COLLECTION_BYTES
;
57 ms
->become_from
= ms
->become_to
= Qnil
;
59 if(getenv("MSTRACK")) {
60 ms
->track
= (OBJECT
)atoi(getenv("MSTRACK"));
66 void mark_sweep_destroy(mark_sweep_gc ms
) {
76 for(i
= 0; i
< cur
->num_entries
; i
++) {
77 ent
= &(cur
->entries
[i
]);
92 void mark_sweep_add_chunk(mark_sweep_gc ms
) {
99 new = calloc(1, sizeof(struct ms_chunk
));
102 new->num_entries
= TABLE_INCS
;
103 new->entries
= calloc(new->num_entries
, sizeof(struct ms_entry
));
110 ms
->current
->next
= new;
113 for(i
= 0; i
< TABLE_INCS
; i
++) {
114 ent
= new->entries
+ i
;
115 ent
->object
= (struct ms_header
*)(ms
->free_list
);
121 OBJECT
mark_sweep_allocate(mark_sweep_gc ms
, int obj_fields
) {
123 struct ms_header
*oh
;
124 struct ms_entry
*ent
;
127 bytes
= sizeof(struct ms_header
) + SIZE_IN_BYTES_FIELDS(obj_fields
);
132 mark_sweep_add_chunk(ms
);
136 ms
->free_list
= (struct ms_entry
*)ent
->object
;
137 oh
= (struct ms_header
*)calloc(1, bytes
);
139 ent
->fields
= obj_fields
;
143 ms
->allocated_bytes
+= bytes
;
144 ms
->last_allocated
+= bytes
;
145 ms
->allocated_objects
++;
146 // ms->next_collection_objects--;
147 ms
->next_collection_bytes
-= bytes
;
149 if(obj_fields
> 10000) {
150 // printf("[GC M allocating large object %d]\n", obj_fields);
155 if(ms
->next_collection_objects
<= 0) {
156 // printf("[GC M Collecting based on objects]\n");
158 ms
->next_collection_bytes
= MS_COLLECTION_FREQUENCY
;
159 } else if(ms
->next_collection_bytes
<= 0) {
160 // printf("[GC M Collecting based on bytes: %d]\n", ms->allocated_bytes);
167 if(ms->last_allocated_bytes) {
168 if(ms->last_allocated_bytes * 2 <= ms->allocated_bytes) {
169 printf("[GC M Collecting based on heap size (%d, %d)]\n", ms->last_allocated_bytes, ms->allocated_bytes);
172 } else if(ms->last_allocated >= ms->next_collection) {
174 ms->last_allocated = 0;
176 printf("[GC M Collecting based on bytes (every %d bytes, %d total)]\n", MS_COLLECTION_BYTES, ms->allocated_bytes);
177 printf("[GC M last_clock=%d, cur=%d]\n", ms->last_clock, curt);
178 ms->last_clock = curt;
185 // printf("Allocated %d\n", ro);
186 SET_NUM_FIELDS(ro
, obj_fields
);
189 if(ms
->track
== ro
) {
190 printf("Allocated tracked object %p\n", ro
);
198 void mark_sweep_free_entry(STATE
, mark_sweep_gc ms
, struct ms_entry
*ent
) {
201 obj
= to_object(ent
->object
);
204 if(ms
->track
== obj
) {
205 printf("Free'ing tracked object %p\n", obj
);
210 ptr_array_remove_fast(state
->om
->gc
->remember_set
, (xpointer
)obj
);
213 if(obj
->RequiresCleanup
) {
214 if(obj
->obj_type
== MemPtrType
) {
215 void *addr
= *DATA_STRUCT(obj
, void**);
217 obj
->RequiresCleanup
= 0;
219 state_run_cleanup(state
, obj
);
223 if(obj
->obj_type
== WrapsStructType
) FREE_WRAPPED_STRUCT(obj
);
226 memset(obj
, 0, SIZE_IN_BYTES(obj
));
227 *((int*)(obj
)) = 0xbaddecaf;
234 ms
->allocated_bytes
-= ent
->bytes
;
235 ms
->allocated_objects
--;
241 ent
->object
= (struct ms_header
*)(ms
->free_list
);
/* True when the slot at obj is on the free list — presumably its flags
   byte is set to FREE_FLAG when the entry is released (not visible
   here; confirm against the free path).
   NOTE(review): the obj argument is expanded unparenthesized; safe for
   simple lvalues, but verify no caller passes a compound expression. */
245 #define mark_sweep_free_object(obj) (obj->flags == FREE_FLAG)
/* Debug helper: prints ms->last_marked, the count recorded by the most
   recent mark phase. */
247 void mark_sweep_describe(mark_sweep_gc ms
) {
248 printf("Last marked: %d\n", ms
->last_marked
);
/* Support for reference swapping (Object#become): during marking, any
   reference equal to ms->become_from (tested with BCM_P) is rewritten
   to ms->become_to (BCM_TO). Both macros assume a variable named `ms`
   is in scope at the expansion site. */
251 #define BCM_P(obj) (ms->become_from == obj)
252 #define BCM_TO (ms->become_to)
/* Defined elsewhere; true when the object stores raw bytes rather than
   OBJECT reference fields, in which case the markers must not scan it
   for references. */
254 int _object_stores_bytes(OBJECT self
);
/* Returns nonzero when obj is managed by this mature (mark/sweep)
   generation. Membership is tracked with the object's gc_zone tag
   rather than an address-range test; the ms argument is unused. */
256 int mark_sweep_contains_p(mark_sweep_gc ms
, OBJECT obj
) {
257 return obj
->gc_zone
== MatureObjectZone
;
261 static OBJECT
mark_sweep_mark_object(STATE
, mark_sweep_gc ms
, OBJECT iobj
) {
264 struct ms_header
*header
;
267 if(ms
->track
== iobj
) {
268 printf("Found tracked object %p\n", iobj
);
272 if(iobj
->gc_zone
== MatureObjectZone
) {
273 header
= to_header(iobj
);
275 assert(header
->entry
->object
== header
);
277 /* Already marked! */
278 if(header
->entry
->marked
) return iobj
;
279 header
->entry
->marked
= 1;
281 if(iobj
->obj_type
== WrapsStructType
) MARK_WRAPPED_STRUCT(iobj
);
283 if(iobj
->Marked
) return iobj
;
286 if(iobj
->obj_type
== WrapsStructType
) MARK_WRAPPED_STRUCT(iobj
);
289 // printf("Marked %d\n", iobj);
293 cls
= CLASS_OBJECT(iobj
);
294 if(REFERENCE_P(cls
)) {
296 SET_CLASS(iobj
, BCM_TO
);
298 mark_sweep_mark_object(state
, ms
, cls
);
302 if(iobj
->RefsAreWeak
) {
303 // printf("%p has weak refs.\n", (void*)iobj);
304 ptr_array_append(ms
->seen_weak_refs
, (xpointer
)iobj
);
308 if(!_object_stores_bytes(iobj
)) {
309 for(i
= 0; i
< NUM_FIELDS(iobj
); i
++) {
310 tmp
= NTH_FIELD(iobj
, i
);
311 if(!REFERENCE_P(tmp
)) continue;
314 SET_FIELD(iobj
, i
, BCM_TO
);
316 mark_sweep_mark_object(state
, ms
, tmp
);
320 #define fc_mutate(field) if(REFERENCE2_P(fc->field)) { \
321 if(BCM_P(fc->field)) { fc->field = BCM_TO; \
322 } else { mark_sweep_mark_object(state, ms, fc->field); } }
324 if(context_p(state
, iobj
)) {
325 struct fast_context
*fc
= FASTCTX(iobj
);
332 fc_mutate(custom_iseq
);
334 fc_mutate(method_module
);
337 if(!NIL_P(fc
->method
) && fc
->method
->obj_type
== CMethodType
) {
338 /* We cache the bytecode in a char*, so adjust it. */
340 if(NIL_P(fc
->custom_iseq
)) {
341 ba
= cmethod_get_compiled(fc
->method
);
342 fc
->data
= BYTEARRAY_ADDRESS(ba
);
344 /* A context can be set to use a custom instruction sequence
345 by the debugger; if this is the case, we need to preserve
346 the context's use of that custom iseq. */
347 fc
->data
= BYTEARRAY_ADDRESS(fc
->custom_iseq
);
350 } else if(iobj
->obj_type
== TaskType
) {
351 struct cpu_task
*fc
= (struct cpu_task
*)BYTES_OF(iobj
);
354 fc_mutate(exception
);
355 fc_mutate(enclosing_class
);
356 fc_mutate(active_context
);
357 fc_mutate(home_context
);
359 fc_mutate(debug_channel
);
360 fc_mutate(control_channel
);
361 fc_mutate(current_scope
);
366 while(sp
<= fc
->sp_ptr
) {
367 if(REFERENCE2_P(*sp
)) {
371 mark_sweep_mark_object(state
, ms
, *sp
);
378 for(i
= 0; i
< ptr_array_length(fc
->paths
); i
++) {
379 tmp
= (OBJECT
)ptr_array_get_index(fc
->paths
, i
);
381 ptr_array_set_index(fc
->paths
,i
,(xpointer
)BCM_TO
);
383 mark_sweep_mark_object(state
, ms
, tmp
);
388 /* Handle the generic type_info prefix fields */
389 int fields
= state
->type_info
[iobj
->obj_type
].object_fields
;
390 for(i
= 0; i
< fields
; i
++) {
391 tmp
= NTH_FIELD(iobj
, i
);
392 if(!REFERENCE_P(tmp
)) continue;
395 SET_FIELD(iobj
, i
, BCM_TO
);
397 mark_sweep_mark_object(state
, ms
, tmp
);
408 void mark_sweep_mark_context(STATE
, mark_sweep_gc ms
, OBJECT iobj
) {
409 #define fc_mutate(field) if(fc->field && REFERENCE_P(fc->field)) mark_sweep_mark_object(state, ms, fc->field)
410 if (iobj
->Marked
) return;
414 struct fast_context
*fc
= FASTCTX(iobj
);
416 if(REFERENCE_P(fc
->sender
) && om_in_heap(state
->om
, fc
->sender
)) {
424 fc_mutate(custom_iseq
);
426 fc_mutate(method_module
);
430 if(!NIL_P(fc
->method
) && fc
->method
->obj_type
== CMethodType
) {
431 /* We cache the bytecode in a char*, so adjust it. */
433 if(NIL_P(fc
->custom_iseq
)) {
434 ba
= cmethod_get_compiled(fc
->method
);
435 fc
->data
= BYTEARRAY_ADDRESS(ba
);
437 /* A context can be set to use a custom instruction sequence
438 by the debugger; if this is the case, we need to preserve
439 the context's use of that custom iseq. */
440 fc
->data
= BYTEARRAY_ADDRESS(fc
->custom_iseq
);
/* Clears the per-object Marked bit so iobj will be treated as unmarked
   by the next mark phase. */
446 void mark_sweep_clear_mark(STATE
, OBJECT iobj
) {
447 iobj
->Marked
= FALSE
;
450 void mark_sweep_mark_phase(STATE
, mark_sweep_gc ms
, ptr_array roots
) {
454 if(!NIL_P(ms
->become_to
)) {
455 mark_sweep_mark_object(state
, ms
, ms
->become_to
);
458 if(!NIL_P(ms
->become_from
)) {
459 mark_sweep_mark_object(state
, ms
, ms
->become_from
);
462 sz
= ptr_array_length(roots
);
463 for(i
= 0; i
< sz
; i
++) {
464 root
= (OBJECT
)(ptr_array_get_index(roots
, i
));
465 if(!REFERENCE2_P(root
)) { continue; }
467 ptr_array_set_index(roots
, i
, (xpointer
)BCM_TO
);
469 mark_sweep_mark_object(state
, ms
, root
);
473 sz
= ptr_array_length(ms
->remember_set
);
474 for(i
= 0; i
< sz
; i
++) {
475 root
= (OBJECT
)(ptr_array_get_index(ms
->remember_set
, i
));
476 if(!REFERENCE2_P(root
)) { continue; }
478 ptr_array_set_index(ms
->remember_set
, i
, (xpointer
)BCM_TO
);
480 mark_sweep_mark_object(state
, ms
, root
);
487 sp
= state
->current_stack
;
488 while(sp
<= state
->current_sp
) {
489 if(REFERENCE2_P(*sp
)) {
493 mark_sweep_mark_object(state
, ms
, *sp
);
499 /* Now the handle table. */
500 for(i
= 0; i
< state
->handle_tbl
->total
; i
++) {
501 if(state
->handle_tbl
->entries
[i
]) {
502 tmp
= state
->handle_tbl
->entries
[i
]->object
;
504 state
->handle_tbl
->entries
[i
]->object
= BCM_TO
;
506 mark_sweep_mark_object(state
, ms
, tmp
);
511 cpu_event_each_channel(state
,
512 (cpu_event_each_channel_cb
) mark_sweep_mark_object
, ms
);
513 cpu_sampler_collect(state
,
514 (cpu_sampler_collect_cb
) mark_sweep_mark_object
, ms
);
516 object_memory_mark_contexts(state
, state
->om
);
519 void mark_sweep_sweep_phase(STATE
, mark_sweep_gc ms
) {
522 struct ms_entry
*ent
;
530 for(i
= 0; i
< cur
->num_entries
; i
++) {
531 ent
= &(cur
->entries
[i
]);
534 obj
= to_object(ent
->object
);
536 if(ms
->track
== obj
) {
537 printf("Detect tracked object in sweep %p\n", obj
);
541 assert(ent
->fields
== NUM_FIELDS(obj
));
543 mark_sweep_free_entry(state
, ms
, ent
);
556 void mark_sweep_collect(STATE
, mark_sweep_gc ms
, ptr_array roots
) {
557 struct method_cache
*end
, *ent
;
562 ms
->seen_weak_refs
= ptr_array_new(8);
563 mark_sweep_mark_phase(state
, ms
, roots
);
565 /* We handle the method cache a little differently. We treat it like every
566 * ref is weak so that it doesn't cause objects to live longer than they should. */
568 ent
= state
->method_cache
;
569 end
= ent
+ CPU_CACHE_SIZE
;
573 if(ent
->klass
->gc_zone
== MatureObjectZone
) {
574 if(!to_header(ent
->klass
)->entry
->marked
) {
581 if(ent
->module
->gc_zone
== MatureObjectZone
) {
582 if(!to_header(ent
->module
)->entry
->marked
) {
589 if(ent
->method
->gc_zone
== MatureObjectZone
) {
590 if(!to_header(ent
->method
)->entry
->marked
) {
596 if(!ent
->klass
|| !ent
->module
|| !ent
->method
) {
606 /* We update the weakrefs BEFORE we sweep, since the sweep will free
607 * the data for the object, and we'll loose important info. */
610 for(i
= 0; i
< ptr_array_length(ms
->seen_weak_refs
); i
++) {
611 tmp
= (OBJECT
)ptr_array_get_index(ms
->seen_weak_refs
, i
);
612 for(j
= 0; j
< NUM_FIELDS(tmp
); j
++) {
613 t2
= tuple_at(state
, tmp
, j
);
614 if(REFERENCE_P(t2
) && t2
->gc_zone
== MatureObjectZone
) {
615 if(!to_header(t2
)->entry
->marked
) {
616 tuple_put(state
, tmp
, j
, Qnil
);
622 mark_sweep_sweep_phase(state
, ms
);
624 ptr_array_free(ms
->seen_weak_refs
);
625 ms
->seen_weak_refs
= NULL
;
627 // printf("[GC M compacted to %d bytes]\n", ms->allocated_bytes);
628 ms
->next_collection_bytes
= ms
->allocated_bytes
+ MS_COLLECTION_BYTES
;
631 void mark_sweep_collect_references(STATE
, mark_sweep_gc ms
, OBJECT mark
, ptr_array refs
) {
634 struct ms_entry
*ent
;
642 for(i
= 0; i
< cur
->num_entries
; i
++) {
643 ent
= &(cur
->entries
[i
]);
646 obj
= to_object(ent
->object
);
648 assert(ent
->fields
== NUM_FIELDS(obj
));
651 if(!_object_stores_bytes(obj
)) {
652 for(j
= 0; j
< NUM_FIELDS(obj
); j
++) {
653 if(NTH_FIELD(obj
, j
) == mark
) {
654 ptr_array_append(refs
, (xpointer
)obj
);
668 void mark_sweep_collect_references(STATE, mark_sweep_gc ms, OBJECT mark, ptr_array refs) {
680 addr = (char*)(cur->address);
681 last = (addr + cur->size) - (sizeof(struct rubinius_object));
686 osz = SIZE_IN_BYTES(obj);
688 if(obj->flags != FREE_FLAG && !_object_stores_bytes(obj)) {
689 for(i = 0; i < NUM_FIELDS(obj); i++) {
690 if(NTH_FIELD(obj, i) == mark) {
691 ptr_array_append(refs, (xpointer)obj);