#ifndef RBS_BAKER_H
#define RBS_BAKER_H

#include "shotgun/lib/heap.h"
/*
 Rubinius uses Henry Baker's generational GC, which divides the heap into
 two spaces: a "from" space and a "to" space. During tracing, live objects
 are moved from the "from" space to the "to" space, and a forwarding
 address pointing to the new location in the "to" space is left at the old
 object location. A moved (also referred to as "evacuated") object may
 still point to objects in the "from" space, so those referenced objects
 are copied as well and the pointers are updated. This is called
 scavenging. The "from" space can then be reused and the heap spaces are
 flipped.

 Objects are tenured to the old generation after surviving a certain
 number of GC cycles.

 The advantages of this algorithm are that it does not stop the world for
 long and that it leaves much less heap fragmentation than a naive
 mark-and-sweep algorithm.

 An overview of Baker's algorithm is available online at
 http://web.media.mit.edu/~lieber/Lieberary/GC/Realtime/Realtime.html

 space_a and space_b are the two heap spaces used.
 current is the "from" heap space.
 next is the "to" heap space.
 used is how much memory the heap uses overall
 (offset = current heap peak position - heap bottom).

 tenure_age is how many times an object should be traced before being
 tenured; it may vary between GC instances.
 num_collection is how many GC cycles have passed.
*/
struct baker_gc_struct {
  rheap space_a;
  rheap space_b;
  rheap current;
  rheap next;
  size_t used;
  int tenure_age;
  ptr_array remember_set;
  void *tenure_data;
  OBJECT (*tenure)(void*, OBJECT obj);
  int tenure_now;
  void *om;
  ptr_array seen_weak_refs;
  OBJECT become_from, become_to;
  char *last_start, *last_end;
  int num_collection;
  ptr_array tenured_objects;
};

typedef struct baker_gc_struct* baker_gc;
baker_gc baker_gc_new(int size);
address baker_gc_start_address(baker_gc g);
size_t baker_gc_used(baker_gc g);
int baker_gc_swap(baker_gc g);
int baker_gc_destroy(baker_gc g);
address baker_gc_allocate(baker_gc g, int size);
int baker_gc_set_next(baker_gc g, rheap h);
address baker_gc_allocate_spilled(baker_gc g, int size);
void baker_gc_set_forwarding_address(OBJECT obj, OBJECT dest);
OBJECT baker_gc_forwarded_object(OBJECT obj);
OBJECT baker_gc_mutate_object(STATE, baker_gc g, OBJECT obj);
int baker_gc_contains_p(baker_gc g, OBJECT obj);
int baker_gc_contains_spill_p(baker_gc g, OBJECT obj);
OBJECT baker_gc_mutate_from(STATE, baker_gc g, OBJECT iobj);
unsigned int baker_gc_collect(STATE, baker_gc g, ptr_array roots);
void baker_gc_clear_marked(baker_gc g);
void baker_gc_describe(baker_gc g);
int baker_gc_memory_allocated(baker_gc g);
int baker_gc_memory_in_use(baker_gc g);
void baker_gc_reset_used(baker_gc g);
void baker_gc_find_lost_souls(STATE, baker_gc g);
void baker_gc_collect_references(STATE, baker_gc g, OBJECT mark, ptr_array refs);
void baker_gc_mutate_context(STATE, baker_gc g, OBJECT iobj, int shifted, int top);
static inline int baker_gc_forwarded_p(OBJECT obj) {
  return FORWARDED_P(obj);
}
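/*
 Illustrative sketch (not part of the original header): how the
 forwarding-address API above supports the evacuation step described in
 the comment at the top. If an object was already copied, the forwarding
 pointer left at its old location is followed; otherwise the object is
 copied into the "to" space and a forwarding address is installed.
 "copy_to_next_space" is a hypothetical helper standing in for the real
 copy routine, which this header does not expose.
*/
#if 0
static OBJECT evacuate_sketch(STATE, baker_gc g, OBJECT obj) {
  OBJECT dest;

  if(baker_gc_forwarded_p(obj)) {
    /* Already evacuated: chase the forwarding pointer. */
    return baker_gc_forwarded_object(obj);
  }

  dest = copy_to_next_space(state, g, obj); /* hypothetical helper */
  baker_gc_set_forwarding_address(obj, dest);
  return dest;
}
#endif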
#define baker_gc_allocate_ultra(g, size) heap_allocate_dirty((g)->current, size)
#define baker_gc_allocate_spilled_ultra(g, size) heap_allocate_dirty((g)->next, size)
// #define MEM_DEBUG 1

#ifdef MEM_DEBUG

#define baker_gc_allocate(g, size) (heap_allocate((g)->current, size))
#define baker_gc_allocate_spilled(g, size) (heap_allocate((g)->next, size))

#else

/* The comma expressions bump the allocation counter while still yielding
   the allocated address, so the macros stay safe to use as expressions. */
#define baker_gc_allocate(g, size) ((g)->used++, baker_gc_allocate_ultra(g, size))
#define baker_gc_allocate_spilled(g, size) ((g)->used++, baker_gc_allocate_spilled_ultra(g, size))

#endif
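/*
 Usage sketch (an assumption, not from the original source): a caller
 allocates from the current young-generation space and spills into the
 "next" space when the request cannot be satisfied. Whether
 heap_allocate_dirty signals exhaustion by returning NULL is not
 specified by this header, so the check below is illustrative only.
*/
#if 0
  address addr = baker_gc_allocate(g, size);
  if(!addr) {
    addr = baker_gc_allocate_spilled(g, size);
  }
#endif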
#endif