/***********************************************************************/
/*                                                                     */
/*                           Objective Caml                            */
/*                                                                     */
/*         Damien Doligez, projet Para, INRIA Rocquencourt             */
/*                                                                     */
/*  Copyright 1996 Institut National de Recherche en Informatique et   */
/*  en Automatique.  All rights reserved.  This file is distributed    */
/*  under the terms of the GNU Library General Public License, with    */
/*  the special exception on linking described in file ../LICENSE.     */
/*                                                                     */
/***********************************************************************/
#include <stdlib.h>        /* for realloc */
#include "config.h"
#include "fail.h"
#include "finalise.h"
#include "gc.h"
#include "gc_ctrl.h"
#include "major_gc.h"
#include "memory.h"
#include "minor_gc.h"
#include "misc.h"
#include "mlvalues.h"
#include "roots.h"
#include "signals.h"
#include "weak.h"

asize_t caml_minor_heap_size;
CAMLexport char *caml_young_start = NULL, *caml_young_end = NULL;
CAMLexport char *caml_young_ptr = NULL, *caml_young_limit = NULL;
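
/* Layout of the minor heap (a sketch, inferred from the code below):

     caml_young_start                                caml_young_end
       |                                                  |
       v                                                  v
       +--------------------------------------------------+
       ^                            ^
   caml_young_limit           caml_young_ptr

   Allocation proceeds downward: caml_young_ptr moves from
   caml_young_end toward caml_young_start, and a minor collection is
   triggered when an allocation would take it past caml_young_limit.
   Other parts of the runtime (e.g. caml_urge_major_slice) raise the
   limit to force an early collection. */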
CAMLexport struct caml_ref_table
  caml_ref_table = { NULL, NULL, NULL, NULL, NULL, 0, 0 },
  caml_weak_ref_table = { NULL, NULL, NULL, NULL, NULL, 0, 0 };
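
/* caml_ref_table is the remembered set: the addresses of major-heap
   fields that may point into the minor heap.  caml_weak_ref_table
   plays the same role for weak pointers.  Both are scanned, then
   cleared, by caml_empty_minor_heap below. */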
int caml_in_minor_collection = 0;

#ifdef DEBUG
static unsigned long minor_gc_counter = 0;
#endif
void caml_alloc_table (struct caml_ref_table *tbl, asize_t sz, asize_t rsv)
{
  value **new_table;

  tbl->size = sz;
  tbl->reserve = rsv;
  new_table = (value **) caml_stat_alloc ((tbl->size + tbl->reserve)
                                          * sizeof (value *));
  if (tbl->base != NULL) caml_stat_free (tbl->base);
  tbl->base = new_table;
  tbl->ptr = tbl->base;
  tbl->threshold = tbl->base + tbl->size;
  tbl->limit = tbl->threshold;
  tbl->end = tbl->base + tbl->size + tbl->reserve;
}
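
/* The table occupies [base, end): [base, threshold) is the normal
   capacity of [size] entries, and [threshold, end) is a reserve of
   [reserve] entries that caml_realloc_ref_table opens up when the
   threshold is crossed. */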
static void reset_table (struct caml_ref_table *tbl)
{
  tbl->size = 0;
  tbl->reserve = 0;
  if (tbl->base != NULL) caml_stat_free (tbl->base);
  tbl->base = tbl->ptr = tbl->threshold = tbl->limit = tbl->end = NULL;
}
static void clear_table (struct caml_ref_table *tbl)
{
  tbl->ptr = tbl->base;
  tbl->limit = tbl->threshold;
}
void caml_set_minor_heap_size (asize_t size)
{
  char *new_heap;

  Assert (size >= Minor_heap_min);
  Assert (size <= Minor_heap_max);
  Assert (size % sizeof (value) == 0);
  if (caml_young_ptr != caml_young_end) caml_minor_collection ();
  Assert (caml_young_ptr == caml_young_end);
  new_heap = (char *) caml_stat_alloc (size);
  if (caml_young_start != NULL){
    caml_stat_free (caml_young_start);
  }
  caml_young_start = new_heap;
  caml_young_end = new_heap + size;
  caml_young_limit = caml_young_start;
  caml_young_ptr = caml_young_end;
  caml_minor_heap_size = size;

  reset_table (&caml_ref_table);
  reset_table (&caml_weak_ref_table);
}
static value oldify_todo_list = 0;

/* Note that the tests on the tag depend on the fact that Infix_tag,
   Forward_tag, and No_scan_tag are contiguous. */
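
/* Concretely (tag values as defined in mlvalues.h for this version of
   the runtime): Infix_tag = 249, Forward_tag = 250, No_scan_tag = 251.
   Hence the dispatch below:
     tag < Infix_tag      ordinary block whose fields must be scanned;
     tag >= No_scan_tag   block copied verbatim (strings, floats, ...);
     tag == Infix_tag     pointer into the middle of a closure block;
     otherwise            tag == Forward_tag, handled specially. */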
void caml_oldify_one (value v, value *p)
{
  value result, field0;
  header_t hd;
  mlsize_t sz, i;
  tag_t tag;

 tail_call:
  if (Is_block (v) && Is_young (v)){
    Assert (Hp_val (v) >= caml_young_ptr);
    hd = Hd_val (v);
    if (hd == 0){         /* If already forwarded */
      *p = Field (v, 0);  /*  then forward pointer is first field. */
    }else{
      tag = Tag_hd (hd);
      if (tag < Infix_tag){
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        *p = result;
        field0 = Field (v, 0);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        if (sz > 1){
          Field (result, 0) = field0;
          Field (result, 1) = oldify_todo_list;    /* Add this block */
          oldify_todo_list = v;                    /*  to the "to do" list. */
        }else{
          Assert (sz == 1);
          p = &Field (result, 0);
          v = field0;
          goto tail_call;
        }
      }else if (tag >= No_scan_tag){
        sz = Wosize_hd (hd);
        result = caml_alloc_shr (sz, tag);
        for (i = 0; i < sz; i++) Field (result, i) = Field (v, i);
        Hd_val (v) = 0;            /* Set forward flag */
        Field (v, 0) = result;     /*  and forward pointer. */
        *p = result;
      }else if (tag == Infix_tag){
        mlsize_t offset = Infix_offset_hd (hd);
        caml_oldify_one (v - offset, p);   /* Cannot recurse deeper than 1. */
        *p += offset;
      }else{
        value f = Forward_val (v);
        tag_t ft = 0;

        Assert (tag == Forward_tag);
        if (Is_block (f) && (Is_young (f) || Is_in_heap (f))){
          ft = Tag_val (Hd_val (f) == 0 ? Field (f, 0) : f);
        }
        if (ft == Forward_tag || ft == Lazy_tag || ft == Double_tag){
          /* Do not short-circuit the pointer.  Copy as a normal block. */
          Assert (Wosize_hd (hd) == 1);
          result = caml_alloc_shr (1, Forward_tag);
          *p = result;
          Hd_val (v) = 0;             /* Set (GC) forward flag */
          Field (v, 0) = result;      /*  and forward pointer. */
          p = &Field (result, 0);
          v = f;
          goto tail_call;
        }else{
          v = f;            /* Follow the forwarding */
          goto tail_call;   /*  then oldify. */
        }
      }
    }
  }else{
    *p = v;
  }
}
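
/* Forwarding scheme used above: once a young block has been copied,
   its header is overwritten with 0 and its first field with the
   address of the copy, so any later pointer to it can be redirected in
   O(1).  Blocks of size >= 2 are not scanned immediately; instead
   field 1 of the copy temporarily links to the next old block on the
   "to do" list, which keeps the recursion depth bounded.

   Sketch of a typical use from C root-scanning code (hypothetical
   variable [root] that may point into the minor heap):

     caml_oldify_one (root, &root);
     ... oldify any other roots ...
     caml_oldify_mopup ();
*/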
/* Finish the work that was put off by [caml_oldify_one].
   Note that [caml_oldify_one] itself is called by oldify_mopup, so we
   have to be careful to remove the first entry from the list before
   oldifying its fields. */
void caml_oldify_mopup (void)
{
  value v, new_v, f;
  mlsize_t i;

  while (oldify_todo_list != 0){
    v = oldify_todo_list;                /* Get the head. */
    Assert (Hd_val (v) == 0);            /* It must be forwarded. */
    new_v = Field (v, 0);                /* Follow forward pointer. */
    oldify_todo_list = Field (new_v, 1); /* Remove from list. */

    f = Field (new_v, 0);
    if (Is_block (f) && Is_young (f)){
      caml_oldify_one (f, &Field (new_v, 0));
    }

    for (i = 1; i < Wosize_val (new_v); i++){
      f = Field (v, i);
      if (Is_block (f) && Is_young (f)){
        caml_oldify_one (f, &Field (new_v, i));
      }else{
        Field (new_v, i) = f;
      }
    }
  }
}
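
/* Note the asymmetry above: field 0 is re-read from the copy [new_v]
   (the original's field 0 was overwritten by the forward pointer, so
   caml_oldify_one saved it into the copy), whereas fields 1 and up are
   read from the original [v], since only fields 0 and 1 of the copy
   were initialized during oldification. */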
/* Make sure the minor heap is empty by performing a minor collection
   if needed.
*/
void caml_empty_minor_heap (void)
{
  value **r;

  if (caml_young_ptr != caml_young_end){
    caml_in_minor_collection = 1;
    caml_gc_message (0x02, "<", 0);
    caml_oldify_local_roots();
    for (r = caml_ref_table.base; r < caml_ref_table.ptr; r++){
      caml_oldify_one (**r, *r);
    }
    caml_oldify_mopup ();
    for (r = caml_weak_ref_table.base; r < caml_weak_ref_table.ptr; r++){
      if (Is_block (**r) && Is_young (**r)){
        if (Hd_val (**r) == 0){
          **r = Field (**r, 0);
        }else{
          **r = caml_weak_none;
        }
      }
    }
    if (caml_young_ptr < caml_young_start) caml_young_ptr = caml_young_start;
    caml_stat_minor_words += Wsize_bsize (caml_young_end - caml_young_ptr);
    caml_young_ptr = caml_young_end;
    caml_young_limit = caml_young_start;
    clear_table (&caml_ref_table);
    clear_table (&caml_weak_ref_table);
    caml_gc_message (0x02, ">", 0);
    caml_in_minor_collection = 0;
  }
  caml_final_empty_young ();

#ifdef DEBUG
  {
    value *p;
    for (p = (value *) caml_young_start; p < (value *) caml_young_end; ++p){
      *p = Debug_free_minor;
    }
    ++ minor_gc_counter;
  }
#endif
}
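
/* On exit the minor heap is empty: caml_stat_minor_words has been
   credited with the words allocated since the previous collection
   (caml_young_end - caml_young_ptr), and caml_young_ptr is reset to
   caml_young_end so allocation restarts from the top.  In DEBUG builds
   the heap is also filled with Debug_free_minor to help catch dangling
   pointers into it. */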
/* Do a minor collection and a slice of major collection, call finalisation
   functions, etc.
   Leave the minor heap empty.
*/
CAMLexport void caml_minor_collection (void)
{
  intnat prev_alloc_words = caml_allocated_words;

  caml_empty_minor_heap ();

  caml_stat_promoted_words += caml_allocated_words - prev_alloc_words;
  ++ caml_stat_minor_collections;
  caml_major_collection_slice (0);
  caml_force_major_slice = 0;

  caml_final_do_calls ();

  caml_empty_minor_heap ();
}
CAMLexport value caml_check_urgent_gc (value extra_root)
{
  CAMLparam1 (extra_root);
  if (caml_force_major_slice) caml_minor_collection();
  CAMLreturn (extra_root);
}
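
/* Intended use (a sketch): C code that allocates directly in the major
   heap with caml_alloc_shr can call caml_check_urgent_gc afterwards to
   perform any collection that has become urgent; the argument is
   registered as a root so that it survives the collection, e.g.

     value v = caml_alloc_shr (wosize, tag);
     ... initialize the fields of v ...
     return caml_check_urgent_gc (v);
*/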
void caml_realloc_ref_table (struct caml_ref_table *tbl)
{
  Assert (tbl->ptr == tbl->limit);
  Assert (tbl->limit <= tbl->end);
  Assert (tbl->limit >= tbl->threshold);

  if (tbl->base == NULL){
    caml_alloc_table (tbl, caml_minor_heap_size / sizeof (value) / 8, 256);
  }else if (tbl->limit == tbl->threshold){
    caml_gc_message (0x08, "ref_table threshold crossed\n", 0);
    tbl->limit = tbl->end;
    caml_urge_major_slice ();
  }else{ /* This will almost never happen with the bytecode interpreter. */
    asize_t sz;
    asize_t cur_ptr = tbl->ptr - tbl->base;
    Assert (caml_force_major_slice);

    tbl->size *= 2;
    sz = (tbl->size + tbl->reserve) * sizeof (value *);
    caml_gc_message (0x08, "Growing ref_table to %"
                     ARCH_INTNAT_PRINTF_FORMAT "dk bytes\n",
                     (intnat) sz / 1024);
    tbl->base = (value **) realloc ((char *) tbl->base, sz);
    if (tbl->base == NULL){
      caml_fatal_error ("Fatal error: ref_table overflow\n");
    }
    tbl->end = tbl->base + tbl->size + tbl->reserve;
    tbl->threshold = tbl->base + tbl->size;
    tbl->ptr = tbl->base + cur_ptr;
    tbl->limit = tbl->end;
  }
}
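
/* Summary of the three cases above: on first use the table gets
   caml_minor_heap_size / sizeof (value) / 8 entries plus a reserve of
   256; when the threshold is crossed, the reserve is opened and a
   major slice is urged, after which the next minor collection empties
   the minor heap and clears the table; only if the reserve also fills
   up (rare under the bytecode interpreter) is the table doubled with
   realloc. */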