/***********************************************************************/
/*                                                                     */
/*                           Objective Caml                            */
/*                                                                     */
/*            Damien Doligez, projet Para, INRIA Rocquencourt          */
/*                                                                     */
/*  Copyright 1996 Institut National de Recherche en Informatique et   */
/*  en Automatique.  All rights reserved.  This file is distributed    */
/*  under the terms of the GNU Library General Public License, with    */
/*  the special exception on linking described in file ../LICENSE.     */
/*                                                                     */
/***********************************************************************/
#include <stdlib.h>
#include <string.h>
#include "fail.h"
#include "freelist.h"
#include "gc.h"
#include "gc_ctrl.h"
#include "major_gc.h"
#include "memory.h"
#include "minor_gc.h"
#include "misc.h"
#include "mlvalues.h"
#include "signals.h"

extern uintnat caml_percent_free;                   /* major_gc.c */
#ifdef USE_MMAP_INSTEAD_OF_MALLOC
extern char * caml_aligned_mmap (asize_t size, int modulo, void ** block);
extern void caml_aligned_munmap (char * addr, asize_t size);
#endif
/* Allocate a block of the requested size, to be passed to
   [caml_add_to_heap] later.
   [request] must be a multiple of [Page_size].
   [caml_alloc_for_heap] returns NULL if the request cannot be satisfied.
   The returned pointer is a hp, but the header must be initialized by
   the caller.
*/
char *caml_alloc_for_heap (asize_t request)
{
  char *mem;
  void *block;

  Assert (request % Page_size == 0);
#ifdef USE_MMAP_INSTEAD_OF_MALLOC
  mem = caml_aligned_mmap (request + sizeof (heap_chunk_head),
                           sizeof (heap_chunk_head), &block);
#else
  mem = caml_aligned_malloc (request + sizeof (heap_chunk_head),
                             sizeof (heap_chunk_head), &block);
#endif
  if (mem == NULL) return NULL;
  mem += sizeof (heap_chunk_head);
  Chunk_size (mem) = request;
  Chunk_block (mem) = block;
  return mem;
}
/* Use this function to free a block allocated with [caml_alloc_for_heap]
   if you don't add it with [caml_add_to_heap].
*/
void caml_free_for_heap (char *mem)
{
#ifdef USE_MMAP_INSTEAD_OF_MALLOC
  caml_aligned_munmap (Chunk_block (mem),
                       Chunk_size (mem) + sizeof (heap_chunk_head));
#else
  free (Chunk_block (mem));
#endif
}
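
/* A minimal usage sketch, not part of the original file ([reserve_chunk]
   and [keep] are hypothetical): the two functions above are meant to be
   paired whenever a chunk ends up not being given to [caml_add_to_heap]. */
#if 0
static char *reserve_chunk (asize_t request, int keep)
{
  char *mem = caml_alloc_for_heap (request);  /* multiple of Page_size */
  if (mem == NULL) return NULL;               /* request cannot be satisfied */
  if (!keep){
    caml_free_for_heap (mem);      /* never added to the heap: free it here */
    return NULL;
  }
  return mem;      /* the caller still has to initialize the block headers */
}
#endif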
/* Take a chunk of memory as argument, which must be the result of a
   call to [caml_alloc_for_heap], and insert it into the heap chaining.
   The contents of the chunk must be a sequence of valid blocks and
   fragments: no space between blocks and no trailing garbage.  If
   some blocks are blue, they must be added to the free list by the
   caller.  All other blocks must have the color [caml_allocation_color(mem)].
   The caller must update [caml_allocated_words] if applicable.
   Return value: 0 if no error; -1 in case of error.
*/
int caml_add_to_heap (char *m)
{
  asize_t i;

  Assert (Chunk_size (m) % Page_size == 0);
#ifdef DEBUG
  /* Should check the contents of the block. */
#endif

  caml_gc_message (0x04, "Growing heap to %luk bytes\n",
                   (caml_stat_heap_size + Chunk_size (m)) / 1024);

  /* Extend the page table as needed. */
  if (Page (m) < caml_page_low){
    page_table_entry *block, *new_page_table;
    asize_t new_page_low = Page (m);
    asize_t new_size = caml_page_high - new_page_low;

    caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size);
    block = malloc (new_size * sizeof (page_table_entry));
    if (block == NULL){
      caml_gc_message (0x08, "No room for growing page table\n", 0);
      return -1;
    }
    new_page_table = block - new_page_low;
    for (i = new_page_low; i < caml_page_low; i++){
      new_page_table [i] = Not_in_heap;
    }
    for (i = caml_page_low; i < caml_page_high; i++){
      new_page_table [i] = caml_page_table [i];
    }
    free (caml_page_table + caml_page_low);
    caml_page_table = new_page_table;
    caml_page_low = new_page_low;
  }
  if (Page (m + Chunk_size (m)) > caml_page_high){
    page_table_entry *block, *new_page_table;
    asize_t new_page_high = Page (m + Chunk_size (m));
    asize_t new_size = new_page_high - caml_page_low;

    caml_gc_message (0x08, "Growing page table to %lu entries\n", new_size);
    block = malloc (new_size * sizeof (page_table_entry));
    if (block == NULL){
      caml_gc_message (0x08, "No room for growing page table\n", 0);
      return -1;
    }
    new_page_table = block - caml_page_low;
    for (i = caml_page_low; i < caml_page_high; i++){
      new_page_table [i] = caml_page_table [i];
    }
    for (i = caml_page_high; i < new_page_high; i++){
      new_page_table [i] = Not_in_heap;
    }
    free (caml_page_table + caml_page_low);
    caml_page_table = new_page_table;
    caml_page_high = new_page_high;
  }

  /* Mark the pages as being in the heap. */
  for (i = Page (m); i < Page (m + Chunk_size (m)); i++){
    caml_page_table [i] = In_heap;
  }

  /* Chain this heap chunk. */
  {
    char **last = &caml_heap_start;
    char *cur = *last;

    while (cur != NULL && cur < m){
      last = &(Chunk_next (cur));
      cur = *last;
    }
    Chunk_next (m) = cur;
    *last = m;

    ++ caml_stat_heap_chunks;
  }

  /* Update the heap bounds as needed. */
  /* already done: if (m < caml_heap_start) heap_start = m; */
  if (m + Chunk_size (m) > caml_heap_end) caml_heap_end = m + Chunk_size (m);

  caml_stat_heap_size += Chunk_size (m);
  if (caml_stat_heap_size > caml_stat_top_heap_size){
    caml_stat_top_heap_size = caml_stat_heap_size;
  }
  return 0;
}
/* Allocate more memory from malloc for the heap.
   Return a blue block of at least the requested size.
   The blue block is chained to a sequence of blue blocks (through their
   field 0); the last block of the chain is pointed by field 1 of the
   first.  There may be a fragment after the last block.
   The caller must insert the blocks into the free list.
   The request must be less than or equal to Max_wosize.
   Return NULL when out of memory.
*/
static char *expand_heap (mlsize_t request)
{
  char *mem, *hp, *prev;
  asize_t over_request, malloc_request, remain;

  Assert (request <= Max_wosize);
  over_request = request + request / 100 * caml_percent_free;
  malloc_request = caml_round_heap_chunk_size (Bhsize_wosize (over_request));
  mem = caml_alloc_for_heap (malloc_request);
  if (mem == NULL){
    caml_gc_message (0x04, "No room for growing heap\n", 0);
    return NULL;
  }
  remain = malloc_request;
  prev = hp = mem;
  /* XXX find a way to do this with a call to caml_make_free_blocks */
  while (Wosize_bhsize (remain) > Max_wosize){
    Hd_hp (hp) = Make_header (Max_wosize, 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    hp += Bhsize_wosize (Max_wosize);
    remain -= Bhsize_wosize (Max_wosize);
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    prev = hp;
  }
  if (remain > 1){
    Hd_hp (hp) = Make_header (Wosize_bhsize (remain), 0, Caml_blue);
#ifdef DEBUG
    caml_set_fields (Bp_hp (hp), 0, Debug_free_major);
#endif
    Field (Op_hp (mem), 1) = Field (Op_hp (prev), 0) = (value) Op_hp (hp);
    Field (Op_hp (hp), 0) = (value) NULL;
  }else{
    Field (Op_hp (prev), 0) = (value) NULL;
    if (remain == 1) Hd_hp (hp) = Make_header (0, 0, Caml_white);
  }
  Assert (Wosize_hp (mem) >= request);
  if (caml_add_to_heap (mem) != 0){
    caml_free_for_heap (mem);
    return NULL;
  }
  return Bp_hp (mem);
}
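
/* An illustrative sketch ([count_chain_blocks] is hypothetical) of how the
   chain built by [expand_heap] is linked: field 0 of each block points to
   the next one (NULL at the end), and field 1 of the first block points
   directly to the last. */
#if 0
static mlsize_t count_chain_blocks (char *bp)
{
  value *cur = (value *) bp;            /* [bp] points at the first field */
  mlsize_t n = 0;

  while (cur != NULL){
    n ++;
    cur = (value *) Field (cur, 0);     /* follow the field-0 link */
  }
  return n;
}
#endif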
/* Remove the heap chunk [chunk] from the heap and give the memory back
   to [free].
*/
void caml_shrink_heap (char *chunk)
{
  char **cp;
  asize_t i;

  /* Never deallocate the first block, because caml_heap_start is both the
     first block and the base address for page numbers, and we don't
     want to shift the page table, it's too messy (see above).
     It will never happen anyway, because of the way compaction works.
  */
  if (chunk == caml_heap_start) return;

  caml_stat_heap_size -= Chunk_size (chunk);
  caml_gc_message (0x04, "Shrinking heap to %luk bytes\n",
                   caml_stat_heap_size / 1024);

#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < Wsize_bsize (Chunk_size (chunk)); i++){
      ((value *) chunk) [i] = Debug_free_shrink;
    }
  }
#endif

  -- caml_stat_heap_chunks;

  /* Remove [chunk] from the list of chunks. */
  cp = &caml_heap_start;
  while (*cp != chunk) cp = &(Chunk_next (*cp));
  *cp = Chunk_next (chunk);

  /* Remove the pages of [chunk] from the page table. */
  for (i = Page (chunk); i < Page (chunk + Chunk_size (chunk)); i++){
    caml_page_table [i] = Not_in_heap;
  }

  /* Free the [malloc] block that contains [chunk]. */
  caml_free_for_heap (chunk);
}
color_t caml_allocation_color (void *hp)
{
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    return Caml_black;
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    return Caml_white;
  }
}
CAMLexport value caml_alloc_shr (mlsize_t wosize, tag_t tag)
{
  char *hp, *new_block;

  if (wosize > Max_wosize) caml_raise_out_of_memory ();
  hp = caml_fl_allocate (wosize);
  if (hp == NULL){
    new_block = expand_heap (wosize);
    if (new_block == NULL) {
      if (caml_in_minor_collection)
        caml_fatal_error ("Fatal error: out of memory.\n");
      else
        caml_raise_out_of_memory ();
    }
    caml_fl_add_blocks (new_block);
    hp = caml_fl_allocate (wosize);
  }

  Assert (Is_in_heap (Val_hp (hp)));

  /* Inline expansion of caml_allocation_color. */
  if (caml_gc_phase == Phase_mark
      || (caml_gc_phase == Phase_sweep && (addr)hp >= (addr)caml_gc_sweep_hp)){
    Hd_hp (hp) = Make_header (wosize, tag, Caml_black);
  }else{
    Assert (caml_gc_phase == Phase_idle
            || (caml_gc_phase == Phase_sweep
                && (addr)hp < (addr)caml_gc_sweep_hp));
    Hd_hp (hp) = Make_header (wosize, tag, Caml_white);
  }
  Assert (Hd_hp (hp) == Make_header (wosize, tag, caml_allocation_color (hp)));
  caml_allocated_words += Whsize_wosize (wosize);
  if (caml_allocated_words > Wsize_bsize (caml_minor_heap_size)){
    caml_urge_major_slice ();
  }
#ifdef DEBUG
  {
    mlsize_t i;
    for (i = 0; i < wosize; i++){
      Field (Val_hp (hp), i) = Debug_uninit_major;
    }
  }
#endif
  return Val_hp (hp);
}
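
/* A usage sketch ([alloc_tuple] is hypothetical): the standard idiom for
   allocating a shared block and filling its fields with [caml_initialize]
   (defined below), which is safe because neither function calls the GC. */
#if 0
static value alloc_tuple (value *vals, mlsize_t n, tag_t tag)
{
  mlsize_t i;
  value block = caml_alloc_shr (n, tag);   /* 0 < n <= Max_wosize */

  for (i = 0; i < n; i++){
    caml_initialize (&Field (block, i), vals [i]);
  }
  return block;
}
#endif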
/* Dependent memory is all memory blocks allocated out of the heap
   that depend on the GC (and finalizers) for deallocation.
   For the GC to take dependent memory into account when computing
   its automatic speed setting,
   you must call [caml_alloc_dependent_memory] when you allocate some
   dependent memory, and [caml_free_dependent_memory] when you
   free it.  In both cases, you pass as argument the size (in bytes)
   of the block being allocated or freed.
*/
CAMLexport void caml_alloc_dependent_memory (mlsize_t nbytes)
{
  caml_dependent_size += nbytes / sizeof (value);
  caml_dependent_allocated += nbytes / sizeof (value);
}

CAMLexport void caml_free_dependent_memory (mlsize_t nbytes)
{
  if (caml_dependent_size < nbytes / sizeof (value)){
    caml_dependent_size = 0;
  }else{
    caml_dependent_size -= nbytes / sizeof (value);
  }
}
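
/* A sketch of the accounting described above ([alloc_buffer] and
   [free_buffer] are hypothetical): an out-of-heap buffer whose lifetime is
   tied to a finalized block is declared as dependent memory. */
#if 0
static void *alloc_buffer (asize_t nbytes)
{
  void *buf = caml_stat_alloc (nbytes);
  caml_alloc_dependent_memory (nbytes);   /* size in bytes, as documented */
  return buf;
}

static void free_buffer (void *buf, asize_t nbytes)
{
  caml_free_dependent_memory (nbytes);
  caml_stat_free (buf);
}
#endif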
/* Use this function to tell the major GC to speed up when you use
   finalized blocks to automatically deallocate resources (other
   than memory).  The GC will do at least one cycle every [max]
   allocated resources; [res] is the number of resources allocated
   this time.
   Note that only [res/max] is relevant.  The units (and kind of
   resource) can change between calls to [caml_adjust_gc_speed].
*/
CAMLexport void caml_adjust_gc_speed (mlsize_t res, mlsize_t max)
{
  if (max == 0) max = 1;
  if (res > max) res = max;
  caml_extra_heap_resources += (double) res / (double) max;
  if (caml_extra_heap_resources > 1.0){
    caml_extra_heap_resources = 1.0;
    caml_urge_major_slice ();
  }
  if (caml_extra_heap_resources
        > (double) Wsize_bsize (caml_minor_heap_size) / 2.0
          / (double) Wsize_bsize (caml_stat_heap_size)) {
    caml_urge_major_slice ();
  }
}
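
/* A sketch ([MAX_FDS] and [note_fd_allocated] are hypothetical): a
   finalized block wrapping one scarce resource, say a file descriptor,
   out of a budget of [MAX_FDS].  Only the ratio res/max matters, so the
   units can be whatever is convenient. */
#if 0
#define MAX_FDS 1024
static void note_fd_allocated (void)
{
  /* at least one major GC cycle for every MAX_FDS descriptors */
  caml_adjust_gc_speed (1, MAX_FDS);
}
#endif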
/* You must use [caml_initialize] to store the initial value in a field of
   a shared block, unless you are sure the value is not a young block.
   A block value [v] is a shared block if and only if [Is_in_heap (v)]
   is true.
*/
/* [caml_initialize] never calls the GC, so you may call it while a block is
   unfinished (i.e. just after a call to [caml_alloc_shr].) */
void caml_initialize (value *fp, value val)
{
  *fp = val;
  if (Is_block (val) && Is_young (val) && Is_in_heap (fp)){
    if (caml_ref_table.ptr >= caml_ref_table.limit){
      caml_realloc_ref_table (&caml_ref_table);
    }
    *caml_ref_table.ptr++ = fp;
  }
}
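
/* A usage sketch ([make_cell] is hypothetical): a raw assignment to the
   field would hide a young-to-old pointer from the minor GC, which is why
   [caml_initialize] records the field address in [caml_ref_table]. */
#if 0
static value make_cell (value initial)
{
  value cell = caml_alloc_shr (1, 0);
  caml_initialize (&Field (cell, 0), initial);  /* not: Field(cell,0)=initial */
  return cell;
}
#endif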
/* You must use [caml_modify] to change a field of an existing shared block,
   unless you are sure the value being overwritten is not a shared block and
   the value being written is not a young block. */
/* [caml_modify] never calls the GC. */
void caml_modify (value *fp, value val)
{
  Modify (fp, val);
}
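
/* A usage sketch ([set_cell] is hypothetical): later updates go through
   [caml_modify], which performs the write barrier that a raw store would
   skip. */
#if 0
static void set_cell (value cell, value v)
{
  caml_modify (&Field (cell, 0), v);
}
#endif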
CAMLexport void * caml_stat_alloc (asize_t sz)
{
  void * result = malloc (sz);

  /* malloc() may return NULL if size is 0 */
  if (result == NULL && sz != 0) caml_raise_out_of_memory ();
#ifdef DEBUG
  memset (result, Debug_uninit_stat, sz);
#endif
  return result;
}
CAMLexport void caml_stat_free (void * blk)
{
  free (blk);
}
CAMLexport void * caml_stat_resize (void * blk, asize_t sz)
{
  void * result = realloc (blk, sz);

  if (result == NULL) caml_raise_out_of_memory ();
  return result;
}
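
/* A closing sketch ([with_scratch] is hypothetical): the caml_stat_*
   functions are checked wrappers around malloc/realloc/free that raise
   [Out_of_memory] instead of returning NULL. */
#if 0
static void with_scratch (asize_t n)
{
  char *scratch = caml_stat_alloc (n);          /* raises on failure */

  /* ... use the first n bytes ... */
  scratch = caml_stat_resize (scratch, 2 * n);  /* raises on failure */
  /* ... use 2*n bytes ... */
  caml_stat_free (scratch);
}
#endif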