/**********************************************************************

  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/
#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
# include "ruby/ruby.h"

#define sighandler_t ruby_sighandler_t
/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>

#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
/* LIST_HEAD conflicts with sys/queue.h on macOS */
# include <sys/user.h>

/* MALLOC_HEADERS_END */
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>

#if defined _WIN32 || defined __CYGWIN__
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)

#include <sys/types.h>

#include <emscripten.h>

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
#include "debug_counter.h"
#include "eval_intern.h"

#include "internal/class.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"

#include "ruby/debug.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "transient_heap.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap
static inline struct rbimpl_size_mul_overflow_tag
size_add_overflow(size_t x, size_t y)
#elif __has_builtin(__builtin_add_overflow)
    p = __builtin_add_overflow(x, y, &z);
#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;

    return (struct rbimpl_size_mul_overflow_tag) { p, z, };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

size_mul_or_raise(size_t x, size_t y, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
            "integer overflow: %"PRIuSIZE
            x, y, (size_t)SIZE_MAX);

rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
    return size_mul_or_raise(x, y, exc);

size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
            "integer overflow: %"PRIuSIZE
            x, y, z, (size_t)SIZE_MAX);

rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
    return size_mul_add_or_raise(x, y, z, exc);

size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
            "integer overflow: %"PRIdSIZE
            x, y, z, w, (size_t)SIZE_MAX);
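/*
 * Added note (not in the original source): the *_or_raise wrappers above all
 * follow the same pattern -- compute the size with an overflow-detecting
 * helper that returns a {left = overflowed?, right = result} pair, hand back
 * .right when nothing overflowed, and otherwise call rb_memerror() if a GC is
 * already running or raise `exc`.  A caller-side sketch, with a hypothetical
 * element count `n`:
 *
 *     size_t bytes = rb_size_mul_or_raise(n, sizeof(RVALUE), rb_eArgError);
 *     // bytes == n * sizeof(RVALUE), or rb_eArgError was raised on overflow
 */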
#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
    rb_gc_guarded_val = val;
#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS  4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 is disabled */
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO  0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO  0.65

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)

#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
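/*
 * Added note: each GC_* default above can be overridden at process start
 * through the environment variable of the same name with a RUBY_ prefix (the
 * values are copied into gc_params below), e.g.:
 *
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT=33554432 ruby app.rb
 */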
    size_t heap_init_slots;
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_slots;

    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double oldobject_limit_factor;

    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;

    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;

static ruby_gc_params_t gc_params = {
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_SLOTS,

    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,

    GC_MALLOC_LIMIT_GROWTH_FACTOR,

    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
 * enable the embedding of GC debugging information.
 * 1: basic information
 * 2: remember set operation

#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0

#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#elif defined(HAVE_VA_ARGS_MACRO)
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
# define RGENGC_DEBUG_ENABLED(level) 0

int ruby_rgengc_debug;
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable internal consistency checks at each GC (for debugging)
 * 3: enable internal consistency checks at each GC step (for debugging)
 * 4: enable liveness checks
 * 5: show all references

#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0

// Note: using RUBY_ASSERT_WHEN() extends a macro in expr (info by nobu).
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)

/* RGENGC_OLD_NEWOBJ_CHECK
 * 0: disable all assertions
 * >0: make an OLD object on new object creation.
 * Make one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0

 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each type
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0

/* RGENGC_ESTIMATE_OLDMALLOC
 * Enable/disable estimation of the malloc'ed size retained by old objects.
 * If the estimate exceeds the threshold, a full GC is invoked.
 * 0: disable estimation.
 * 1: enable estimation.
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1

/* RGENGC_FORCE_MAJOR_GC
 * Force major/full GC if this macro is not 0.
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS

#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0

#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
    GPR_FLAG_NONE               = 0x000,
    GPR_FLAG_MAJOR_BY_NOFREE    = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN    = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY     = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE     = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
    GPR_FLAG_MAJOR_MASK         = 0x0ff,

    GPR_FLAG_NEWOBJ             = 0x100,
    GPR_FLAG_MALLOC             = 0x200,
    GPR_FLAG_METHOD             = 0x400,
    GPR_FLAG_CAPI               = 0x800,
    GPR_FLAG_STRESS            = 0x1000,

    GPR_FLAG_IMMEDIATE_SWEEP   = 0x2000,
    GPR_FLAG_HAVE_FINALIZE     = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK    = 0x8000,
    GPR_FLAG_FULL_MARK        = 0x10000,
    GPR_FLAG_COMPACT          = 0x20000,

        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;
typedef struct gc_profile_record {

    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;
    size_t moved_objects;

#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY

#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;

#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;

#define FL_FROM_FREELIST FL_USER0
#define RMOVED(obj) ((struct RMoved *)(obj))

typedef struct RVALUE {
            VALUE flags;		/* always 0 for freed obj */
        struct RObject object;
        struct RFloat  flonum;
        struct RString string;
        struct RRegexp regexp;
        struct RTypedData typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RRational rational;
        struct RComplex complex;
        struct RSymbol symbol;
        struct vm_throw_data throw_data;
        struct vm_ifunc ifunc;
        struct rb_method_entry_struct ment;
        const rb_iseq_t iseq;
        struct rb_imemo_tmpbuf_struct alloc;
STATIC_ASSERT(sizeof_rvalue, offsetof(RVALUE, file) == SIZEOF_VALUE * 5);
STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == SIZEOF_VALUE * 5);
STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);

typedef uintptr_t bits_t;
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )

#define popcount_bits rb_popcount_intptr
struct heap_page_header {
    struct heap_page *page;

struct heap_page_body {
    struct heap_page_header header;
    /* RVALUE values[]; */

    struct gc_list *next;
#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;

    size_t unused_cache_size;
#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)

typedef struct rb_heap_struct {
    struct heap_page *free_pages;
    struct list_head pages;
    struct heap_page *sweeping_page; /* iterator for .pages */
    struct heap_page *compact_cursor;
    RVALUE * compact_cursor_index;
#if GC_ENABLE_INCREMENTAL_MARK
    struct heap_page *pooled_pages;
    size_t total_pages;      /* total page count in a heap */
    size_t total_slots;      /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */

typedef struct rb_size_pool_struct {

    size_t allocatable_pages;

    /* Sweeping statistics */

    /* Global statistics */
    size_t force_major_gc_count;
typedef struct rb_objspace {

#if MALLOC_ALLOCATED_SIZE
        size_t allocated_size;

        unsigned int mode : 2;
        unsigned int immediate_sweep : 1;
        unsigned int dont_gc : 1;
        unsigned int dont_incremental : 1;
        unsigned int during_gc : 1;
        unsigned int during_compacting : 1;
        unsigned int gc_stressful: 1;
        unsigned int has_hook: 1;
        unsigned int during_minor_gc : 1;
#if GC_ENABLE_INCREMENTAL_MARK
        unsigned int during_incremental_marking : 1;
        unsigned int measure_gc : 1;

    rb_event_flag_t hook_events;
    size_t total_allocated_objects;
    VALUE next_object_id;

    rb_size_pool_t size_pools[SIZE_POOL_COUNT];

        rb_atomic_t finalizing;

    mark_stack_t mark_stack;

        struct heap_page **sorted;
        size_t allocated_pages;
        size_t allocatable_pages;
        size_t sorted_length;

        size_t freeable_pages;

        VALUE deferred_final;

    st_table *finalizer_table;

        unsigned int latest_gc_info;
        gc_profile_record *records;
        gc_profile_record *current_record;

#if GC_PROFILE_MORE_DETAIL

        size_t minor_gc_count;
        size_t major_gc_count;
        size_t compact_count;
        size_t read_barrier_faults;
#if RGENGC_PROFILE > 0
        size_t total_generated_normal_object_count;
        size_t total_generated_shady_object_count;
        size_t total_shade_operation_count;
        size_t total_promoted_count;
        size_t total_remembered_normal_object_count;
        size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t shade_operation_count_types[RUBY_T_MASK];
        size_t promoted_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
#endif /* RGENGC_PROFILE */

        /* temporary profiling space */
        double gc_sweep_start_time;
        size_t total_allocated_objects_at_gc_start;
        size_t heap_used_at_gc_start;

        /* basic statistics */
        size_t total_freed_objects;
        size_t total_allocated_pages;
        size_t total_freed_pages;
        uint64_t total_time_ns;
        struct timespec start_time;

    struct gc_list *global_list;

    VALUE gc_stress_mode;

        size_t last_major_gc;
        size_t uncollectible_wb_unprotected_objects;
        size_t uncollectible_wb_unprotected_objects_limit;
        size_t old_objects_limit;

#if RGENGC_ESTIMATE_OLDMALLOC
        size_t oldmalloc_increase;
        size_t oldmalloc_increase_limit;

#if RGENGC_CHECK_MODE >= 2
        struct st_table *allrefs_table;

        size_t considered_count_table[T_MASK];
        size_t moved_count_table[T_MASK];

#if GC_ENABLE_INCREMENTAL_MARK

    st_table *id_to_obj_tbl;
    st_table *obj_to_id_tbl;

#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;
#if defined(__APPLE__) && defined(__LP64__) && !defined(HEAP_PAGE_ALIGN_LOG)
/* for slow mmap: 64KiB */
#define HEAP_PAGE_ALIGN_LOG 16

#ifndef HEAP_PAGE_ALIGN_LOG
/* default tiny heap size: 16KB */
#define HEAP_PAGE_ALIGN_LOG 14

#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),

#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
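/*
 * Added note: a worked example of the constants above, assuming a 64-bit
 * build with the default HEAP_PAGE_ALIGN_LOG of 14, sizeof(RVALUE) == 40,
 * BITS_BITLENGTH == 64 and a single-pointer (8-byte) heap_page_header:
 *
 *     HEAP_PAGE_SIZE         == 1 << 14                       == 16384 bytes
 *     HEAP_PAGE_OBJ_LIMIT    == (16384 - 8) / 40              == 409 slots
 *     HEAP_PAGE_BITMAP_LIMIT == CEILDIV(CEILDIV(16384, 40), 64) == 7 words
 *     HEAP_PAGE_BITMAP_SIZE  == 8 * 7                         == 56 bytes per bitmap
 */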
# if HAVE_CONST_PAGE_SIZE
/* If we have PAGE_SIZE and it is a constant, then we can directly use it. */
static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
# elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
/* PAGE_SIZE <= HEAP_PAGE_SIZE */
static const bool USE_MMAP_ALIGNED_ALLOC = true;
/* Otherwise, fall back to determining if we can use mmap at runtime. */
# define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)

static bool use_mmap_aligned_alloc;
#elif !defined(__MINGW32__) && !defined(_WIN32)
static const bool USE_MMAP_ALIGNED_ALLOC = false;
        unsigned int before_sweep : 1;
        unsigned int has_remembered_objects : 1;
        unsigned int has_uncollectible_shady_objects : 1;
        unsigned int in_tomb : 1;

    rb_size_pool_t *size_pool;

    struct heap_page *free_next;

    struct list_node page_node;

    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
    /* the following three bitmaps are cleared at the beginning of full GC */
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];

    /* If set, the object is not movable */
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];

#define GET_PAGE_BODY(x)   ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x)   (GET_PAGE_HEADER(x)->page)

#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p)    ((bits_t)1 << BITMAP_OFFSET(p))

/* Bitmap Operations */
#define MARKED_IN_BITMAP(bits, p)    ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p)      ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p)     ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))

#define GET_HEAP_MARK_BITS(x)           (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x)         (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)  (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x)        (&GET_HEAP_PAGE(x)->marking_bits[0])
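/*
 * Added note: the macros above rely on heap page bodies being allocated at
 * HEAP_PAGE_ALIGN-aligned addresses, so masking off an object pointer's low
 * HEAP_PAGE_ALIGN_LOG bits yields its page.  A worked example on a 64-bit
 * build (sizeof(RVALUE) == 40, HEAP_PAGE_ALIGN == 0x4000) with a hypothetical
 * object at p == 0x7f0000009000:
 *
 *     GET_PAGE_BODY(p) == 0x7f0000008000            // low 14 bits cleared
 *     NUM_IN_PAGE(p)   == 0x1000 / 40 == 102        // slot index within the page
 *     BITMAP_INDEX(p)  == 102 / 64    == 1          // word 1 of the bitmap
 *     BITMAP_BIT(p)    == (bits_t)1 << (102 & 63)   // bit 38 of that word
 */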
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)

#define ruby_initial_gc_stress gc_params.gc_stress

VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;

#define malloc_limit              objspace->malloc_params.limit
#define malloc_increase           objspace->malloc_params.increase
#define malloc_allocated_size     objspace->malloc_params.allocated_size
#define heap_pages_sorted         objspace->heap_pages.sorted
#define heap_allocated_pages      objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length  objspace->heap_pages.sorted_length
#define heap_pages_lomem          objspace->heap_pages.range[0]
#define heap_pages_himem          objspace->heap_pages.range[1]
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots    objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define size_pools                objspace->size_pools
#define during_gc                 objspace->flags.during_gc
#define finalizing                objspace->atomic_flags.finalizing
#define finalizer_table           objspace->finalizer_table
#define global_list               objspace->global_list
#define ruby_gc_stressful         objspace->flags.gc_stressful
#define ruby_gc_stress_mode       objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class           objspace->stress_to_class
#define stress_to_class           0
#define dont_gc_on()   (fprintf(stderr, "dont_gc_on@%s:%d\n",      __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off()  (fprintf(stderr, "dont_gc_off@%s:%d\n",     __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)

#define dont_gc_on()   (objspace->flags.dont_gc = 1)
#define dont_gc_off()  (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
#define dont_gc_val()  (objspace->flags.dont_gc)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
      case gc_mode_marking:
      case gc_mode_sweeping:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);

has_sweeping_pages(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {

static inline size_t
heap_eden_total_pages(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;

static inline size_t
heap_eden_total_slots(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;

static inline size_t
heap_tomb_total_pages(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;

static inline size_t
heap_allocatable_pages(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        count += size_pools[i].allocatable_pages;

static inline size_t
heap_allocatable_slots(rb_objspace_t *objspace)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
#define gc_mode(objspace)           gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))

#define is_marking(objspace)        (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace)       (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace)   ((objspace)->flags.during_minor_gc == FALSE)
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define is_incremental_marking(objspace) FALSE
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#define will_be_incremental_marking(objspace) FALSE
#define is_lazy_sweeping(objspace)  (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))

#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
# error not supported
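/*
 * Added note: on the common configuration where a long is pointer-sized, a
 * non-special object's id is simply its address with FIXNUM_FLAG (the low tag
 * bit) set, and obj_id_to_ref() just clears that bit again; e.g. an object at
 * address 0x7f00 gets object id 0x7f01.  The LL2NUM branch handles platforms
 * where a pointer does not fit in a Fixnum.
 */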
#define RANY(o) ((RVALUE*)(o))

    struct RBasic basic;
    void (*dfree)(void *);

#define RZOMBIE(o) ((struct RZombie *)(o))

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;

int ruby_disable_gc = 0;
int ruby_enable_autocompact = 0;

void rb_iseq_mark(const rb_iseq_t *iseq);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);

void rb_gcdebug_print_obj_condition(VALUE obj);
static VALUE define_final0(VALUE obj, VALUE block);

NORETURN(static void *gc_vraise(void *ptr));
NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
NORETURN(static void negative_size_allocation_error(const char *));

static void init_mark_stack(mark_stack_t *stack);

static int ready_to_gc(rb_objspace_t *objspace);

static int garbage_collect(rb_objspace_t *, unsigned int reason);

static int  gc_start(rb_objspace_t *objspace, unsigned int reason);
static void gc_rest(rb_objspace_t *objspace);

enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_mark_continue,
    gc_enter_event_sweep_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
    gc_enter_event_rb_memerror,

static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int  gc_marks_finish(rb_objspace_t *objspace);
static void gc_marks_rest(rb_objspace_t *objspace);
static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static void gc_sweep(rb_objspace_t *objspace);
static void gc_sweep_start(rb_objspace_t *objspace);
static void gc_sweep_finish(rb_objspace_t *objspace);
static int  gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
static void gc_sweep_rest(rb_objspace_t *objspace);
static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);

static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
static int gc_mark_stacked_objects_all(rb_objspace_t *);
static void gc_grey(rb_objspace_t *objspace, VALUE ptr);

static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));

static void   push_mark_stack(mark_stack_t *, VALUE);
static int    pop_mark_stack(mark_stack_t *, VALUE *);
static size_t mark_stack_size(mark_stack_t *stack);
static void   shrink_stack_chunk_cache(mark_stack_t *stack);

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
static int gc_verify_heap_pages(rb_objspace_t *objspace);

static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
static VALUE gc_disable_no_rest(rb_objspace_t *);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
static inline void gc_prof_timer_start(rb_objspace_t *);
static inline void gc_prof_timer_stop(rb_objspace_t *);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
static inline void gc_prof_set_heap_info(rb_objspace_t *);

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)

#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);
static const char *obj_type_name(VALUE obj);
 * 1 - TSC (H/W Time Stamp Counter)

/* the following code is only for internal tuning. */

/* Source code to use RDTSC is quoted and modified from
 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>

#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);

#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();

#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;
#define PRItick "lu"

static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));

#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"

static inline tick_t

#else /* use clock */
typedef clock_t tick_t;
#define PRItick "llu"

static inline tick_t

#elif TICK_TYPE == 2
typedef double tick_t;
#define PRItick "4.9f"

static inline tick_t
    return getrusage_time();

#else /* TICK_TYPE */
#error "choose tick type"
#endif /* TICK_TYPE */

#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \

#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */
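/*
 * Added note: when USE_TICK_T is enabled, MEASURE_LINE(expr) wraps a single
 * statement, samples tick() before and after it, and prints the elapsed tick
 * count together with the stringized expression; a hypothetical placement
 * would look like
 *
 *     MEASURE_LINE(gc_sweep_step(objspace, size_pool, heap));
 *
 * With USE_TICK_T disabled it expands to the bare expression, so such wrappers
 * cost nothing in normal builds.
 */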
static inline void *
asan_unpoison_object_temporary(VALUE obj)
    void *ptr = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f)  FL_CHECK2("FL_TEST2",  x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)   FL_CHECK2("FL_SET2",   x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))

#define RVALUE_MARK_BITMAP(obj)           MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj)            MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj)     MARKED_IN_BITMAP((page)->mark_bits, (obj))

#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)  MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)  MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj)        MARKED_IN_BITMAP((page)->marking_bits, (obj))

#define RVALUE_OLD_AGE   3
#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
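/*
 * Added note: an object's generational age lives in two flag bits,
 * FL_PROMOTED0 and FL_PROMOTED1, starting at bit RVALUE_AGE_SHIFT (5), so the
 * age ranges over 0..3 and an object is treated as "old" once it reaches
 * RVALUE_OLD_AGE (3).  RVALUE_FLAGS_AGE() below extracts it as
 * (flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT.
 */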
static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);

RVALUE_FLAGS_AGE(VALUE flags)
    return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
check_rvalue_consistency_force(const VALUE obj, int terminate)
    rb_objspace_t *objspace = &rb_objspace;

    RB_VM_LOCK_ENTER_NO_BARRIER();
        if (SPECIAL_CONST_P(obj)) {
            fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
        else if (!is_pointer_to_heap(objspace, (void *)obj)) {
            /* check if it is in tomb_pages */
            struct heap_page *page = NULL;
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
                    if (&page->start[0] <= (RVALUE *)obj &&
                            (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
                        fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                                (void *)obj, (void *)page);
            fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);

            const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
            const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
            const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
            const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
            const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

            if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
                fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            if (BUILTIN_TYPE(obj) == T_NONE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
            obj_memsize_of((VALUE)obj, FALSE);

            /*
             * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
             */
            if (age > 0 && wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);

            if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));

            if (!is_full_marking(objspace)) {
                if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                            obj_info(obj), age);
                if (remembered_bit && age != RVALUE_OLD_AGE) {
                    fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                            obj_info(obj), age);

            /*
             *               marking:false  marking:true
             * marked:false     white        *invalid*
             * marked:true      black          grey
             */
            if (is_incremental_marking(objspace) && marking_bit) {
                if (!is_marking(objspace) && !mark_bit) {
                    fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));

    RB_VM_LOCK_LEAVE_NO_BARRIER();

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0
check_rvalue_consistency(const VALUE obj)

check_rvalue_consistency(const VALUE obj)
    check_rvalue_consistency_force(obj, TRUE);

gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
    if (RB_SPECIAL_CONST_P(obj)) {

        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        int ret = BUILTIN_TYPE(obj) == T_MOVED;
        /* Re-poison slot if it's not the one we want */
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
RVALUE_MARKED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;

RVALUE_PINNED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;

RVALUE_WB_UNPROTECTED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;

RVALUE_MARKING(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;

RVALUE_REMEMBERED(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;

RVALUE_UNCOLLECTIBLE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;

RVALUE_OLD_P_RAW(VALUE obj)
    const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
    return (RBASIC(obj)->flags & promoted) == promoted;

RVALUE_OLD_P(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
#if RGENGC_CHECK_MODE || GC_DEBUG
RVALUE_AGE(VALUE obj)
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;
    rb_transient_heap_promote(obj);

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;

RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);

RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
    flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
    flags |= (age << RVALUE_AGE_SHIFT);
/* set age to age+1 */
RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
    VALUE flags = RBASIC(obj)->flags;
    int age = RVALUE_FLAGS_AGE(flags);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    check_rvalue_consistency(obj);

/* set age to RVALUE_OLD_AGE */
RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);

/* set age to RVALUE_OLD_AGE - 1 */
RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);

    check_rvalue_consistency(obj);
RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);

RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);

    RVALUE_DEMOTE_RAW(objspace, obj);

    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;

    check_rvalue_consistency(obj);

RVALUE_AGE_RESET_RAW(VALUE obj)
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);

RVALUE_AGE_RESET(VALUE obj)
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
RVALUE_BLACK_P(VALUE obj)
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);

RVALUE_GREY_P(VALUE obj)
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);

RVALUE_WHITE_P(VALUE obj)
    return RVALUE_MARKED(obj) == FALSE;
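/*
 * Added note: the three predicates above express the tri-color scheme used by
 * incremental marking -- white objects are not yet marked, grey objects are
 * marked but still queued for scanning (marking bit set), and black objects
 * are marked and fully scanned.  The "marked:false / marking:true" combination
 * is invalid, as the consistency check above also asserts.
 */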
  --------------------------- ObjectSpace -----------------------------
static inline void *
    return calloc(1, n);

rb_objspace_alloc(void)
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
    objspace->flags.measure_gc = 1;
    malloc_limit = gc_params.malloc_limit_min;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        size_pool->slot_size = sizeof(RVALUE) * (1 << i);

        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
        list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
static void free_stack_chunks(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);

rb_objspace_free(rb_objspace_t *objspace)
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;

        struct gc_list *list, *next;
        for (list = global_list; list; list = next) {

    if (heap_pages_sorted) {
        for (i = 0; i < heap_allocated_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
            SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;

    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);
    free_stack_chunks(&objspace->mark_stack);
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
    struct heap_page **sorted;
    size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",

    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);

    heap_pages_sorted_length = next_length;

heap_pages_expand_sorted(rb_objspace_t *objspace)
    /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
     * because heap_allocatable_pages contains heap_tomb->total_pages (recycle heap_tomb pages).
     * however, if there are pages which do not have empty slots, then try to create new pages
     * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
    size_t next_length = heap_allocatable_pages(objspace);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;

    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);

    GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
    size_pool->allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
    ASSERT_vm_locking();

    RVALUE *p = (RVALUE *)obj;

    asan_unpoison_object(obj, false);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

    p->as.free.flags = 0;
    p->as.free.next = page->freelist;

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

    if (RGENGC_CHECK_MODE &&
        /* obj should belong to page */
        !(&page->start[0] <= (RVALUE *)obj &&
          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
          obj % sizeof(RVALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);

    asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->free_pages;
    heap->free_pages = page;

    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

#if GC_ENABLE_INCREMENTAL_MARK
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->pooled_pages;
    heap->pooled_pages = page;
    objspace->rincgc.pooled_slots += page->free_slots;

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
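/*
 * Added note: page->freelist is deliberately kept poisoned for
 * AddressSanitizer whenever it is not being touched, which is why every
 * helper around here follows the same unpoison / use / re-poison pattern:
 *
 *     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
 *     ... read or update page->freelist ...
 *     asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 *
 * On builds without ASAN these calls compile down to no-ops.
 */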
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
    list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;

static void rb_aligned_free(void *ptr, size_t size);

heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
    heap_allocated_pages--;
    objspace->profile.total_freed_pages++;
    rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
heap_pages_free_unused_pages(rb_objspace_t *objspace)

    bool has_pages_in_tomb_heap = FALSE;
    for (i = 0; i < SIZE_POOL_COUNT; i++) {
        if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
            has_pages_in_tomb_heap = TRUE;

    if (has_pages_in_tomb_heap) {
        for (i = j = 1; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
                heap_page_free(objspace, page);
                heap_pages_sorted[j] = page;

            struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
            uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
            GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
            heap_pages_himem = (RVALUE *)himem;

        GC_ASSERT(j == heap_allocated_pages);
*
1958 heap_page_allocate(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
)
1960 uintptr_t start
, end
, p
;
1961 struct heap_page
*page
;
1962 struct heap_page_body
*page_body
= 0;
1963 uintptr_t hi
, lo
, mid
;
1964 size_t stride
= size_pool
->slot_size
;
1965 unsigned int limit
= (unsigned int)((HEAP_PAGE_SIZE
- sizeof(struct heap_page_header
)))/(int)stride
;
1967 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1968 page_body
= (struct heap_page_body
*)rb_aligned_malloc(HEAP_PAGE_ALIGN
, HEAP_PAGE_SIZE
);
1969 if (page_body
== 0) {
1973 /* assign heap_page entry */
1974 page
= calloc1(sizeof(struct heap_page
));
1976 rb_aligned_free(page_body
, HEAP_PAGE_SIZE
);
1980 /* adjust obj_limit (object number available in this page) */
1981 start
= (uintptr_t)((VALUE
)page_body
+ sizeof(struct heap_page_header
));
1983 if ((VALUE
)start
% sizeof(RVALUE
) != 0) {
1984 int delta
= (int)sizeof(RVALUE
) - (start
% (int)sizeof(RVALUE
));
1985 start
= start
+ delta
;
1986 GC_ASSERT(NUM_IN_PAGE(start
) == 0 || NUM_IN_PAGE(start
) == 1);
1988 /* Find a num in page that is evenly divisible by `stride`.
1989 * This is to ensure that objects are aligned with bit planes.
1990 * In other words, ensure there are an even number of objects
1992 if (NUM_IN_PAGE(start
) == 1) {
1993 start
+= stride
- sizeof(RVALUE
);
1996 GC_ASSERT(NUM_IN_PAGE(start
) * sizeof(RVALUE
) % stride
== 0);
1998 limit
= (HEAP_PAGE_SIZE
- (int)(start
- (uintptr_t)page_body
))/(int)stride
;
2000 end
= start
+ (limit
* (int)stride
);
2002 /* setup heap_pages_sorted */
2004 hi
= (uintptr_t)heap_allocated_pages
;
2006 struct heap_page
*mid_page
;
2008 mid
= (lo
+ hi
) / 2;
2009 mid_page
= heap_pages_sorted
[mid
];
2010 if ((uintptr_t)mid_page
->start
< start
) {
2013 else if ((uintptr_t)mid_page
->start
> start
) {
2017 rb_bug("same heap page is allocated: %p at %"PRIuVALUE
, (void *)page_body
, (VALUE
)mid
);
2021 if (hi
< (uintptr_t)heap_allocated_pages
) {
2022 MEMMOVE(&heap_pages_sorted
[hi
+1], &heap_pages_sorted
[hi
], struct heap_page_header
*, heap_allocated_pages
- hi
);
2025 heap_pages_sorted
[hi
] = page
;
2027 heap_allocated_pages
++;
2029 GC_ASSERT(heap_eden_total_pages(objspace
) + heap_allocatable_pages(objspace
) <= heap_pages_sorted_length
);
2030 GC_ASSERT(heap_eden_total_pages(objspace
) + heap_tomb_total_pages(objspace
) == heap_allocated_pages
- 1);
2031 GC_ASSERT(heap_allocated_pages
<= heap_pages_sorted_length
);
2033 objspace
->profile
.total_allocated_pages
++;
2035 if (heap_allocated_pages
> heap_pages_sorted_length
) {
2036 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE
") > sorted(%"PRIdSIZE
")",
2037 heap_allocated_pages
, heap_pages_sorted_length
);
2040 if (heap_pages_lomem
== 0 || (uintptr_t)heap_pages_lomem
> start
) heap_pages_lomem
= (RVALUE
*)start
;
2041 if ((uintptr_t)heap_pages_himem
< end
) heap_pages_himem
= (RVALUE
*)end
;
2043 page
->start
= (RVALUE
*)start
;
2044 page
->total_slots
= limit
;
2045 page
->slot_size
= size_pool
->slot_size
;
2046 page
->size_pool
= size_pool
;
2047 page_body
->header
.page
= page
;
2049 for (p
= start
; p
!= end
; p
+= stride
) {
2050 gc_report(3, objspace
, "assign_heap_page: %p is added to freelist\n", (void *)p
);
2051 heap_page_add_freeobj(objspace
, page
, (VALUE
)p
);
2053 page
->free_slots
= limit
;
2055 asan_poison_memory_region(&page
->freelist
, sizeof(RVALUE
*));
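/*
 * Added note: heap_pages_sorted is kept ordered by page start address; the
 * binary search above finds the insertion point for the new page (and doubles
 * as a sanity check against registering the same page twice).  Keeping the
 * array sorted, together with the heap_pages_lomem/himem bounds updated here,
 * is what lets conservative scanning decide cheaply whether an arbitrary
 * pointer might point into the Ruby heap.
 */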
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
    struct heap_page *page = 0, *next;

    list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
            asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
*
2077 heap_page_create(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
)
2079 struct heap_page
*page
;
2080 const char *method
= "recycle";
2082 size_pool
->allocatable_pages
--;
2084 page
= heap_page_resurrect(objspace
, size_pool
);
2087 page
= heap_page_allocate(objspace
, size_pool
);
2088 method
= "allocate";
2090 if (0) fprintf(stderr
, "heap_page_create: %s - %p, "
2091 "heap_allocated_pages: %"PRIdSIZE
", "
2092 "heap_allocated_pages: %"PRIdSIZE
", "
2093 "tomb->total_pages: %"PRIdSIZE
"\n",
2094 method
, (void *)page
, heap_pages_sorted_length
, heap_allocated_pages
, SIZE_POOL_TOMB_HEAP(size_pool
)->total_pages
);
heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
    /* Adding to eden heap during incremental sweeping is forbidden */
    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
    list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;

heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
    struct heap_page *page = heap_page_create(objspace, size_pool);
    heap_add_page(objspace, size_pool, heap, page);
    heap_add_freepage(heap, page);
heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)

    size_pool_allocatable_pages_set(objspace, size_pool, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, size_pool, heap);

    GC_ASSERT(size_pool->allocatable_pages == 0);
heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;

    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);

        /* Find `f' where free_slots = f * total_slots * goal_ratio
         *  => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

                    "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                    " G(%1.2f), f(%1.2f),"
                    " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                    free_slots, total_slots, free_slots/(double)total_slots,
                    goal_ratio, f, used, next_used);

    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;

    size_t extend_page_count = next_used - used;
    /* Extend by at least 1 page. */
    if (extend_page_count == 0) extend_page_count = 1;

    return extend_page_count;
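/*
 * Added note: a worked example of the sizing formula above (numbers picked
 * for illustration only).  With total_slots = 100000, free_slots = 20000,
 * goal_ratio = 0.40 and 245 pages in use:
 *
 *     f = (100000 - 20000) / ((1 - 0.40) * 100000) ~= 1.333
 *     next_used = (size_t)(1.333 * 245) = 326   =>   extend by 81 pages
 *
 * so the heap is grown until roughly 40% of its slots would be free, capped by
 * gc_params.growth_factor and gc_params.growth_max_slots.
 */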
heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
    if (size_pool->allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
                  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);

        GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

        heap_assign_page(objspace, size_pool, heap);
static void
heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    if (is_lazy_sweeping(objspace)) {
        gc_sweep_continue(objspace, size_pool, heap);
    }
    else if (is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, size_pool, heap);
    }

    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
        gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
        rb_memerror();
    }
}
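/* heap_prepare() is the refill path for a heap whose free_pages list is empty:
 * it first advances any pending lazy sweep step, then any incremental mark
 * step, then tries to grow the heap via heap_increment(), and only when none
 * of these produce a free page does it start a full GC; if even that fails,
 * allocation aborts with rb_memerror(). */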
void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
    objspace->flags.has_hook = (objspace->hook_events != 0);
}

static void
gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    const VALUE *pc = ec->cfp->pc;
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
        /* increment PC because source line is calculated with PC-1 */
        ec->cfp->pc++;
    }
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
    ec->cfp->pc = pc;
}

#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook_prep(objspace, event, data, prep) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        prep; \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)

#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
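/* Usage in this file: obj_free() below reports reclamation with
 *
 *     gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
 *
 * while the allocation slow path uses gc_event_hook_prep() so that the slot is
 * filled (via newobj_fill()) before any RUBY_INTERNAL_EVENT_NEWOBJ hook can
 * observe it. */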
static inline VALUE
newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
{
#if !__has_feature(memory_sanitizer)
    GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
#endif
    RVALUE *p = RANY(obj);
    p->as.basic.flags = flags;
    *((VALUE *)&p->as.basic.klass) = klass;

#if RACTOR_CHECK_MODE
    rb_ractor_setup_belonging(obj);
#endif

#if RGENGC_CHECK_MODE
    p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;

    RB_VM_LOCK_ENTER_NO_BARRIER();
    {
        check_rvalue_consistency(obj);

        GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
        GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
        GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
        GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);

        if (flags & FL_PROMOTED1) {
            if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
        }
        else {
            if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
        }
        if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
    }
    RB_VM_LOCK_LEAVE_NO_BARRIER();
#endif

    if (UNLIKELY(wb_protected == FALSE)) {
        ASSERT_vm_locking();
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }

    // TODO: make it atomic, or ractor local
    objspace->total_allocated_objects++;

#if RGENGC_PROFILE
    if (wb_protected) {
        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
    else {
        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif

#if GC_DEBUG
    RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
    GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif

    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));

#if RGENGC_OLD_NEWOBJ_CHECK > 0
    {
        static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

        if (!is_incremental_marking(objspace) &&
            flags & FL_WB_PROTECTED &&   /* do not promote WB unprotected objects */
            ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
            if (--newobj_cnt == 0) {
                newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;

                gc_mark_set(objspace, obj);
                RVALUE_AGE_SET_OLD(objspace, obj);

                rb_gc_writebarrier_remember(obj);
            }
        }
    }
#endif
    // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
    return obj;
}
static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
static struct heap_page *heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return GET_HEAP_PAGE(obj)->slot_size;
}

static inline size_t
size_pool_slot_size(unsigned char pool_id)
{
    GC_ASSERT(pool_id < SIZE_POOL_COUNT);

    size_t slot_size = (1 << pool_id) * sizeof(RVALUE);

#if RGENGC_CHECK_MODE
    rb_objspace_t *objspace = &rb_objspace;
    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
#endif

    return slot_size;
}

bool
rb_gc_size_allocatable_p(size_t size)
{
    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
}
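/* For illustration only (values are build-dependent): assuming a 40-byte RVALUE
 * and SIZE_POOL_COUNT == 5, the pools provide slot sizes of 40, 80, 160, 320 and
 * 640 bytes ((1 << pool_id) * sizeof(RVALUE)), so rb_gc_size_allocatable_p()
 * accepts any request up to 640 bytes. */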
static inline VALUE
ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
    RVALUE *p = cache->freelist;

    if (p) {
        VALUE obj = (VALUE)p;
        cache->freelist = p->as.free.next;
        asan_unpoison_object(obj, true);
#if RGENGC_CHECK_MODE
        /* zero clear the slot so stale data cannot be mistaken for a live object */
        MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
#endif
        return obj;
    }
    else {
        return Qfalse;
    }
}
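/* The fast allocation path is simply a pop from the calling ractor's own
 * freelist: each size pool keeps a per-ractor cache whose free slots are linked
 * through p->as.free.next, so no VM lock is needed until the cache runs dry and
 * ractor_cache_slots() has to fetch a new page. */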
static struct heap_page *
heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    ASSERT_vm_locking();

    struct heap_page *page;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;

    GC_ASSERT(page->free_slots != 0);
    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);

    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE *), false);

    return page;
}
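/* heap->free_pages is a singly linked list (chained through free_next) of pages
 * that still contain free slots; heap_next_freepage() pops its head, looping
 * through heap_prepare() until sweeping, heap growth or a full GC has put at
 * least one page back on the list. */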
static inline void
ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx)
{
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));

    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];

    cache->using_page = page;
    cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;

    asan_unpoison_object((VALUE)cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)cache->freelist, T_NONE));
    asan_poison_object((VALUE)cache->freelist);
}
static void
ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    ASSERT_vm_locking();

    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
    struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));

    ractor_set_cache(cr, page, size_pool_idx);
}
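/* Refill sequence, as sketched by the functions above: ractor_cache_slots()
 * runs under the VM lock, asks heap_next_freepage() for an eden page with a
 * non-empty freelist (triggering heap_prepare() if necessary), and then
 * ractor_set_cache() detaches that page's freelist into the ractor-local cache
 * so subsequent allocations can proceed without the lock. */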
static inline VALUE
newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
{
    RVALUE *p = (RVALUE *)obj;
    p->as.values.v1 = v1;
    p->as.values.v2 = v2;
    p->as.values.v3 = v3;
    return obj;
}
2443 size_pool_idx_for_size(size_t size
)
2446 size_t slot_count
= CEILDIV(size
, sizeof(RVALUE
));
2448 /* size_pool_idx is ceil(log2(slot_count)) */
2449 size_t size_pool_idx
= 64 - nlz_int64(slot_count
- 1);
2450 if (size_pool_idx
>= SIZE_POOL_COUNT
) {
2451 rb_bug("size_pool_idx_for_size: allocation size too large");
2454 return size_pool_idx
;
2456 GC_ASSERT(size
<= sizeof(RVALUE
));
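/* Worked example (assuming a 40-byte RVALUE for illustration): a 90-byte
 * allocation gives slot_count = CEILDIV(90, 40) = 3, so
 * size_pool_idx = 64 - nlz_int64(2) = 2, which maps to a (1 << 2) * 40 =
 * 160-byte slot in size pool 2. */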
2461 ALWAYS_INLINE(static VALUE
newobj_slowpath(VALUE klass
, VALUE flags
, rb_objspace_t
*objspace
, rb_ractor_t
*cr
, int wb_protected
, size_t size_pool_idx
));
2464 newobj_slowpath(VALUE klass
, VALUE flags
, rb_objspace_t
*objspace
, rb_ractor_t
*cr
, int wb_protected
, size_t size_pool_idx
)
2469 RB_VM_LOCK_ENTER_CR_LEV(cr
, &lev
);
2471 if (UNLIKELY(during_gc
|| ruby_gc_stressful
)) {
2475 rb_bug("object allocation during garbage collection phase");
2478 if (ruby_gc_stressful
) {
2479 if (!garbage_collect(objspace
, GPR_FLAG_NEWOBJ
)) {
2485 // allocate new slot
2486 while ((obj
= ractor_cached_free_region(objspace
, cr
, size_pool_idx
)) == Qfalse
) {
2487 ractor_cache_slots(objspace
, cr
, size_pool_idx
);
2489 GC_ASSERT(obj
!= 0);
2490 newobj_init(klass
, flags
, wb_protected
, objspace
, obj
);
2492 gc_event_hook_prep(objspace
, RUBY_INTERNAL_EVENT_NEWOBJ
, obj
, newobj_fill(obj
, 0, 0, 0));
2494 RB_VM_LOCK_LEAVE_CR_LEV(cr
, &lev
);
2499 NOINLINE(static VALUE
newobj_slowpath_wb_protected(VALUE klass
, VALUE flags
,
2500 rb_objspace_t
*objspace
, rb_ractor_t
*cr
, size_t size_pool_idx
));
2501 NOINLINE(static VALUE
newobj_slowpath_wb_unprotected(VALUE klass
, VALUE flags
,
2502 rb_objspace_t
*objspace
, rb_ractor_t
*cr
, size_t size_pool_idx
));
2505 newobj_slowpath_wb_protected(VALUE klass
, VALUE flags
, rb_objspace_t
*objspace
, rb_ractor_t
*cr
, size_t size_pool_idx
)
2507 return newobj_slowpath(klass
, flags
, objspace
, cr
, TRUE
, size_pool_idx
);
2511 newobj_slowpath_wb_unprotected(VALUE klass
, VALUE flags
, rb_objspace_t
*objspace
, rb_ractor_t
*cr
, size_t size_pool_idx
)
2513 return newobj_slowpath(klass
, flags
, objspace
, cr
, FALSE
, size_pool_idx
);
2517 newobj_of0(VALUE klass
, VALUE flags
, int wb_protected
, rb_ractor_t
*cr
, size_t alloc_size
)
2520 rb_objspace_t
*objspace
= &rb_objspace
;
2522 RB_DEBUG_COUNTER_INC(obj_newobj
);
2523 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected
, !wb_protected
);
2525 #if GC_DEBUG_STRESS_TO_CLASS
2526 if (UNLIKELY(stress_to_class
)) {
2527 long i
, cnt
= RARRAY_LEN(stress_to_class
);
2528 for (i
= 0; i
< cnt
; ++i
) {
2529 if (klass
== RARRAY_AREF(stress_to_class
, i
)) rb_memerror();
2534 size_t size_pool_idx
= size_pool_idx_for_size(alloc_size
);
2536 if ((!UNLIKELY(during_gc
||
2537 ruby_gc_stressful
||
2538 gc_event_hook_available_p(objspace
)) &&
2540 (obj
= ractor_cached_free_region(objspace
, cr
, size_pool_idx
)) != Qfalse
)) {
2542 newobj_init(klass
, flags
, wb_protected
, objspace
, obj
);
2545 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath
);
2547 obj
= wb_protected
?
2548 newobj_slowpath_wb_protected(klass
, flags
, objspace
, cr
, size_pool_idx
) :
2549 newobj_slowpath_wb_unprotected(klass
, flags
, objspace
, cr
, size_pool_idx
);
2556 newobj_of(VALUE klass
, VALUE flags
, VALUE v1
, VALUE v2
, VALUE v3
, int wb_protected
, size_t alloc_size
)
2558 VALUE obj
= newobj_of0(klass
, flags
, wb_protected
, GET_RACTOR(), alloc_size
);
2559 return newobj_fill(obj
, v1
, v2
, v3
);
2563 newobj_of_cr(rb_ractor_t
*cr
, VALUE klass
, VALUE flags
, VALUE v1
, VALUE v2
, VALUE v3
, int wb_protected
, size_t alloc_size
)
2565 VALUE obj
= newobj_of0(klass
, flags
, wb_protected
, cr
, alloc_size
);
2566 return newobj_fill(obj
, v1
, v2
, v3
);
2570 rb_wb_unprotected_newobj_of(VALUE klass
, VALUE flags
, size_t size
)
2572 GC_ASSERT((flags
& FL_WB_PROTECTED
) == 0);
2573 return newobj_of(klass
, flags
, 0, 0, 0, FALSE
, size
);
2577 rb_wb_protected_newobj_of(VALUE klass
, VALUE flags
, size_t size
)
2579 GC_ASSERT((flags
& FL_WB_PROTECTED
) == 0);
2580 return newobj_of(klass
, flags
, 0, 0, 0, TRUE
, size
);
2584 rb_ec_wb_protected_newobj_of(rb_execution_context_t
*ec
, VALUE klass
, VALUE flags
, size_t size
)
2586 GC_ASSERT((flags
& FL_WB_PROTECTED
) == 0);
2587 return newobj_of_cr(rb_ec_ractor_ptr(ec
), klass
, flags
, 0, 0, 0, TRUE
, size
);
2590 /* for compatibility */
2595 return newobj_of(0, T_NONE
, 0, 0, 0, FALSE
, sizeof(RVALUE
));
2599 rb_newobj_of(VALUE klass
, VALUE flags
)
2601 if ((flags
& RUBY_T_MASK
) == T_OBJECT
) {
2602 st_table
*index_tbl
= RCLASS_IV_INDEX_TBL(klass
);
2604 VALUE obj
= newobj_of(klass
, (flags
| ROBJECT_EMBED
) & ~FL_WB_PROTECTED
, Qundef
, Qundef
, Qundef
, flags
& FL_WB_PROTECTED
, sizeof(RVALUE
));
2606 if (index_tbl
&& index_tbl
->num_entries
> ROBJECT_EMBED_LEN_MAX
) {
2607 rb_init_iv_list(obj
);
2612 return newobj_of(klass
, flags
& ~FL_WB_PROTECTED
, 0, 0, 0, flags
& FL_WB_PROTECTED
, sizeof(RVALUE
));
2616 #define UNEXPECTED_NODE(func) \
2617 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2618 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2621 rb_imemo_name(enum imemo_type type
)
2623 // put no default case to get a warning if an imemo type is missing
2625 #define IMEMO_NAME(x) case imemo_##x: return #x;
2629 IMEMO_NAME(throw_data
);
2636 IMEMO_NAME(parser_strterm
);
2637 IMEMO_NAME(callinfo
);
2638 IMEMO_NAME(callcache
);
2639 IMEMO_NAME(constcache
);
2648 rb_imemo_new(enum imemo_type type
, VALUE v1
, VALUE v2
, VALUE v3
, VALUE v0
)
2650 size_t size
= sizeof(RVALUE
);
2651 VALUE flags
= T_IMEMO
| (type
<< FL_USHIFT
);
2652 return newobj_of(v0
, flags
, v1
, v2
, v3
, TRUE
, size
);
2656 rb_imemo_tmpbuf_new(VALUE v1
, VALUE v2
, VALUE v3
, VALUE v0
)
2658 size_t size
= sizeof(RVALUE
);
2659 VALUE flags
= T_IMEMO
| (imemo_tmpbuf
<< FL_USHIFT
);
2660 return newobj_of(v0
, flags
, v1
, v2
, v3
, FALSE
, size
);
2664 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf
, size_t cnt
)
2666 return rb_imemo_tmpbuf_new((VALUE
)buf
, 0, (VALUE
)cnt
, 0);
2670 rb_imemo_tmpbuf_parser_heap(void *buf
, rb_imemo_tmpbuf_t
*old_heap
, size_t cnt
)
2672 return (rb_imemo_tmpbuf_t
*)rb_imemo_tmpbuf_new((VALUE
)buf
, (VALUE
)old_heap
, (VALUE
)cnt
, 0);
2676 imemo_memsize(VALUE obj
)
2679 switch (imemo_type(obj
)) {
2681 size
+= sizeof(RANY(obj
)->as
.imemo
.ment
.def
);
2684 size
+= rb_iseq_memsize((rb_iseq_t
*)obj
);
2687 size
+= RANY(obj
)->as
.imemo
.env
.env_size
* sizeof(VALUE
);
2690 size
+= RANY(obj
)->as
.imemo
.alloc
.cnt
* sizeof(VALUE
);
2693 size
+= rb_ast_memsize(&RANY(obj
)->as
.imemo
.ast
);
2697 case imemo_throw_data
:
2700 case imemo_parser_strterm
:
2711 rb_imemo_new_debug(enum imemo_type type
, VALUE v1
, VALUE v2
, VALUE v3
, VALUE v0
, const char *file
, int line
)
2713 VALUE memo
= rb_imemo_new(type
, v1
, v2
, v3
, v0
);
2714 fprintf(stderr
, "memo %p (type: %d) @ %s:%d\n", (void *)memo
, imemo_type(memo
), file
, line
);
2720 rb_class_allocate_instance(VALUE klass
)
2722 st_table
*index_tbl
= RCLASS_IV_INDEX_TBL(klass
);
2724 VALUE flags
= T_OBJECT
| ROBJECT_EMBED
;
2726 VALUE obj
= newobj_of(klass
, flags
, Qundef
, Qundef
, Qundef
, RGENGC_WB_PROTECTED_OBJECT
, sizeof(RVALUE
));
2728 if (index_tbl
&& index_tbl
->num_entries
> ROBJECT_EMBED_LEN_MAX
) {
2729 rb_init_iv_list(obj
);
2736 rb_data_object_check(VALUE klass
)
2738 if (klass
!= rb_cObject
&& (rb_get_alloc_func(klass
) == rb_class_allocate_instance
)) {
2739 rb_undef_alloc_func(klass
);
2740 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE
, klass
);
2745 rb_data_object_wrap(VALUE klass
, void *datap
, RUBY_DATA_FUNC dmark
, RUBY_DATA_FUNC dfree
)
2747 RUBY_ASSERT_ALWAYS(dfree
!= (RUBY_DATA_FUNC
)1);
2748 if (klass
) rb_data_object_check(klass
);
2749 return newobj_of(klass
, T_DATA
, (VALUE
)dmark
, (VALUE
)dfree
, (VALUE
)datap
, FALSE
, sizeof(RVALUE
));
2753 rb_data_object_zalloc(VALUE klass
, size_t size
, RUBY_DATA_FUNC dmark
, RUBY_DATA_FUNC dfree
)
2755 VALUE obj
= rb_data_object_wrap(klass
, 0, dmark
, dfree
);
2756 DATA_PTR(obj
) = xcalloc(1, size
);
2761 rb_data_typed_object_wrap(VALUE klass
, void *datap
, const rb_data_type_t
*type
)
2763 RBIMPL_NONNULL_ARG(type
);
2764 if (klass
) rb_data_object_check(klass
);
2765 return newobj_of(klass
, T_DATA
, (VALUE
)type
, (VALUE
)1, (VALUE
)datap
, type
->flags
& RUBY_FL_WB_PROTECTED
, sizeof(RVALUE
));
2769 rb_data_typed_object_zalloc(VALUE klass
, size_t size
, const rb_data_type_t
*type
)
2771 VALUE obj
= rb_data_typed_object_wrap(klass
, 0, type
);
2772 DATA_PTR(obj
) = xcalloc(1, size
);
2777 rb_objspace_data_type_memsize(VALUE obj
)
2779 if (RTYPEDDATA_P(obj
)) {
2780 const rb_data_type_t
*type
= RTYPEDDATA_TYPE(obj
);
2781 const void *ptr
= RTYPEDDATA_DATA(obj
);
2782 if (ptr
&& type
->function
.dsize
) {
2783 return type
->function
.dsize(ptr
);
2790 rb_objspace_data_type_name(VALUE obj
)
2792 if (RTYPEDDATA_P(obj
)) {
2793 return RTYPEDDATA_TYPE(obj
)->wrap_struct_name
;
2801 ptr_in_page_body_p(const void *ptr
, const void *memb
)
2803 struct heap_page
*page
= *(struct heap_page
**)memb
;
2804 uintptr_t p_body
= (uintptr_t)GET_PAGE_BODY(page
->start
);
2806 if ((uintptr_t)ptr
>= p_body
) {
2807 return (uintptr_t)ptr
< (p_body
+ HEAP_PAGE_SIZE
) ? 0 : 1;
2814 PUREFUNC(static inline struct heap_page
* heap_page_for_ptr(rb_objspace_t
*objspace
, uintptr_t ptr
);)
2815 static inline struct heap_page
*
2816 heap_page_for_ptr(rb_objspace_t
*objspace
, uintptr_t ptr
)
2818 struct heap_page
**res
;
2820 if (ptr
< (uintptr_t)heap_pages_lomem
||
2821 ptr
> (uintptr_t)heap_pages_himem
) {
2825 res
= bsearch((void *)ptr
, heap_pages_sorted
,
2826 (size_t)heap_allocated_pages
, sizeof(struct heap_page
*),
2827 ptr_in_page_body_p
);
2837 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t
*objspace
, void *ptr
);)
2839 is_pointer_to_heap(rb_objspace_t
*objspace
, void *ptr
)
2841 register RVALUE
*p
= RANY(ptr
);
2842 register struct heap_page
*page
;
2844 RB_DEBUG_COUNTER_INC(gc_isptr_trial
);
2846 if (p
< heap_pages_lomem
|| p
> heap_pages_himem
) return FALSE
;
2847 RB_DEBUG_COUNTER_INC(gc_isptr_range
);
2849 if ((VALUE
)p
% sizeof(RVALUE
) != 0) return FALSE
;
2850 RB_DEBUG_COUNTER_INC(gc_isptr_align
);
2852 page
= heap_page_for_ptr(objspace
, (uintptr_t)ptr
);
2854 RB_DEBUG_COUNTER_INC(gc_isptr_maybe
);
2855 if (page
->flags
.in_tomb
) {
2859 if ((uintptr_t)p
< ((uintptr_t)page
->start
)) return FALSE
;
2860 if ((uintptr_t)p
>= ((uintptr_t)page
->start
+ (page
->total_slots
* page
->slot_size
))) return FALSE
;
2861 if ((NUM_IN_PAGE(p
) * sizeof(RVALUE
)) % page
->slot_size
!= 0) return FALSE
;
2869 static enum rb_id_table_iterator_result
2870 free_const_entry_i(VALUE value
, void *data
)
2872 rb_const_entry_t
*ce
= (rb_const_entry_t
*)value
;
2874 return ID_TABLE_CONTINUE
;
2878 rb_free_const_table(struct rb_id_table
*tbl
)
2880 rb_id_table_foreach_values(tbl
, free_const_entry_i
, 0);
2881 rb_id_table_free(tbl
);
2885 free_iv_index_tbl_free_i(st_data_t key
, st_data_t value
, st_data_t data
)
2887 xfree((void *)value
);
2892 iv_index_tbl_free(struct st_table
*tbl
)
2894 st_foreach(tbl
, free_iv_index_tbl_free_i
, 0);
2898 // alive: if false, target pointers can be freed already.
2899 // To check it, we need objspace parameter.
2901 vm_ccs_free(struct rb_class_cc_entries
*ccs
, int alive
, rb_objspace_t
*objspace
, VALUE klass
)
2904 for (int i
=0; i
<ccs
->len
; i
++) {
2905 const struct rb_callcache
*cc
= ccs
->entries
[i
].cc
;
2907 void *ptr
= asan_poisoned_object_p((VALUE
)cc
);
2908 asan_unpoison_object((VALUE
)cc
, false);
2909 // ccs can be free'ed.
2910 if (is_pointer_to_heap(objspace
, (void *)cc
) &&
2911 IMEMO_TYPE_P(cc
, imemo_callcache
) &&
2912 cc
->klass
== klass
) {
2913 // OK. maybe target cc.
2917 asan_poison_object((VALUE
)cc
);
2922 asan_poison_object((VALUE
)cc
);
2925 vm_cc_invalidate(cc
);
2927 ruby_xfree(ccs
->entries
);
2933 rb_vm_ccs_free(struct rb_class_cc_entries
*ccs
)
2935 RB_DEBUG_COUNTER_INC(ccs_free
);
2936 vm_ccs_free(ccs
, TRUE
, NULL
, Qundef
);
2939 struct cc_tbl_i_data
{
2940 rb_objspace_t
*objspace
;
2945 static enum rb_id_table_iterator_result
2946 cc_table_mark_i(ID id
, VALUE ccs_ptr
, void *data_ptr
)
2948 struct cc_tbl_i_data
*data
= data_ptr
;
2949 struct rb_class_cc_entries
*ccs
= (struct rb_class_cc_entries
*)ccs_ptr
;
2950 VM_ASSERT(vm_ccs_p(ccs
));
2951 VM_ASSERT(id
== ccs
->cme
->called_id
);
2953 if (METHOD_ENTRY_INVALIDATED(ccs
->cme
)) {
2954 rb_vm_ccs_free(ccs
);
2955 return ID_TABLE_DELETE
;
2958 gc_mark(data
->objspace
, (VALUE
)ccs
->cme
);
2960 for (int i
=0; i
<ccs
->len
; i
++) {
2961 VM_ASSERT(data
->klass
== ccs
->entries
[i
].cc
->klass
);
2962 VM_ASSERT(vm_cc_check_cme(ccs
->entries
[i
].cc
, ccs
->cme
));
2964 gc_mark(data
->objspace
, (VALUE
)ccs
->entries
[i
].ci
);
2965 gc_mark(data
->objspace
, (VALUE
)ccs
->entries
[i
].cc
);
2967 return ID_TABLE_CONTINUE
;
2972 cc_table_mark(rb_objspace_t
*objspace
, VALUE klass
)
2974 struct rb_id_table
*cc_tbl
= RCLASS_CC_TBL(klass
);
2976 struct cc_tbl_i_data data
= {
2977 .objspace
= objspace
,
2980 rb_id_table_foreach(cc_tbl
, cc_table_mark_i
, &data
);
2984 static enum rb_id_table_iterator_result
2985 cc_table_free_i(VALUE ccs_ptr
, void *data_ptr
)
2987 struct cc_tbl_i_data
*data
= data_ptr
;
2988 struct rb_class_cc_entries
*ccs
= (struct rb_class_cc_entries
*)ccs_ptr
;
2989 VM_ASSERT(vm_ccs_p(ccs
));
2990 vm_ccs_free(ccs
, data
->alive
, data
->objspace
, data
->klass
);
2991 return ID_TABLE_CONTINUE
;
2995 cc_table_free(rb_objspace_t
*objspace
, VALUE klass
, bool alive
)
2997 struct rb_id_table
*cc_tbl
= RCLASS_CC_TBL(klass
);
3000 struct cc_tbl_i_data data
= {
3001 .objspace
= objspace
,
3005 rb_id_table_foreach_values(cc_tbl
, cc_table_free_i
, &data
);
3006 rb_id_table_free(cc_tbl
);
3010 static enum rb_id_table_iterator_result
3011 cvar_table_free_i(VALUE value
, void * ctx
)
3013 xfree((void *) value
);
3014 return ID_TABLE_CONTINUE
;
3018 rb_cc_table_free(VALUE klass
)
3020 cc_table_free(&rb_objspace
, klass
, TRUE
);
3024 make_zombie(rb_objspace_t
*objspace
, VALUE obj
, void (*dfree
)(void *), void *data
)
3026 struct RZombie
*zombie
= RZOMBIE(obj
);
3027 zombie
->basic
.flags
= T_ZOMBIE
| (zombie
->basic
.flags
& FL_SEEN_OBJ_ID
);
3028 zombie
->dfree
= dfree
;
3029 zombie
->data
= data
;
3030 zombie
->next
= heap_pages_deferred_final
;
3031 heap_pages_deferred_final
= (VALUE
)zombie
;
3033 struct heap_page
*page
= GET_HEAP_PAGE(obj
);
3034 page
->final_slots
++;
3035 heap_pages_final_slots
++;
3039 make_io_zombie(rb_objspace_t
*objspace
, VALUE obj
)
3041 rb_io_t
*fptr
= RANY(obj
)->as
.file
.fptr
;
3042 make_zombie(objspace
, obj
, rb_io_fptr_finalize_internal
, fptr
);
3046 obj_free_object_id(rb_objspace_t
*objspace
, VALUE obj
)
3048 ASSERT_vm_locking();
3049 st_data_t o
= (st_data_t
)obj
, id
;
3051 GC_ASSERT(FL_TEST(obj
, FL_SEEN_OBJ_ID
));
3052 FL_UNSET(obj
, FL_SEEN_OBJ_ID
);
3054 if (st_delete(objspace
->obj_to_id_tbl
, &o
, &id
)) {
3056 st_delete(objspace
->id_to_obj_tbl
, &id
, NULL
);
3059 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj
));
3064 obj_free(rb_objspace_t
*objspace
, VALUE obj
)
3066 RB_DEBUG_COUNTER_INC(obj_free
);
3067 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3069 gc_event_hook(objspace
, RUBY_INTERNAL_EVENT_FREEOBJ
, obj
);
3071 switch (BUILTIN_TYPE(obj
)) {
3076 rb_bug("obj_free() called for broken object");
3082 if (FL_TEST(obj
, FL_EXIVAR
)) {
3083 rb_free_generic_ivar((VALUE
)obj
);
3084 FL_UNSET(obj
, FL_EXIVAR
);
3087 if (FL_TEST(obj
, FL_SEEN_OBJ_ID
) && !FL_TEST(obj
, FL_FINALIZE
)) {
3088 obj_free_object_id(objspace
, obj
);
3091 if (RVALUE_WB_UNPROTECTED(obj
)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj
), obj
);
3093 #if RGENGC_CHECK_MODE
3094 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3095 CHECK(RVALUE_WB_UNPROTECTED
);
3096 CHECK(RVALUE_MARKED
);
3097 CHECK(RVALUE_MARKING
);
3098 CHECK(RVALUE_UNCOLLECTIBLE
);
3102 switch (BUILTIN_TYPE(obj
)) {
3104 if (RANY(obj
)->as
.basic
.flags
& ROBJECT_EMBED
) {
3105 RB_DEBUG_COUNTER_INC(obj_obj_embed
);
3107 else if (ROBJ_TRANSIENT_P(obj
)) {
3108 RB_DEBUG_COUNTER_INC(obj_obj_transient
);
3111 xfree(RANY(obj
)->as
.object
.as
.heap
.ivptr
);
3112 RB_DEBUG_COUNTER_INC(obj_obj_ptr
);
3117 rb_id_table_free(RCLASS_M_TBL(obj
));
3118 cc_table_free(objspace
, obj
, FALSE
);
3119 if (RCLASS_IV_TBL(obj
)) {
3120 st_free_table(RCLASS_IV_TBL(obj
));
3122 if (RCLASS_CONST_TBL(obj
)) {
3123 rb_free_const_table(RCLASS_CONST_TBL(obj
));
3125 if (RCLASS_IV_INDEX_TBL(obj
)) {
3126 iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj
));
3128 if (RCLASS_CVC_TBL(obj
)) {
3129 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj
), cvar_table_free_i
, NULL
);
3130 rb_id_table_free(RCLASS_CVC_TBL(obj
));
3132 rb_class_remove_subclass_head(obj
);
3133 rb_class_remove_from_module_subclasses(obj
);
3134 rb_class_remove_from_super_subclasses(obj
);
3135 #if SIZEOF_SERIAL_T != SIZEOF_VALUE && USE_RVARGC
3136 xfree(RCLASS(obj
)->class_serial_ptr
);
3140 if (RCLASS_EXT(obj
))
3141 xfree(RCLASS_EXT(obj
));
3144 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr
, BUILTIN_TYPE(obj
) == T_MODULE
);
3145 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr
, BUILTIN_TYPE(obj
) == T_CLASS
);
3154 #if USE_DEBUG_COUNTER
3155 switch (RHASH_SIZE(obj
)) {
3157 RB_DEBUG_COUNTER_INC(obj_hash_empty
);
3160 RB_DEBUG_COUNTER_INC(obj_hash_1
);
3163 RB_DEBUG_COUNTER_INC(obj_hash_2
);
3166 RB_DEBUG_COUNTER_INC(obj_hash_3
);
3169 RB_DEBUG_COUNTER_INC(obj_hash_4
);
3175 RB_DEBUG_COUNTER_INC(obj_hash_5_8
);
3178 GC_ASSERT(RHASH_SIZE(obj
) > 8);
3179 RB_DEBUG_COUNTER_INC(obj_hash_g8
);
3182 if (RHASH_AR_TABLE_P(obj
)) {
3183 if (RHASH_AR_TABLE(obj
) == NULL
) {
3184 RB_DEBUG_COUNTER_INC(obj_hash_null
);
3187 RB_DEBUG_COUNTER_INC(obj_hash_ar
);
3191 RB_DEBUG_COUNTER_INC(obj_hash_st
);
3194 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj
, RHASH_ST_TABLE_FLAG
)) {
3195 struct ar_table_struct
*tab
= RHASH(obj
)->as
.ar
;
3198 if (RHASH_TRANSIENT_P(obj
)) {
3199 RB_DEBUG_COUNTER_INC(obj_hash_transient
);
3207 GC_ASSERT(RHASH_ST_TABLE_P(obj
));
3208 st_free_table(RHASH(obj
)->as
.st
);
3212 if (RANY(obj
)->as
.regexp
.ptr
) {
3213 onig_free(RANY(obj
)->as
.regexp
.ptr
);
3214 RB_DEBUG_COUNTER_INC(obj_regexp_ptr
);
3218 if (DATA_PTR(obj
)) {
3219 int free_immediately
= FALSE
;
3220 void (*dfree
)(void *);
3221 void *data
= DATA_PTR(obj
);
3223 if (RTYPEDDATA_P(obj
)) {
3224 free_immediately
= (RANY(obj
)->as
.typeddata
.type
->flags
& RUBY_TYPED_FREE_IMMEDIATELY
) != 0;
3225 dfree
= RANY(obj
)->as
.typeddata
.type
->function
.dfree
;
3226 if (0 && free_immediately
== 0) {
3227 /* to expose non-free-immediate T_DATA */
3228 fprintf(stderr
, "not immediate -> %s\n", RANY(obj
)->as
.typeddata
.type
->wrap_struct_name
);
3232 dfree
= RANY(obj
)->as
.data
.dfree
;
3236 if (dfree
== RUBY_DEFAULT_FREE
) {
3238 RB_DEBUG_COUNTER_INC(obj_data_xfree
);
3240 else if (free_immediately
) {
3242 RB_DEBUG_COUNTER_INC(obj_data_imm_free
);
3245 make_zombie(objspace
, obj
, dfree
, data
);
3246 RB_DEBUG_COUNTER_INC(obj_data_zombie
);
3251 RB_DEBUG_COUNTER_INC(obj_data_empty
);
3256 if (RANY(obj
)->as
.match
.rmatch
) {
3257 struct rmatch
*rm
= RANY(obj
)->as
.match
.rmatch
;
3258 #if USE_DEBUG_COUNTER
3259 if (rm
->regs
.num_regs
>= 8) {
3260 RB_DEBUG_COUNTER_INC(obj_match_ge8
);
3262 else if (rm
->regs
.num_regs
>= 4) {
3263 RB_DEBUG_COUNTER_INC(obj_match_ge4
);
3265 else if (rm
->regs
.num_regs
>= 1) {
3266 RB_DEBUG_COUNTER_INC(obj_match_under4
);
3269 onig_region_free(&rm
->regs
, 0);
3270 if (rm
->char_offset
)
3271 xfree(rm
->char_offset
);
3274 RB_DEBUG_COUNTER_INC(obj_match_ptr
);
3278 if (RANY(obj
)->as
.file
.fptr
) {
3279 make_io_zombie(objspace
, obj
);
3280 RB_DEBUG_COUNTER_INC(obj_file_ptr
);
3285 RB_DEBUG_COUNTER_INC(obj_rational
);
3288 RB_DEBUG_COUNTER_INC(obj_complex
);
3293 /* Basically , T_ICLASS shares table with the module */
3294 if (RICLASS_OWNS_M_TBL_P(obj
)) {
3295 /* Method table is not shared for origin iclasses of classes */
3296 rb_id_table_free(RCLASS_M_TBL(obj
));
3298 if (RCLASS_CALLABLE_M_TBL(obj
) != NULL
) {
3299 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj
));
3301 rb_class_remove_subclass_head(obj
);
3302 cc_table_free(objspace
, obj
, FALSE
);
3303 rb_class_remove_from_module_subclasses(obj
);
3304 rb_class_remove_from_super_subclasses(obj
);
3306 xfree(RCLASS_EXT(obj
));
3309 RB_DEBUG_COUNTER_INC(obj_iclass_ptr
);
3313 RB_DEBUG_COUNTER_INC(obj_float
);
3317 if (!BIGNUM_EMBED_P(obj
) && BIGNUM_DIGITS(obj
)) {
3318 xfree(BIGNUM_DIGITS(obj
));
3319 RB_DEBUG_COUNTER_INC(obj_bignum_ptr
);
3322 RB_DEBUG_COUNTER_INC(obj_bignum_embed
);
3327 UNEXPECTED_NODE(obj_free
);
3331 if ((RBASIC(obj
)->flags
& RSTRUCT_EMBED_LEN_MASK
) ||
3332 RANY(obj
)->as
.rstruct
.as
.heap
.ptr
== NULL
) {
3333 RB_DEBUG_COUNTER_INC(obj_struct_embed
);
3335 else if (RSTRUCT_TRANSIENT_P(obj
)) {
3336 RB_DEBUG_COUNTER_INC(obj_struct_transient
);
3339 xfree((void *)RANY(obj
)->as
.rstruct
.as
.heap
.ptr
);
3340 RB_DEBUG_COUNTER_INC(obj_struct_ptr
);
3346 rb_gc_free_dsymbol(obj
);
3347 RB_DEBUG_COUNTER_INC(obj_symbol
);
3352 switch (imemo_type(obj
)) {
3354 rb_free_method_entry(&RANY(obj
)->as
.imemo
.ment
);
3355 RB_DEBUG_COUNTER_INC(obj_imemo_ment
);
3358 rb_iseq_free(&RANY(obj
)->as
.imemo
.iseq
);
3359 RB_DEBUG_COUNTER_INC(obj_imemo_iseq
);
3362 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj
)->as
.imemo
.env
.ep
));
3363 xfree((VALUE
*)RANY(obj
)->as
.imemo
.env
.env
);
3364 RB_DEBUG_COUNTER_INC(obj_imemo_env
);
3367 xfree(RANY(obj
)->as
.imemo
.alloc
.ptr
);
3368 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf
);
3371 rb_ast_free(&RANY(obj
)->as
.imemo
.ast
);
3372 RB_DEBUG_COUNTER_INC(obj_imemo_ast
);
3375 RB_DEBUG_COUNTER_INC(obj_imemo_cref
);
3378 RB_DEBUG_COUNTER_INC(obj_imemo_svar
);
3380 case imemo_throw_data
:
3381 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data
);
3384 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc
);
3387 RB_DEBUG_COUNTER_INC(obj_imemo_memo
);
3389 case imemo_parser_strterm
:
3390 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm
);
3392 case imemo_callinfo
:
3393 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo
);
3395 case imemo_callcache
:
3396 RB_DEBUG_COUNTER_INC(obj_imemo_callcache
);
3398 case imemo_constcache
:
3399 RB_DEBUG_COUNTER_INC(obj_imemo_constcache
);
3405 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE
,
3406 BUILTIN_TYPE(obj
), (void*)obj
, RBASIC(obj
)->flags
);
3409 if (FL_TEST(obj
, FL_FINALIZE
)) {
3410 make_zombie(objspace
, obj
, 0, 0);
3419 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3420 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
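/* For illustration, with a 40-byte RVALUE these evaluate to an increment of 20
 * and an initial id of 40; ids handed out by cached_object_id() below therefore
 * advance in OBJ_ID_INCREMENT steps starting at OBJ_ID_INITIAL. */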
3423 object_id_cmp(st_data_t x
, st_data_t y
)
3425 if (RB_BIGNUM_TYPE_P(x
)) {
3426 return !rb_big_eql(x
, y
);
3434 object_id_hash(st_data_t n
)
3436 if (RB_BIGNUM_TYPE_P(n
)) {
3437 return FIX2LONG(rb_big_hash(n
));
3440 return st_numhash(n
);
3443 static const struct st_hash_type object_id_hash_type
= {
3451 rb_objspace_t
*objspace
= &rb_objspace
;
3453 #if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
3454 /* Need to determine if we can use mmap at runtime. */
3456 /* If the PAGE_SIZE macro can be used. */
3457 use_mmap_aligned_alloc
= PAGE_SIZE
<= HEAP_PAGE_SIZE
;
3458 # elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3459 /* If we can use sysconf to determine the page size. */
3460 use_mmap_aligned_alloc
= sysconf(_SC_PAGE_SIZE
) <= HEAP_PAGE_SIZE
;
3462 /* Otherwise we can't determine the system page size, so don't use mmap. */
3463 use_mmap_aligned_alloc
= FALSE
;
3467 objspace
->next_object_id
= INT2FIX(OBJ_ID_INITIAL
);
3468 objspace
->id_to_obj_tbl
= st_init_table(&object_id_hash_type
);
3469 objspace
->obj_to_id_tbl
= st_init_numtable();
3471 #if RGENGC_ESTIMATE_OLDMALLOC
3472 objspace
->rgengc
.oldmalloc_increase_limit
= gc_params
.oldmalloc_limit_min
;
3475 heap_add_pages(objspace
, &size_pools
[0], SIZE_POOL_EDEN_HEAP(&size_pools
[0]), gc_params
.heap_init_slots
/ HEAP_PAGE_OBJ_LIMIT
);
3477 /* Give other size pools allocatable pages. */
3478 for (int i
= 1; i
< SIZE_POOL_COUNT
; i
++) {
3479 rb_size_pool_t
*size_pool
= &size_pools
[i
];
3480 int multiple
= size_pool
->slot_size
/ sizeof(RVALUE
);
3481 size_pool
->allocatable_pages
= gc_params
.heap_init_slots
* multiple
/ HEAP_PAGE_OBJ_LIMIT
;
3483 heap_pages_expand_sorted(objspace
);
3485 init_mark_stack(&objspace
->mark_stack
);
3487 objspace
->profile
.invoke_time
= getrusage_time();
3488 finalizer_table
= st_init_numtable();
3492 Init_gc_stress(void)
3494 rb_objspace_t
*objspace
= &rb_objspace
;
3496 gc_stress_set(objspace
, ruby_initial_gc_stress
);
3499 typedef int each_obj_callback(void *, void *, size_t, void *);
3501 static void objspace_each_objects(rb_objspace_t
*objspace
, each_obj_callback
*callback
, void *data
, bool protected);
3502 static void objspace_reachable_objects_from_root(rb_objspace_t
*, void (func
)(const char *, VALUE
, void *), void *);
3504 struct each_obj_data
{
3505 rb_objspace_t
*objspace
;
3506 bool reenable_incremental
;
3508 each_obj_callback
*callback
;
3511 struct heap_page
**pages
[SIZE_POOL_COUNT
];
3512 size_t pages_counts
[SIZE_POOL_COUNT
];
3516 objspace_each_objects_ensure(VALUE arg
)
3518 struct each_obj_data
*data
= (struct each_obj_data
*)arg
;
3519 rb_objspace_t
*objspace
= data
->objspace
;
3521 /* Reenable incremental GC */
3522 if (data
->reenable_incremental
) {
3523 objspace
->flags
.dont_incremental
= FALSE
;
3526 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
3527 struct heap_page
**pages
= data
->pages
[i
];
3528 /* pages could be NULL if an error was raised during setup (e.g.
3529 * malloc failed due to out of memory). */
3539 objspace_each_objects_try(VALUE arg
)
3541 struct each_obj_data
*data
= (struct each_obj_data
*)arg
;
3542 rb_objspace_t
*objspace
= data
->objspace
;
3544 /* Copy pages from all size_pools to their respective buffers. */
3545 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
3546 rb_size_pool_t
*size_pool
= &size_pools
[i
];
3547 size_t size
= size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool
)->total_pages
, sizeof(struct heap_page
*), rb_eRuntimeError
);
3549 struct heap_page
**pages
= malloc(size
);
3550 if (!pages
) rb_memerror();
3552 /* Set up pages buffer by iterating over all pages in the current eden
3553 * heap. This will be a snapshot of the state of the heap before we
3554 * call the callback over each page that exists in this buffer. Thus it
3555 * is safe for the callback to allocate objects without possibly entering
3556 * an infinite loop. */
3557 struct heap_page
*page
= 0;
3558 size_t pages_count
= 0;
3559 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool
)->pages
, page
, page_node
) {
3560 pages
[pages_count
] = page
;
3563 data
->pages
[i
] = pages
;
3564 data
->pages_counts
[i
] = pages_count
;
3565 GC_ASSERT(pages_count
== SIZE_POOL_EDEN_HEAP(size_pool
)->total_pages
);
3568 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
3569 rb_size_pool_t
*size_pool
= &size_pools
[i
];
3570 size_t pages_count
= data
->pages_counts
[i
];
3571 struct heap_page
**pages
= data
->pages
[i
];
3573 struct heap_page
*page
= list_top(&SIZE_POOL_EDEN_HEAP(size_pool
)->pages
, struct heap_page
, page_node
);
3574 for (size_t i
= 0; i
< pages_count
; i
++) {
3575 /* If we have reached the end of the linked list then there are no
3576 * more pages, so break. */
3577 if (page
== NULL
) break;
3579 /* If this page does not match the one in the buffer, then move to
3580 * the next page in the buffer. */
3581 if (pages
[i
] != page
) continue;
3583 uintptr_t pstart
= (uintptr_t)page
->start
;
3584 uintptr_t pend
= pstart
+ (page
->total_slots
* size_pool
->slot_size
);
3586 if ((*data
->callback
)((void *)pstart
, (void *)pend
, size_pool
->slot_size
, data
->data
)) {
3590 page
= list_next(&SIZE_POOL_EDEN_HEAP(size_pool
)->pages
, page
, page_node
);
/*
 * rb_objspace_each_objects() is a special C API to walk through the
 * Ruby object space. This C API is difficult to use safely; to be frank,
 * you should not use it unless you have read the source code of this
 * function and understand exactly what it does.
 *
 * 'callback' will be called several times (once per heap page in the
 * current implementation) with:
 *   vstart: a pointer to the first living object of the heap_page.
 *   vend:   a pointer to just past the valid heap_page area.
 *   stride: the distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration is stopped.
 *
 * This is a sample callback that iterates over living objects:
 *
 *   static int
 *   sample_callback(void *vstart, void *vend, int stride, void *data) {
 *       VALUE v = (VALUE)vstart;
 *       for (; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags) { // liveness check
 *               // do something with live object 'v'
 *           }
 *       }
 *       return 0; // continue the iteration
 *   }
 *
 * Note: 'vstart' is not the top of the heap_page. It points at the first
 * living object, so that at least one object is held and GC issues are
 * avoided. This means that you cannot walk through every Ruby object page,
 * including pages of freed objects.
 *
 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
 * However, it may become a variable value for some reason, so you must use
 * 'stride' instead of a constant in the iteration.
 */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    objspace_each_objects(&rb_objspace, callback, data, TRUE);
}
3640 objspace_each_objects(rb_objspace_t
*objspace
, each_obj_callback
*callback
, void *data
, bool protected)
3642 /* Disable incremental GC */
3643 bool reenable_incremental
= FALSE
;
3645 reenable_incremental
= !objspace
->flags
.dont_incremental
;
3648 objspace
->flags
.dont_incremental
= TRUE
;
3651 struct each_obj_data each_obj_data
= {
3652 .objspace
= objspace
,
3653 .reenable_incremental
= reenable_incremental
,
3655 .callback
= callback
,
3659 .pages_counts
= {0},
3661 rb_ensure(objspace_each_objects_try
, (VALUE
)&each_obj_data
,
3662 objspace_each_objects_ensure
, (VALUE
)&each_obj_data
);
3666 rb_objspace_each_objects_without_setup(each_obj_callback
*callback
, void *data
)
3668 objspace_each_objects(&rb_objspace
, callback
, data
, FALSE
);
3671 struct os_each_struct
{
3677 internal_object_p(VALUE obj
)
3679 RVALUE
*p
= (RVALUE
*)obj
;
3680 void *ptr
= __asan_region_is_poisoned(p
, SIZEOF_VALUE
);
3681 asan_unpoison_object(obj
, false);
3682 bool used_p
= p
->as
.basic
.flags
;
3685 switch (BUILTIN_TYPE(obj
)) {
3687 UNEXPECTED_NODE(internal_object_p
);
3696 if (!p
->as
.basic
.klass
) break;
3697 if (FL_TEST(obj
, FL_SINGLETON
)) {
3698 return rb_singleton_class_internal_p(obj
);
3702 if (!p
->as
.basic
.klass
) break;
3706 if (ptr
|| ! used_p
) {
3707 asan_poison_object(obj
);
3713 rb_objspace_internal_object_p(VALUE obj
)
3715 return internal_object_p(obj
);
3719 os_obj_of_i(void *vstart
, void *vend
, size_t stride
, void *data
)
3721 struct os_each_struct
*oes
= (struct os_each_struct
*)data
;
3723 VALUE v
= (VALUE
)vstart
;
3724 for (; v
!= (VALUE
)vend
; v
+= stride
) {
3725 if (!internal_object_p(v
)) {
3726 if (!oes
->of
|| rb_obj_is_kind_of(v
, oes
->of
)) {
3727 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v
)) {
3741 struct os_each_struct oes
;
3745 rb_objspace_each_objects(os_obj_of_i
, &oes
);
3746 return SIZET2NUM(oes
.num
);
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, non-immediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block only for
 *  those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     b = 95         # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
3786 os_each_obj(int argc
, VALUE
*argv
, VALUE os
)
3790 of
= (!rb_check_arity(argc
, 0, 1) ? 0 : argv
[0]);
3791 RETURN_ENUMERATOR(os
, 1, &of
);
3792 return os_obj_of(of
);
3797 * ObjectSpace.undefine_finalizer(obj)
3799 * Removes all finalizers for <i>obj</i>.
3804 undefine_final(VALUE os
, VALUE obj
)
3806 return rb_undefine_finalizer(obj
);
3810 rb_undefine_finalizer(VALUE obj
)
3812 rb_objspace_t
*objspace
= &rb_objspace
;
3813 st_data_t data
= obj
;
3814 rb_check_frozen(obj
);
3815 st_delete(finalizer_table
, &data
, 0);
3816 FL_UNSET(obj
, FL_FINALIZE
);
3821 should_be_callable(VALUE block
)
3823 if (!rb_obj_respond_to(block
, idCall
, TRUE
)) {
3824 rb_raise(rb_eArgError
, "wrong type argument %"PRIsVALUE
" (should be callable)",
3825 rb_obj_class(block
));
3830 should_be_finalizable(VALUE obj
)
3832 if (!FL_ABLE(obj
)) {
3833 rb_raise(rb_eArgError
, "cannot define finalizer for %s",
3834 rb_obj_classname(obj
));
3836 rb_check_frozen(obj
);
3841 * ObjectSpace.define_finalizer(obj, aProc=proc())
3843 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3844 * was destroyed. The object ID of the <i>obj</i> will be passed
3845 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3846 * method, make sure it can be called with a single argument.
3848 * The return value is an array <code>[0, aProc]</code>.
3850 * The two recommended patterns are to either create the finaliser proc
3851 * in a non-instance method where it can safely capture the needed state,
3852 * or to use a custom callable object that stores the needed state
3853 * explicitly as instance variables.
3856 * def initialize(data_needed_for_finalization)
3857 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
3860 * def self.create_finalizer(data_needed_for_finalization)
3862 * puts "finalizing #{data_needed_for_finalization}"
3869 * def initialize(data_needed_for_finalization)
3870 * @data_needed_for_finalization = data_needed_for_finalization
3874 * puts "finalizing #{@data_needed_for_finalization}"
3878 * def initialize(data_needed_for_finalization)
3879 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
3883 * Note that if your finalizer references the object to be
3884 * finalized it will never be run on GC, although it will still be
3885 * run at exit. You will get a warning if you capture the object
3886 * to be finalized as the receiver of the finalizer.
3888 * class CapturesSelf
3889 * def initialize(name)
3890 * ObjectSpace.define_finalizer(self, proc {
3891 * # this finalizer will only be run on exit
3892 * puts "finalizing #{name}"
3897 * Also note that finalization can be unpredictable and is never guaranteed
3898 * to be run except on exit.
3902 define_final(int argc
, VALUE
*argv
, VALUE os
)
3906 rb_scan_args(argc
, argv
, "11", &obj
, &block
);
3907 should_be_finalizable(obj
);
3909 block
= rb_block_proc();
3912 should_be_callable(block
);
3915 if (rb_callable_receiver(block
) == obj
) {
3916 rb_warn("finalizer references object to be finalized");
3919 return define_final0(obj
, block
);
3923 define_final0(VALUE obj
, VALUE block
)
3925 rb_objspace_t
*objspace
= &rb_objspace
;
3929 RBASIC(obj
)->flags
|= FL_FINALIZE
;
3931 if (st_lookup(finalizer_table
, obj
, &data
)) {
3932 table
= (VALUE
)data
;
3934 /* avoid duplicate block, table is usually small */
3936 long len
= RARRAY_LEN(table
);
3939 for (i
= 0; i
< len
; i
++) {
3940 VALUE recv
= RARRAY_AREF(table
, i
);
3941 if (rb_equal(recv
, block
)) {
3948 rb_ary_push(table
, block
);
3951 table
= rb_ary_new3(1, block
);
3952 RBASIC_CLEAR_CLASS(table
);
3953 st_add_direct(finalizer_table
, obj
, table
);
3956 block
= rb_ary_new3(2, INT2FIX(0), block
);
3962 rb_define_finalizer(VALUE obj
, VALUE block
)
3964 should_be_finalizable(obj
);
3965 should_be_callable(block
);
3966 return define_final0(obj
, block
);
3970 rb_gc_copy_finalizer(VALUE dest
, VALUE obj
)
3972 rb_objspace_t
*objspace
= &rb_objspace
;
3976 if (!FL_TEST(obj
, FL_FINALIZE
)) return;
3977 if (st_lookup(finalizer_table
, obj
, &data
)) {
3978 table
= (VALUE
)data
;
3979 st_insert(finalizer_table
, dest
, table
);
3981 FL_SET(dest
, FL_FINALIZE
);
3985 run_single_final(VALUE cmd
, VALUE objid
)
3987 return rb_check_funcall(cmd
, idCall
, 1, &objid
);
3991 warn_exception_in_finalizer(rb_execution_context_t
*ec
, VALUE final
)
3993 if (final
!= Qundef
&& !NIL_P(ruby_verbose
)) {
3994 VALUE errinfo
= ec
->errinfo
;
3995 rb_warn("Exception in finalizer %+"PRIsVALUE
, final
);
3996 rb_ec_error_print(ec
, errinfo
);
4001 run_finalizer(rb_objspace_t
*objspace
, VALUE obj
, VALUE table
)
4004 enum ruby_tag_type state
;
4009 rb_control_frame_t
*cfp
;
4012 rb_execution_context_t
* volatile ec
= GET_EC();
4013 #define RESTORE_FINALIZER() (\
4014 ec->cfp = saved.cfp, \
4015 ec->errinfo = saved.errinfo)
4017 saved
.errinfo
= ec
->errinfo
;
4018 saved
.objid
= rb_obj_id(obj
);
4019 saved
.cfp
= ec
->cfp
;
4021 saved
.final
= Qundef
;
4024 state
= EC_EXEC_TAG();
4025 if (state
!= TAG_NONE
) {
4026 ++saved
.finished
; /* skip failed finalizer */
4027 warn_exception_in_finalizer(ec
, ATOMIC_VALUE_EXCHANGE(saved
.final
, Qundef
));
4029 for (i
= saved
.finished
;
4030 RESTORE_FINALIZER(), i
<RARRAY_LEN(table
);
4031 saved
.finished
= ++i
) {
4032 run_single_final(saved
.final
= RARRAY_AREF(table
, i
), saved
.objid
);
4035 #undef RESTORE_FINALIZER
4039 run_final(rb_objspace_t
*objspace
, VALUE zombie
)
4041 st_data_t key
, table
;
4043 if (RZOMBIE(zombie
)->dfree
) {
4044 RZOMBIE(zombie
)->dfree(RZOMBIE(zombie
)->data
);
4047 key
= (st_data_t
)zombie
;
4048 if (st_delete(finalizer_table
, &key
, &table
)) {
4049 run_finalizer(objspace
, zombie
, (VALUE
)table
);
4054 finalize_list(rb_objspace_t
*objspace
, VALUE zombie
)
4058 struct heap_page
*page
;
4059 asan_unpoison_object(zombie
, false);
4060 next_zombie
= RZOMBIE(zombie
)->next
;
4061 page
= GET_HEAP_PAGE(zombie
);
4063 run_final(objspace
, zombie
);
4067 GC_ASSERT(BUILTIN_TYPE(zombie
) == T_ZOMBIE
);
4068 if (FL_TEST(zombie
, FL_SEEN_OBJ_ID
)) {
4069 obj_free_object_id(objspace
, zombie
);
4072 GC_ASSERT(heap_pages_final_slots
> 0);
4073 GC_ASSERT(page
->final_slots
> 0);
4075 heap_pages_final_slots
--;
4076 page
->final_slots
--;
4078 heap_page_add_freeobj(objspace
, page
, zombie
);
4079 objspace
->profile
.total_freed_objects
++;
4083 zombie
= next_zombie
;
4088 finalize_deferred(rb_objspace_t
*objspace
)
4091 rb_execution_context_t
*ec
= GET_EC();
4092 ec
->interrupt_mask
|= PENDING_INTERRUPT_MASK
;
4094 while ((zombie
= ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final
, 0)) != 0) {
4095 finalize_list(objspace
, zombie
);
4098 ec
->interrupt_mask
&= ~PENDING_INTERRUPT_MASK
;
4102 gc_finalize_deferred(void *dmy
)
4104 rb_objspace_t
*objspace
= dmy
;
4105 if (ATOMIC_EXCHANGE(finalizing
, 1)) return;
4107 finalize_deferred(objspace
);
4108 ATOMIC_SET(finalizing
, 0);
4112 gc_finalize_deferred_register(rb_objspace_t
*objspace
)
4114 if (rb_postponed_job_register_one(0, gc_finalize_deferred
, objspace
) == 0) {
4115 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4119 struct force_finalize_list
{
4122 struct force_finalize_list
*next
;
4126 force_chain_object(st_data_t key
, st_data_t val
, st_data_t arg
)
4128 struct force_finalize_list
**prev
= (struct force_finalize_list
**)arg
;
4129 struct force_finalize_list
*curr
= ALLOC(struct force_finalize_list
);
4137 bool rb_obj_is_main_ractor(VALUE gv
);
4140 rb_objspace_call_finalizer(rb_objspace_t
*objspace
)
4144 #if RGENGC_CHECK_MODE >= 2
4145 gc_verify_internal_consistency(objspace
);
4149 if (ATOMIC_EXCHANGE(finalizing
, 1)) return;
4151 /* run finalizers */
4152 finalize_deferred(objspace
);
4153 GC_ASSERT(heap_pages_deferred_final
== 0);
4156 /* prohibit incremental GC */
4157 objspace
->flags
.dont_incremental
= 1;
4159 /* force to run finalizer */
4160 while (finalizer_table
->num_entries
) {
4161 struct force_finalize_list
*list
= 0;
4162 st_foreach(finalizer_table
, force_chain_object
, (st_data_t
)&list
);
4164 struct force_finalize_list
*curr
= list
;
4165 st_data_t obj
= (st_data_t
)curr
->obj
;
4166 run_finalizer(objspace
, curr
->obj
, curr
->table
);
4167 st_delete(finalizer_table
, &obj
, 0);
4173 /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
4176 /* running data/file finalizers are part of garbage collection */
4177 unsigned int lock_lev
;
4178 gc_enter(objspace
, gc_enter_event_finalizer
, &lock_lev
);
4180 /* run data/file object's finalizers */
4181 for (i
= 0; i
< heap_allocated_pages
; i
++) {
4182 struct heap_page
*page
= heap_pages_sorted
[i
];
4183 short stride
= page
->slot_size
;
4185 uintptr_t p
= (uintptr_t)page
->start
;
4186 uintptr_t pend
= p
+ page
->total_slots
* stride
;
4187 for (; p
< pend
; p
+= stride
) {
4188 VALUE vp
= (VALUE
)p
;
4189 void *poisoned
= asan_poisoned_object_p(vp
);
4190 asan_unpoison_object(vp
, false);
4191 switch (BUILTIN_TYPE(vp
)) {
4193 if (!DATA_PTR(p
) || !RANY(p
)->as
.data
.dfree
) break;
4194 if (rb_obj_is_thread(vp
)) break;
4195 if (rb_obj_is_mutex(vp
)) break;
4196 if (rb_obj_is_fiber(vp
)) break;
4197 if (rb_obj_is_main_ractor(vp
)) break;
4198 if (RTYPEDDATA_P(vp
)) {
4199 RDATA(p
)->dfree
= RANY(p
)->as
.typeddata
.type
->function
.dfree
;
4201 RANY(p
)->as
.free
.flags
= 0;
4202 if (RANY(p
)->as
.data
.dfree
== RUBY_DEFAULT_FREE
) {
4205 else if (RANY(p
)->as
.data
.dfree
) {
4206 make_zombie(objspace
, vp
, RANY(p
)->as
.data
.dfree
, RANY(p
)->as
.data
.data
);
4210 if (RANY(p
)->as
.file
.fptr
) {
4211 make_io_zombie(objspace
, vp
);
4218 GC_ASSERT(BUILTIN_TYPE(vp
) == T_NONE
);
4219 asan_poison_object(vp
);
4224 gc_exit(objspace
, gc_enter_event_finalizer
, &lock_lev
);
4226 if (heap_pages_deferred_final
) {
4227 finalize_list(objspace
, heap_pages_deferred_final
);
4230 st_free_table(finalizer_table
);
4231 finalizer_table
= 0;
4232 ATOMIC_SET(finalizing
, 0);
4236 is_swept_object(rb_objspace_t
*objspace
, VALUE ptr
)
4238 struct heap_page
*page
= GET_HEAP_PAGE(ptr
);
4239 return page
->flags
.before_sweep
? FALSE
: TRUE
;
4242 /* garbage objects will be collected soon. */
4244 is_garbage_object(rb_objspace_t
*objspace
, VALUE ptr
)
4246 if (!is_lazy_sweeping(objspace
) ||
4247 is_swept_object(objspace
, ptr
) ||
4248 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr
), ptr
)) {
4258 is_live_object(rb_objspace_t
*objspace
, VALUE ptr
)
4260 switch (BUILTIN_TYPE(ptr
)) {
4269 if (!is_garbage_object(objspace
, ptr
)) {
4278 is_markable_object(rb_objspace_t
*objspace
, VALUE obj
)
4280 if (rb_special_const_p(obj
)) return FALSE
; /* special const is not markable */
4281 check_rvalue_consistency(obj
);
4286 rb_objspace_markable_object_p(VALUE obj
)
4288 rb_objspace_t
*objspace
= &rb_objspace
;
4289 return is_markable_object(objspace
, obj
) && is_live_object(objspace
, obj
);
4293 rb_objspace_garbage_object_p(VALUE obj
)
4295 rb_objspace_t
*objspace
= &rb_objspace
;
4296 return is_garbage_object(objspace
, obj
);
4300 id2ref_obj_tbl(rb_objspace_t
*objspace
, VALUE objid
)
4303 if (st_lookup(objspace
->id_to_obj_tbl
, objid
, &orig
)) {
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *
 *  In multi-ractor mode, if the object is not shareable, it raises
 *  RangeError.
 */
4329 #if SIZEOF_LONG == SIZEOF_VOIDP
4330 #define NUM2PTR(x) NUM2ULONG(x)
4331 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4332 #define NUM2PTR(x) NUM2ULL(x)
4334 rb_objspace_t
*objspace
= &rb_objspace
;
4339 objid
= rb_to_int(objid
);
4340 if (FIXNUM_P(objid
) || rb_big_size(objid
) <= SIZEOF_VOIDP
) {
4341 ptr
= NUM2PTR(objid
);
4342 if (ptr
== Qtrue
) return Qtrue
;
4343 if (ptr
== Qfalse
) return Qfalse
;
4344 if (NIL_P(ptr
)) return Qnil
;
4345 if (FIXNUM_P(ptr
)) return (VALUE
)ptr
;
4346 if (FLONUM_P(ptr
)) return (VALUE
)ptr
;
4348 ptr
= obj_id_to_ref(objid
);
4349 if ((ptr
% sizeof(RVALUE
)) == (4 << 2)) {
4350 ID symid
= ptr
/ sizeof(RVALUE
);
4352 if (rb_id2str(symid
) == 0)
4353 rb_raise(rb_eRangeError
, "%p is not symbol id value", p0
);
4354 return ID2SYM(symid
);
4358 if ((orig
= id2ref_obj_tbl(objspace
, objid
)) != Qundef
&&
4359 is_live_object(objspace
, orig
)) {
4361 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig
)) {
4365 rb_raise(rb_eRangeError
, "%+"PRIsVALUE
" is id of the unshareable object on multi-ractor", rb_int2str(objid
, 10));
4369 if (rb_int_ge(objid
, objspace
->next_object_id
)) {
4370 rb_raise(rb_eRangeError
, "%+"PRIsVALUE
" is not id value", rb_int2str(objid
, 10));
4373 rb_raise(rb_eRangeError
, "%+"PRIsVALUE
" is recycled object", rb_int2str(objid
, 10));
4378 os_id2ref(VALUE os
, VALUE objid
)
4380 return id2ref(objid
);
4384 rb_find_object_id(VALUE obj
, VALUE (*get_heap_object_id
)(VALUE
))
4386 if (STATIC_SYM_P(obj
)) {
4387 return (SYM2ID(obj
) * sizeof(RVALUE
) + (4 << 2)) | FIXNUM_FLAG
;
4389 else if (FLONUM_P(obj
)) {
4390 #if SIZEOF_LONG == SIZEOF_VOIDP
4391 return LONG2NUM((SIGNED_VALUE
)obj
);
4393 return LL2NUM((SIGNED_VALUE
)obj
);
4396 else if (SPECIAL_CONST_P(obj
)) {
4397 return LONG2NUM((SIGNED_VALUE
)obj
);
4400 return get_heap_object_id(obj
);
4404 cached_object_id(VALUE obj
)
4407 rb_objspace_t
*objspace
= &rb_objspace
;
4410 if (st_lookup(objspace
->obj_to_id_tbl
, (st_data_t
)obj
, &id
)) {
4411 GC_ASSERT(FL_TEST(obj
, FL_SEEN_OBJ_ID
));
4414 GC_ASSERT(!FL_TEST(obj
, FL_SEEN_OBJ_ID
));
4416 id
= objspace
->next_object_id
;
4417 objspace
->next_object_id
= rb_int_plus(id
, INT2FIX(OBJ_ID_INCREMENT
));
4419 VALUE already_disabled
= rb_gc_disable_no_rest();
4420 st_insert(objspace
->obj_to_id_tbl
, (st_data_t
)obj
, (st_data_t
)id
);
4421 st_insert(objspace
->id_to_obj_tbl
, (st_data_t
)id
, (st_data_t
)obj
);
4422 if (already_disabled
== Qfalse
) rb_objspace_gc_enable(objspace
);
4423 FL_SET(obj
, FL_SEEN_OBJ_ID
);
4431 nonspecial_obj_id_(VALUE obj
)
4433 return nonspecial_obj_id(obj
);
4438 rb_memory_id(VALUE obj
)
4440 return rb_find_object_id(obj
, nonspecial_obj_id_
);
/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of builtin classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *     Object.new.object_id  == Object.new.object_id  # => false
 *     (21 * 2).object_id    == (21 * 2).object_id    # => true
 *     "hello".object_id     == "hello".object_id     # => false
 *     "hi".freeze.object_id == "hi".freeze.object_id # => true
 */
4471 rb_obj_id(VALUE obj
)
4474 * 32-bit VALUE space
4475 * MSB ------------------------ LSB
4476 * false 00000000000000000000000000000000
4477 * true 00000000000000000000000000000010
4478 * nil 00000000000000000000000000000100
4479 * undef 00000000000000000000000000000110
4480 * symbol ssssssssssssssssssssssss00001110
4481 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4482 * fixnum fffffffffffffffffffffffffffffff1
4486 * false 00000000000000000000000000000000
4487 * true 00000000000000000000000000000010
4488 * nil 00000000000000000000000000000100
4489 * undef 00000000000000000000000000000110
4490 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4491 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4492 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4494 * where A = sizeof(RVALUE)/4
4497 * 20 if 32-bit, double is 4-byte aligned
4498 * 24 if 32-bit, double is 8-byte aligned
4502 return rb_find_object_id(obj
, cached_object_id
);
4505 static enum rb_id_table_iterator_result
4506 cc_table_memsize_i(VALUE ccs_ptr
, void *data_ptr
)
4508 size_t *total_size
= data_ptr
;
4509 struct rb_class_cc_entries
*ccs
= (struct rb_class_cc_entries
*)ccs_ptr
;
4510 *total_size
+= sizeof(*ccs
);
4511 *total_size
+= sizeof(ccs
->entries
[0]) * ccs
->capa
;
4512 return ID_TABLE_CONTINUE
;
4516 cc_table_memsize(struct rb_id_table
*cc_table
)
4518 size_t total
= rb_id_table_memsize(cc_table
);
4519 rb_id_table_foreach_values(cc_table
, cc_table_memsize_i
, &total
);
4524 obj_memsize_of(VALUE obj
, int use_all_types
)
4528 if (SPECIAL_CONST_P(obj
)) {
4532 if (FL_TEST(obj
, FL_EXIVAR
)) {
4533 size
+= rb_generic_ivar_memsize(obj
);
4536 switch (BUILTIN_TYPE(obj
)) {
4538 if (!(RBASIC(obj
)->flags
& ROBJECT_EMBED
)) {
4539 size
+= ROBJECT_NUMIV(obj
) * sizeof(VALUE
);
4544 if (RCLASS_EXT(obj
)) {
4545 if (RCLASS_M_TBL(obj
)) {
4546 size
+= rb_id_table_memsize(RCLASS_M_TBL(obj
));
4548 if (RCLASS_IV_TBL(obj
)) {
4549 size
+= st_memsize(RCLASS_IV_TBL(obj
));
4551 if (RCLASS_CVC_TBL(obj
)) {
4552 size
+= rb_id_table_memsize(RCLASS_CVC_TBL(obj
));
4554 if (RCLASS_IV_INDEX_TBL(obj
)) {
4555 // TODO: more correct value
4556 size
+= st_memsize(RCLASS_IV_INDEX_TBL(obj
));
4558 if (RCLASS_EXT(obj
)->iv_tbl
) {
4559 size
+= st_memsize(RCLASS_EXT(obj
)->iv_tbl
);
4561 if (RCLASS_EXT(obj
)->const_tbl
) {
4562 size
+= rb_id_table_memsize(RCLASS_EXT(obj
)->const_tbl
);
4564 if (RCLASS_CC_TBL(obj
)) {
4565 size
+= cc_table_memsize(RCLASS_CC_TBL(obj
));
4568 size
+= sizeof(rb_classext_t
);
4573 if (RICLASS_OWNS_M_TBL_P(obj
)) {
4574 if (RCLASS_M_TBL(obj
)) {
4575 size
+= rb_id_table_memsize(RCLASS_M_TBL(obj
));
4578 if (RCLASS_EXT(obj
) && RCLASS_CC_TBL(obj
)) {
4579 size
+= cc_table_memsize(RCLASS_CC_TBL(obj
));
4583 size
+= rb_str_memsize(obj
);
4586 size
+= rb_ary_memsize(obj
);
4589 if (RHASH_AR_TABLE_P(obj
)) {
4590 if (RHASH_AR_TABLE(obj
) != NULL
) {
4591 size_t rb_hash_ar_table_size(void);
4592 size
+= rb_hash_ar_table_size();
4596 VM_ASSERT(RHASH_ST_TABLE(obj
) != NULL
);
4597 size
+= st_memsize(RHASH_ST_TABLE(obj
));
4601 if (RREGEXP_PTR(obj
)) {
4602 size
+= onig_memsize(RREGEXP_PTR(obj
));
4606 if (use_all_types
) size
+= rb_objspace_data_type_memsize(obj
);
4609 if (RMATCH(obj
)->rmatch
) {
4610 struct rmatch
*rm
= RMATCH(obj
)->rmatch
;
4611 size
+= onig_region_memsize(&rm
->regs
);
4612 size
+= sizeof(struct rmatch_offset
) * rm
->char_offset_num_allocated
;
4613 size
+= sizeof(struct rmatch
);
4617 if (RFILE(obj
)->fptr
) {
4618 size
+= rb_io_memsize(RFILE(obj
)->fptr
);
4625 size
+= imemo_memsize(obj
);
4633 if (!(RBASIC(obj
)->flags
& BIGNUM_EMBED_FLAG
) && BIGNUM_DIGITS(obj
)) {
4634 size
+= BIGNUM_LEN(obj
) * sizeof(BDIGIT
);
4639 UNEXPECTED_NODE(obj_memsize_of
);
4643 if ((RBASIC(obj
)->flags
& RSTRUCT_EMBED_LEN_MASK
) == 0 &&
4644 RSTRUCT(obj
)->as
.heap
.ptr
) {
4645 size
+= sizeof(VALUE
) * RSTRUCT_LEN(obj
);
4654 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4655 BUILTIN_TYPE(obj
), (void*)obj
);
4658 return size
+ GET_HEAP_PAGE(obj
)->slot_size
;
size_t
rb_obj_memsize_of(VALUE obj)
{
    return obj_memsize_of(obj, TRUE);
}

static int
set_zero(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE k = (VALUE)key;
    VALUE hash = (VALUE)arg;
    rb_hash_aset(hash, k, INT2FIX(0));
    return ST_CONTINUE;
}
4677 type_sym(size_t type
)
4680 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4682 COUNT_TYPE(T_OBJECT
);
4683 COUNT_TYPE(T_CLASS
);
4684 COUNT_TYPE(T_MODULE
);
4685 COUNT_TYPE(T_FLOAT
);
4686 COUNT_TYPE(T_STRING
);
4687 COUNT_TYPE(T_REGEXP
);
4688 COUNT_TYPE(T_ARRAY
);
4690 COUNT_TYPE(T_STRUCT
);
4691 COUNT_TYPE(T_BIGNUM
);
4694 COUNT_TYPE(T_MATCH
);
4695 COUNT_TYPE(T_COMPLEX
);
4696 COUNT_TYPE(T_RATIONAL
);
4699 COUNT_TYPE(T_FALSE
);
4700 COUNT_TYPE(T_SYMBOL
);
4701 COUNT_TYPE(T_FIXNUM
);
4702 COUNT_TYPE(T_IMEMO
);
4703 COUNT_TYPE(T_UNDEF
);
4705 COUNT_TYPE(T_ICLASS
);
4706 COUNT_TYPE(T_ZOMBIE
);
4707 COUNT_TYPE(T_MOVED
);
4709 default: return SIZET2NUM(type
); break;
/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts all objects grouped by type.
 *
 *  It returns a hash, such as:
 *
 *  The contents of the returned hash are implementation specific.
 *  It may be changed in the future.
 *
 *  The keys starting with +:T_+ mean live objects.
 *  For example, +:T_ARRAY+ is the number of arrays.
 *  +:FREE+ means object slots which are not currently in use.
 *  +:TOTAL+ means the sum of the above.
 *
 *  If the optional argument +result_hash+ is given,
 *  it is overwritten and returned. This is intended to avoid the probe effect.
 *
 *      ObjectSpace.count_objects(h)
 *      # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
 *
 *  This method is only expected to work on C Ruby.
 */
4749 count_objects(int argc
, VALUE
*argv
, VALUE os
)
4751 rb_objspace_t
*objspace
= &rb_objspace
;
4752 size_t counts
[T_MASK
+1];
4758 if (rb_check_arity(argc
, 0, 1) == 1) {
4760 if (!RB_TYPE_P(hash
, T_HASH
))
4761 rb_raise(rb_eTypeError
, "non-hash given");
4764 for (i
= 0; i
<= T_MASK
; i
++) {
4768 for (i
= 0; i
< heap_allocated_pages
; i
++) {
4769 struct heap_page
*page
= heap_pages_sorted
[i
];
4770 short stride
= page
->slot_size
;
4772 uintptr_t p
= (uintptr_t)page
->start
;
4773 uintptr_t pend
= p
+ page
->total_slots
* stride
;
4774 for (;p
< pend
; p
+= stride
) {
4775 VALUE vp
= (VALUE
)p
;
4776 GC_ASSERT((NUM_IN_PAGE(vp
) * sizeof(RVALUE
)) % page
->slot_size
== 0);
4778 void *poisoned
= asan_poisoned_object_p(vp
);
4779 asan_unpoison_object(vp
, false);
4780 if (RANY(p
)->as
.basic
.flags
) {
4781 counts
[BUILTIN_TYPE(vp
)]++;
4787 GC_ASSERT(BUILTIN_TYPE(vp
) == T_NONE
);
4788 asan_poison_object(vp
);
4791 total
+= page
->total_slots
;
4795 hash
= rb_hash_new();
4797 else if (!RHASH_EMPTY_P(hash
)) {
4798 rb_hash_stlike_foreach(hash
, set_zero
, hash
);
4800 rb_hash_aset(hash
, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total
));
4801 rb_hash_aset(hash
, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed
));
4803 for (i
= 0; i
<= T_MASK
; i
++) {
4804 VALUE type
= type_sym(i
);
4806 rb_hash_aset(hash
, type
, SIZET2NUM(counts
[i
]));
/*
  ------------------------ Garbage Collection ------------------------
*/
static size_t
objspace_available_slots(rb_objspace_t *objspace)
{
    size_t total_slots = 0;
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
        total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
    }
    return total_slots;
}

static size_t
objspace_live_slots(rb_objspace_t *objspace)
{
    return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
}

static size_t
objspace_free_slots(rb_objspace_t *objspace)
{
    return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
}
static void
gc_setup_mark_bits(struct heap_page *page)
{
    /* copy oldgen bitmap to mark bitmap */
    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
}

static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size);
static void
lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
#if defined(_WIN32)
    DWORD old_protect;

    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
#else
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
#endif
        rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
    }
}

static void
unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
#if defined(_WIN32)
    DWORD old_protect;

    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
#else
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
#endif
        rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
    }
    else {
        gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
    }
}
4887 try_move_plane(rb_objspace_t
*objspace
, rb_heap_t
*heap
, struct heap_page
*page
, uintptr_t p
, bits_t bits
, VALUE dest
)
4892 /* We're trying to move "p" */
4893 objspace
->rcompactor
.considered_count_table
[BUILTIN_TYPE((VALUE
)p
)]++;
4895 if (gc_is_moveable_obj(objspace
, (VALUE
)p
)) {
4896 /* We were able to move "p" */
4897 objspace
->rcompactor
.moved_count_table
[BUILTIN_TYPE((VALUE
)p
)]++;
4898 objspace
->rcompactor
.total_moved
++;
4900 bool from_freelist
= false;
4902 if (BUILTIN_TYPE(dest
) == T_NONE
) {
4903 from_freelist
= true;
4906 gc_move(objspace
, (VALUE
)p
, dest
, page
->slot_size
);
4907 gc_pin(objspace
, (VALUE
)p
);
4908 heap
->compact_cursor_index
= (RVALUE
*)p
;
4909 if (from_freelist
) {
4910 FL_SET((VALUE
)p
, FL_FROM_FREELIST
);
4916 p
+= sizeof(RVALUE
);
4925 try_move(rb_objspace_t
*objspace
, rb_heap_t
*heap
, struct heap_page
*sweep_page
, VALUE dest
)
4927 struct heap_page
* cursor
= heap
->compact_cursor
;
4929 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest
), dest
));
4931 /* T_NONE objects came from the free list. If the object is *not* a
4932 * T_NONE, it is an object that just got freed but hasn't been
4933 * added to the freelist yet */
4938 bits_t
*mark_bits
= cursor
->mark_bits
;
4939 bits_t
*pin_bits
= cursor
->pinned_bits
;
4942 if (heap
->compact_cursor_index
) {
4943 index
= BITMAP_INDEX(heap
->compact_cursor_index
);
4944 p
= heap
->compact_cursor_index
;
4945 GC_ASSERT(cursor
== GET_HEAP_PAGE(p
));
4952 bits_t bits
= mark_bits
[index
] & ~pin_bits
[index
];
4954 bits
>>= NUM_IN_PAGE(p
);
4955 if (try_move_plane(objspace
, heap
, sweep_page
, (uintptr_t)p
, bits
, dest
)) return 1;
4958 p
= cursor
->start
+ (BITS_BITLENGTH
- NUM_IN_PAGE(cursor
->start
));
4961 p
= cursor
->start
+ (BITS_BITLENGTH
- NUM_IN_PAGE(cursor
->start
)) + (BITS_BITLENGTH
* index
);
4964 /* Find an object to move and move it. Movable objects must be
4965 * marked, so we iterate using the marking bitmap */
4966 for (size_t i
= index
+ 1; i
< HEAP_PAGE_BITMAP_LIMIT
; i
++) {
4967 bits_t bits
= mark_bits
[i
] & ~pin_bits
[i
];
4968 if (try_move_plane(objspace
, heap
, sweep_page
, (uintptr_t)p
, bits
, dest
)) return 1;
4969 p
+= BITS_BITLENGTH
;
4972 /* We couldn't find a movable object on the compact cursor, so lets
4973 * move to the next page (previous page since we are traveling in the
4974 * opposite direction of the sweep cursor) and look there. */
4976 struct heap_page
* next
;
4978 next
= list_prev(&heap
->pages
, cursor
, page_node
);
4980 /* Protect the current cursor since it probably has T_MOVED slots. */
4981 lock_page_body(objspace
, GET_PAGE_BODY(cursor
->start
));
4983 heap
->compact_cursor
= next
;
4984 heap
->compact_cursor_index
= 0;
4987 // Cursors have met, lets quit. We set `heap->compact_cursor` equal
4988 // to `heap->sweeping_page` so we know how far to iterate through
4989 // the heap when unprotecting pages.
4990 if (next
== sweep_page
) {
4999 gc_unprotect_pages(rb_objspace_t
*objspace
, rb_heap_t
*heap
)
5001 struct heap_page
*cursor
= heap
->compact_cursor
;
5004 unlock_page_body(objspace
, GET_PAGE_BODY(cursor
->start
));
5005 cursor
= list_next(&heap
->pages
, cursor
, page_node
);
5009 static void gc_update_references(rb_objspace_t
* objspace
);
5010 static void invalidate_moved_page(rb_objspace_t
*objspace
, struct heap_page
*page
);
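/* Descriptive note: during compaction, pages that may contain T_MOVED slots
 * are protected with lock_page_body().  A stray access to such a page then
 * faults, and the handler below unprotects the page and invalidates the moved
 * objects on it, acting as a software read barrier. */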
static void
read_barrier_handler(uintptr_t address)
{
    VALUE obj;
    rb_objspace_t * objspace = &rb_objspace;

    address -= address % sizeof(RVALUE);

    obj = (VALUE)address;

    unlock_page_body(objspace, GET_PAGE_BODY(obj));

    objspace->profile.read_barrier_faults++;

    invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
}
5034 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler
;
5035 typedef void (*signal_handler
)(int);
5036 static signal_handler old_sigsegv_handler
;
5039 read_barrier_signal(EXCEPTION_POINTERS
* info
)
5041 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5042 if (info
->ExceptionRecord
->ExceptionCode
== EXCEPTION_ACCESS_VIOLATION
) {
5043 /* > The second array element specifies the virtual address of the inaccessible data.
5044 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5046 * Use this address to invalidate the page */
5047 read_barrier_handler((uintptr_t)info
->ExceptionRecord
->ExceptionInformation
[1]);
5048 return EXCEPTION_CONTINUE_EXECUTION
;
5051 return EXCEPTION_CONTINUE_SEARCH
;
5056 uninstall_handlers(void)
5058 signal(SIGSEGV
, old_sigsegv_handler
);
5059 SetUnhandledExceptionFilter(old_handler
);
5063 install_handlers(void)
5065 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5066 old_sigsegv_handler
= signal(SIGSEGV
, NULL
);
5067 /* Unhandled Exception Filter has access to the violation address similar
5068 * to si_addr from sigaction */
5069 old_handler
= SetUnhandledExceptionFilter(read_barrier_signal
);
5072 static struct sigaction old_sigbus_handler
;
5073 static struct sigaction old_sigsegv_handler
;
5076 read_barrier_signal(int sig
, siginfo_t
* info
, void * data
)
5078 // setup SEGV/BUS handlers for errors
5079 struct sigaction prev_sigbus
, prev_sigsegv
;
5080 sigaction(SIGBUS
, &old_sigbus_handler
, &prev_sigbus
);
5081 sigaction(SIGSEGV
, &old_sigsegv_handler
, &prev_sigsegv
);
5083 // enable SIGBUS/SEGV
5084 sigset_t set
, prev_set
;
5086 sigaddset(&set
, SIGBUS
);
5087 sigaddset(&set
, SIGSEGV
);
5088 sigprocmask(SIG_UNBLOCK
, &set
, &prev_set
);
5091 read_barrier_handler((uintptr_t)info
->si_addr
);
5093 // reset SEGV/BUS handlers
5094 sigaction(SIGBUS
, &prev_sigbus
, NULL
);
5095 sigaction(SIGSEGV
, &prev_sigsegv
, NULL
);
5096 sigprocmask(SIG_SETMASK
, &prev_set
, NULL
);
5100 uninstall_handlers(void)
5102 sigaction(SIGBUS
, &old_sigbus_handler
, NULL
);
5103 sigaction(SIGSEGV
, &old_sigsegv_handler
, NULL
);
5107 install_handlers(void)
5109 struct sigaction action
;
5110 memset(&action
, 0, sizeof(struct sigaction
));
5111 sigemptyset(&action
.sa_mask
);
5112 action
.sa_sigaction
= read_barrier_signal
;
5113 action
.sa_flags
= SA_SIGINFO
| SA_ONSTACK
;
5115 sigaction(SIGBUS
, &action
, &old_sigbus_handler
);
5116 sigaction(SIGSEGV
, &action
, &old_sigsegv_handler
);
5121 revert_stack_objects(VALUE stack_obj
, void *ctx
)
5123 rb_objspace_t
* objspace
= (rb_objspace_t
*)ctx
;
5125 if (BUILTIN_TYPE(stack_obj
) == T_MOVED
) {
5126 /* For now we'll revert the whole page if the object made it to the
5127 * stack. I think we can change this to move just the one object
5129 invalidate_moved_page(objspace
, GET_HEAP_PAGE(stack_obj
));
5134 revert_machine_stack_references(rb_objspace_t
*objspace
, VALUE v
)
5136 if (is_pointer_to_heap(objspace
, (void *)v
)) {
5137 if (BUILTIN_TYPE(v
) == T_MOVED
) {
5138 /* For now we'll revert the whole page if the object made it to the
5139 * stack. I think we can change this to move just the one object
5141 invalidate_moved_page(objspace
, GET_HEAP_PAGE(v
));
5146 static void each_machine_stack_value(const rb_execution_context_t
*ec
, void (*cb
)(rb_objspace_t
*, VALUE
));
5149 check_stack_for_moved(rb_objspace_t
*objspace
)
5151 rb_execution_context_t
*ec
= GET_EC();
5152 rb_vm_t
*vm
= rb_ec_vm_ptr(ec
);
5153 rb_vm_each_stack_value(vm
, revert_stack_objects
, (void*)objspace
);
5154 each_machine_stack_value(ec
, revert_machine_stack_references
);
5158 gc_compact_finish(rb_objspace_t
*objspace
, rb_size_pool_t
*pool
, rb_heap_t
*heap
)
5160 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5161 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5162 rb_heap_t
*heap
= SIZE_POOL_EDEN_HEAP(size_pool
);
5163 gc_unprotect_pages(objspace
, heap
);
5166 uninstall_handlers();
5168 /* The mutator is allowed to run during incremental sweeping. T_MOVED
5169 * objects can get pushed on the stack and when the compaction process
5170 * finishes up, it may remove the read barrier before anything has a
5171 * chance to read from the T_MOVED address. To fix this, we scan the stack
5172 * then revert any moved objects that made it to the stack. */
5173 check_stack_for_moved(objspace
);
5175 gc_update_references(objspace
);
5176 objspace
->profile
.compact_count
++;
5178 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5179 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5180 rb_heap_t
*heap
= SIZE_POOL_EDEN_HEAP(size_pool
);
5181 heap
->compact_cursor
= NULL
;
5182 heap
->compact_cursor_index
= 0;
5185 if (gc_prof_enabled(objspace
)) {
5186 gc_profile_record
*record
= gc_prof_record(objspace
);
5187 record
->moved_objects
= objspace
->rcompactor
.total_moved
- record
->moved_objects
;
5189 objspace
->flags
.during_compacting
= FALSE
;
5192 struct gc_sweep_context
{
5193 struct heap_page
*page
;
5200 gc_fill_swept_plane(rb_objspace_t
*objspace
, rb_heap_t
*heap
, uintptr_t p
, bits_t bitset
, bool *finished_compacting
, struct gc_sweep_context
*ctx
)
5202 struct heap_page
* sweep_page
= ctx
->page
;
5205 short slot_size
= sweep_page
->slot_size
;
5206 short slot_bits
= slot_size
/ sizeof(RVALUE
);
5210 VALUE dest
= (VALUE
)p
;
5212 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest
), dest
));
5213 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest
), dest
));
5215 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest
), dest
);
5217 if (*finished_compacting
) {
5218 if (BUILTIN_TYPE(dest
) == T_NONE
) {
5224 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest
, sizeof(RVALUE
));
5225 heap_page_add_freeobj(objspace
, sweep_page
, dest
);
5228 /* Zombie slots don't get marked, but we can't reuse
5229 * their memory until they have their finalizers run.*/
5230 if (BUILTIN_TYPE(dest
) != T_ZOMBIE
) {
5231 if (!try_move(objspace
, heap
, sweep_page
, dest
)) {
5232 *finished_compacting
= true;
5233 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p
, sizeof(RVALUE
));
5234 gc_report(5, objspace
, "Quit compacting, couldn't find an object to move\n");
5235 if (BUILTIN_TYPE(dest
) == T_NONE
) {
5241 heap_page_add_freeobj(objspace
, sweep_page
, dest
);
5242 gc_report(3, objspace
, "page_sweep: %s is added to freelist\n", obj_info(dest
));
5251 bitset
>>= slot_bits
;
5257 gc_fill_swept_page(rb_objspace_t
*objspace
, rb_heap_t
*heap
, struct heap_page
*sweep_page
, struct gc_sweep_context
*ctx
)
5259 /* Find any pinned but not marked objects and try to fill those slots */
5260 bool finished_compacting
= false;
5261 bits_t
*mark_bits
, *pin_bits
;
5265 mark_bits
= sweep_page
->mark_bits
;
5266 pin_bits
= sweep_page
->pinned_bits
;
5268 p
= (uintptr_t)sweep_page
->start
;
5270 struct heap_page
* cursor
= heap
->compact_cursor
;
5272 unlock_page_body(objspace
, GET_PAGE_BODY(cursor
->start
));
5274 /* *Want to move* objects are pinned but not marked. */
5275 bitset
= pin_bits
[0] & ~mark_bits
[0];
5276 bitset
>>= NUM_IN_PAGE(p
); // Skip header / dead space bits
5277 gc_fill_swept_plane(objspace
, heap
, (uintptr_t)p
, bitset
, &finished_compacting
, ctx
);
5278 p
+= ((BITS_BITLENGTH
- NUM_IN_PAGE(p
)) * sizeof(RVALUE
));
5280 for (int i
= 1; i
< HEAP_PAGE_BITMAP_LIMIT
; i
++) {
5281 /* *Want to move* objects are pinned but not marked. */
5282 bitset
= pin_bits
[i
] & ~mark_bits
[i
];
5283 gc_fill_swept_plane(objspace
, heap
, (uintptr_t)p
, bitset
, &finished_compacting
, ctx
);
5284 p
+= ((BITS_BITLENGTH
) * sizeof(RVALUE
));
5287 lock_page_body(objspace
, GET_PAGE_BODY(heap
->compact_cursor
->start
));
5289 return finished_compacting
;
5293 gc_sweep_plane(rb_objspace_t
*objspace
, rb_heap_t
*heap
, uintptr_t p
, bits_t bitset
, struct gc_sweep_context
*ctx
)
5295 struct heap_page
* sweep_page
= ctx
->page
;
5296 short slot_size
= sweep_page
->slot_size
;
5297 short slot_bits
= slot_size
/ sizeof(RVALUE
);
5298 GC_ASSERT(slot_bits
> 0);
5301 VALUE vp
= (VALUE
)p
;
5302 GC_ASSERT(vp
% sizeof(RVALUE
) == 0);
5304 asan_unpoison_object(vp
, false);
5306 switch (BUILTIN_TYPE(vp
)) {
5307 default: /* majority case */
5308 gc_report(2, objspace
, "page_sweep: free %p\n", (void *)p
);
5309 #if RGENGC_CHECK_MODE
5310 if (!is_full_marking(objspace
)) {
5311 if (RVALUE_OLD_P(vp
)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p
);
5312 if (rgengc_remembered_sweep(objspace
, vp
)) rb_bug("page_sweep: %p - remembered.", (void *)p
);
5315 if (obj_free(objspace
, vp
)) {
5316 if (heap
->compact_cursor
) {
5317 /* We *want* to fill this slot */
5318 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp
), vp
);
5321 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p
, sizeof(RVALUE
));
5322 heap_page_add_freeobj(objspace
, sweep_page
, vp
);
5323 gc_report(3, objspace
, "page_sweep: %s is added to freelist\n", obj_info(vp
));
5333 if (objspace
->flags
.during_compacting
) {
5334 /* The sweep cursor shouldn't have made it to any
5335 * T_MOVED slots while the compact flag is enabled.
5336 * The sweep cursor and compact cursor move in
5337 * opposite directions, and when they meet references will
5338 * get updated and "during_compacting" should get disabled */
5339 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5341 gc_report(3, objspace
, "page_sweep: %s is added to freelist\n", obj_info(vp
));
5342 if (FL_TEST(vp
, FL_FROM_FREELIST
)) {
5348 heap_page_add_freeobj(objspace
, sweep_page
, vp
);
5351 /* already counted */
5354 if (heap
->compact_cursor
) {
5355 /* We *want* to fill this slot */
5356 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp
), vp
);
5359 ctx
->empty_slots
++; /* already freed */
5365 bitset
>>= slot_bits
;
5370 gc_sweep_page(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
, rb_heap_t
*heap
, struct gc_sweep_context
*ctx
)
5372 struct heap_page
*sweep_page
= ctx
->page
;
5377 bits_t
*bits
, bitset
;
5379 gc_report(2, objspace
, "page_sweep: start.\n");
5381 if (heap
->compact_cursor
) {
5382 if (sweep_page
== heap
->compact_cursor
) {
5383 /* The compaction cursor and sweep page met, so we need to quit compacting */
5384 gc_report(5, objspace
, "Quit compacting, mark and compact cursor met\n");
5385 gc_compact_finish(objspace
, size_pool
, heap
);
5388 /* We anticipate filling the page, so NULL out the freelist. */
5389 asan_unpoison_memory_region(&sweep_page
->freelist
, sizeof(RVALUE
*), false);
5390 sweep_page
->freelist
= NULL
;
5391 asan_poison_memory_region(&sweep_page
->freelist
, sizeof(RVALUE
*));
5395 sweep_page
->flags
.before_sweep
= FALSE
;
5396 sweep_page
->free_slots
= 0;
5398 p
= sweep_page
->start
;
5399 bits
= sweep_page
->mark_bits
;
5401 int page_rvalue_count
= sweep_page
->total_slots
* (size_pool
->slot_size
/ sizeof(RVALUE
));
5402 int out_of_range_bits
= (NUM_IN_PAGE(p
) + page_rvalue_count
) % BITS_BITLENGTH
;
5403 if (out_of_range_bits
!= 0) { // sizeof(RVALUE) == 64
5404 bits
[BITMAP_INDEX(p
) + page_rvalue_count
/ BITS_BITLENGTH
] |= ~(((bits_t
)1 << out_of_range_bits
) - 1);
5407 // Skip out of range slots at the head of the page
5409 bitset
>>= NUM_IN_PAGE(p
);
5411 gc_sweep_plane(objspace
, heap
, (uintptr_t)p
, bitset
, ctx
);
5413 p
+= (BITS_BITLENGTH
- NUM_IN_PAGE(p
));
5415 for (i
=1; i
< HEAP_PAGE_BITMAP_LIMIT
; i
++) {
5418 gc_sweep_plane(objspace
, heap
, (uintptr_t)p
, bitset
, ctx
);
5420 p
+= BITS_BITLENGTH
;
5423 if (heap
->compact_cursor
) {
5424 if (gc_fill_swept_page(objspace
, heap
, sweep_page
, ctx
)) {
5425 gc_compact_finish(objspace
, size_pool
, heap
);
5429 if (!heap
->compact_cursor
) {
5430 gc_setup_mark_bits(sweep_page
);
5433 #if GC_PROFILE_MORE_DETAIL
5434 if (gc_prof_enabled(objspace
)) {
5435 gc_profile_record
*record
= gc_prof_record(objspace
);
5436 record
->removing_objects
+= ctx
->final_slots
+ ctx
->freed_slots
;
5437 record
->empty_objects
+= ctx
->empty_slots
;
5440 if (0) fprintf(stderr
, "gc_sweep_page(%"PRIdSIZE
"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5442 sweep_page
->total_slots
,
5443 ctx
->freed_slots
, ctx
->empty_slots
, ctx
->final_slots
);
5445 sweep_page
->free_slots
+= ctx
->freed_slots
+ ctx
->empty_slots
;
5446 objspace
->profile
.total_freed_objects
+= ctx
->freed_slots
;
5448 if (heap_pages_deferred_final
&& !finalizing
) {
5449 rb_thread_t
*th
= GET_THREAD();
5451 gc_finalize_deferred_register(objspace
);
5455 #if RGENGC_CHECK_MODE
5456 short freelist_len
= 0;
5457 RVALUE
*ptr
= sweep_page
->freelist
;
5460 ptr
= ptr
->as
.free
.next
;
5462 if (freelist_len
!= sweep_page
->free_slots
) {
5463 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page
->free_slots
, freelist_len
);
5467 gc_report(2, objspace
, "page_sweep: end.\n");
5471 /* allocate additional minimum page to work */
5473 gc_heap_prepare_minimum_pages(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
, rb_heap_t
*heap
)
5475 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5476 if (!heap
->free_pages
&& heap_increment(objspace
, size_pool
, heap
) == FALSE
) {
5477 /* there is no free after page_sweep() */
5478 size_pool_allocatable_pages_set(objspace
, size_pool
, 1);
5479 if (!heap_increment(objspace
, size_pool
, heap
)) { /* can't allocate additional free objects */
static const char *
gc_mode_name(enum gc_mode mode)
{
    switch (mode) {
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}

static void
gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
{
#if RGENGC_CHECK_MODE
    enum gc_mode prev_mode = gc_mode(objspace);
    switch (prev_mode) {
      case gc_mode_none:     GC_ASSERT(mode == gc_mode_marking); break;
      case gc_mode_marking:  GC_ASSERT(mode == gc_mode_sweeping); break;
      case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
    }
#endif
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
    gc_mode_set(objspace, mode);
}
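/* Descriptive note: heap_page_freelist_append() below splices the given
 * freelist (for example one handed back from a ractor's newobj cache) onto
 * the page's own freelist, temporarily unpoisoning the list nodes for ASAN
 * while walking to the tail. */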
5514 heap_page_freelist_append(struct heap_page
*page
, RVALUE
*freelist
)
5517 asan_unpoison_memory_region(&page
->freelist
, sizeof(RVALUE
*), false);
5518 if (page
->freelist
) {
5519 RVALUE
*p
= page
->freelist
;
5520 asan_unpoison_object((VALUE
)p
, false);
5521 while (p
->as
.free
.next
) {
5523 p
= p
->as
.free
.next
;
5524 asan_poison_object((VALUE
)prev
);
5525 asan_unpoison_object((VALUE
)p
, false);
5527 p
->as
.free
.next
= freelist
;
5528 asan_poison_object((VALUE
)p
);
5531 page
->freelist
= freelist
;
5533 asan_poison_memory_region(&page
->freelist
, sizeof(RVALUE
*));
5538 gc_sweep_start_heap(rb_objspace_t
*objspace
, rb_heap_t
*heap
)
5540 heap
->sweeping_page
= list_top(&heap
->pages
, struct heap_page
, page_node
);
5541 heap
->free_pages
= NULL
;
5542 #if GC_ENABLE_INCREMENTAL_MARK
5543 heap
->pooled_pages
= NULL
;
5547 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5548 __attribute__((noinline
))
5551 gc_sweep_start(rb_objspace_t
*objspace
)
5553 gc_mode_transition(objspace
, gc_mode_sweeping
);
5555 #if GC_ENABLE_INCREMENTAL_MARK
5556 objspace
->rincgc
.pooled_slots
= 0;
5559 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5560 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5562 gc_sweep_start_heap(objspace
, SIZE_POOL_EDEN_HEAP(size_pool
));
5565 rb_ractor_t
*r
= NULL
;
5566 list_for_each(&GET_VM()->ractor
.set
, r
, vmlr_node
) {
5567 rb_gc_ractor_newobj_cache_clear(&r
->newobj_cache
);
5573 gc_sweep_finish_size_pool(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
)
5575 rb_heap_t
*heap
= SIZE_POOL_EDEN_HEAP(size_pool
);
5576 size_t total_slots
= heap
->total_slots
+ SIZE_POOL_TOMB_HEAP(size_pool
)->total_slots
;
5577 size_t total_pages
= heap
->total_pages
+ SIZE_POOL_TOMB_HEAP(size_pool
)->total_pages
;
5578 size_t swept_slots
= size_pool
->freed_slots
+ size_pool
->empty_slots
;
5580 size_t min_free_slots
= (size_t)(total_slots
* gc_params
.heap_free_slots_min_ratio
);
5582 if (swept_slots
< min_free_slots
) {
5583 bool grow_heap
= is_full_marking(objspace
);
5585 if (!is_full_marking(objspace
)) {
5586 /* The heap is a growth heap if it freed more slots than had empty slots. */
5587 bool is_growth_heap
= size_pool
->empty_slots
== 0 ||
5588 size_pool
->freed_slots
> size_pool
->empty_slots
;
5590 if (objspace
->profile
.count
- objspace
->rgengc
.last_major_gc
< RVALUE_OLD_AGE
) {
5593 else if (is_growth_heap
) { /* Only growth heaps are allowed to start a major GC. */
5594 objspace
->rgengc
.need_major_gc
|= GPR_FLAG_MAJOR_BY_NOFREE
;
5595 size_pool
->force_major_gc_count
++;
5600 size_t extend_page_count
= heap_extend_pages(objspace
, swept_slots
, total_slots
, total_pages
);
5602 if (extend_page_count
> size_pool
->allocatable_pages
) {
5603 size_pool_allocatable_pages_set(objspace
, size_pool
, extend_page_count
);
5606 heap_increment(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
5613 gc_sweep_finish(rb_objspace_t
*objspace
)
5615 gc_report(1, objspace
, "gc_sweep_finish\n");
5617 gc_prof_set_heap_info(objspace
);
5618 heap_pages_free_unused_pages(objspace
);
5620 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5621 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5623 /* if heap_pages has unused pages, then assign them to increment */
5624 size_t tomb_pages
= SIZE_POOL_TOMB_HEAP(size_pool
)->total_pages
;
5625 if (size_pool
->allocatable_pages
< tomb_pages
) {
5626 size_pool
->allocatable_pages
= tomb_pages
;
5630 size_pool
->freed_slots
= 0;
5631 size_pool
->empty_slots
= 0;
5633 #if GC_ENABLE_INCREMENTAL_MARK
5634 if (!will_be_incremental_marking(objspace
)) {
5635 rb_heap_t
*eden_heap
= SIZE_POOL_EDEN_HEAP(size_pool
);
5636 struct heap_page
*end_page
= eden_heap
->free_pages
;
5638 while (end_page
->free_next
) end_page
= end_page
->free_next
;
5639 end_page
->free_next
= eden_heap
->pooled_pages
;
5642 eden_heap
->free_pages
= eden_heap
->pooled_pages
;
5644 eden_heap
->pooled_pages
= NULL
;
5645 objspace
->rincgc
.pooled_slots
= 0;
5650 heap_pages_expand_sorted(objspace
);
5652 gc_event_hook(objspace
, RUBY_INTERNAL_EVENT_GC_END_SWEEP
, 0);
5653 gc_mode_transition(objspace
, gc_mode_none
);
5655 #if RGENGC_CHECK_MODE >= 2
5656 gc_verify_internal_consistency(objspace
);
5661 gc_sweep_step(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
, rb_heap_t
*heap
)
5663 struct heap_page
*sweep_page
= heap
->sweeping_page
;
5664 int unlink_limit
= 3;
5666 #if GC_ENABLE_INCREMENTAL_MARK
5667 int swept_slots
= 0;
5669 bool need_pool
= TRUE
;
5671 int need_pool
= will_be_incremental_marking(objspace
) ? TRUE
: FALSE
;
5674 gc_report(2, objspace
, "gc_sweep_step (need_pool: %d)\n", need_pool
);
5676 gc_report(2, objspace
, "gc_sweep_step\n");
5679 if (sweep_page
== NULL
) return FALSE
;
5681 #if GC_ENABLE_LAZY_SWEEP
5682 gc_prof_sweep_timer_start(objspace
);
5686 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page
);
5688 struct gc_sweep_context ctx
= {
5694 gc_sweep_page(objspace
, size_pool
, heap
, &ctx
);
5695 int free_slots
= ctx
.freed_slots
+ ctx
.empty_slots
;
5697 heap
->sweeping_page
= list_next(&heap
->pages
, sweep_page
, page_node
);
5699 if (sweep_page
->final_slots
+ free_slots
== sweep_page
->total_slots
&&
5700 heap_pages_freeable_pages
> 0 &&
5702 heap_pages_freeable_pages
--;
5704 /* there are no living objects -> move this page to tomb heap */
5705 heap_unlink_page(objspace
, heap
, sweep_page
);
5706 heap_add_page(objspace
, size_pool
, SIZE_POOL_TOMB_HEAP(size_pool
), sweep_page
);
5708 else if (free_slots
> 0) {
5710 size_pool
->freed_slots
+= ctx
.freed_slots
;
5711 size_pool
->empty_slots
+= ctx
.empty_slots
;
5714 #if GC_ENABLE_INCREMENTAL_MARK
5716 heap_add_poolpage(objspace
, heap
, sweep_page
);
5720 heap_add_freepage(heap
, sweep_page
);
5721 swept_slots
+= free_slots
;
5722 if (swept_slots
> 2048) {
5727 heap_add_freepage(heap
, sweep_page
);
5732 sweep_page
->free_next
= NULL
;
5734 } while ((sweep_page
= heap
->sweeping_page
));
5736 if (!heap
->sweeping_page
) {
5738 gc_sweep_finish_size_pool(objspace
, size_pool
);
5741 if (!has_sweeping_pages(objspace
)) {
5742 gc_sweep_finish(objspace
);
5746 #if GC_ENABLE_LAZY_SWEEP
5747 gc_prof_sweep_timer_stop(objspace
);
5750 return heap
->free_pages
!= NULL
;
5754 gc_sweep_rest(rb_objspace_t
*objspace
)
5756 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5757 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5759 while (SIZE_POOL_EDEN_HEAP(size_pool
)->sweeping_page
) {
5760 gc_sweep_step(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
5766 gc_sweep_continue(rb_objspace_t
*objspace
, rb_size_pool_t
*sweep_size_pool
, rb_heap_t
*heap
)
5768 GC_ASSERT(dont_gc_val() == FALSE
);
5769 if (!GC_ENABLE_LAZY_SWEEP
) return;
5771 unsigned int lock_lev
;
5772 gc_enter(objspace
, gc_enter_event_sweep_continue
, &lock_lev
);
5774 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5775 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5776 if (!gc_sweep_step(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
))) {
5778 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
5779 if (size_pool
== sweep_size_pool
) {
5780 if (size_pool
->allocatable_pages
> 0) {
5781 heap_increment(objspace
, size_pool
, heap
);
5784 /* Not allowed to create a new page so finish sweeping. */
5785 gc_sweep_rest(objspace
);
5793 gc_exit(objspace
, gc_enter_event_sweep_continue
, &lock_lev
);
5797 invalidate_moved_plane(rb_objspace_t
*objspace
, struct heap_page
*page
, uintptr_t p
, bits_t bitset
)
5802 VALUE forwarding_object
= (VALUE
)p
;
5805 if (BUILTIN_TYPE(forwarding_object
) == T_MOVED
) {
5806 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object
), forwarding_object
));
5807 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object
), forwarding_object
));
5809 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object
), forwarding_object
);
5811 bool from_freelist
= FL_TEST_RAW(forwarding_object
, FL_FROM_FREELIST
);
5812 object
= rb_gc_location(forwarding_object
);
5814 gc_move(objspace
, object
, forwarding_object
, page
->slot_size
);
5815 /* forwarding_object is now our actual object, and "object"
5816 * is the free slot for the original page */
5817 struct heap_page
*orig_page
= GET_HEAP_PAGE(object
);
5818 orig_page
->free_slots
++;
5819 if (!from_freelist
) {
5820 objspace
->profile
.total_freed_objects
++;
5822 heap_page_add_freeobj(objspace
, orig_page
, object
);
5824 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object
), forwarding_object
));
5825 GC_ASSERT(BUILTIN_TYPE(forwarding_object
) != T_MOVED
);
5826 GC_ASSERT(BUILTIN_TYPE(forwarding_object
) != T_NONE
);
5829 p
+= sizeof(RVALUE
);
5836 invalidate_moved_page(rb_objspace_t
*objspace
, struct heap_page
*page
)
5839 bits_t
*mark_bits
, *pin_bits
;
5843 mark_bits
= page
->mark_bits
;
5844 pin_bits
= page
->pinned_bits
;
5848 // Skip out of range slots at the head of the page
5849 bitset
= pin_bits
[0] & ~mark_bits
[0];
5850 bitset
>>= NUM_IN_PAGE(p
);
5851 invalidate_moved_plane(objspace
, page
, (uintptr_t)p
, bitset
);
5852 p
+= (BITS_BITLENGTH
- NUM_IN_PAGE(p
));
5854 for (i
=1; i
< HEAP_PAGE_BITMAP_LIMIT
; i
++) {
5855 /* Moved objects are pinned but never marked. We reuse the pin bits
5856 * to indicate there is a moved object in this slot. */
5857 bitset
= pin_bits
[i
] & ~mark_bits
[i
];
5859 invalidate_moved_plane(objspace
, page
, (uintptr_t)p
, bitset
);
5860 p
+= BITS_BITLENGTH
;
5865 gc_compact_start(rb_objspace_t
*objspace
)
5867 struct heap_page
*page
= NULL
;
5869 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5870 rb_heap_t
*heap
= SIZE_POOL_EDEN_HEAP(&size_pools
[i
]);
5871 list_for_each(&heap
->pages
, page
, page_node
) {
5872 page
->flags
.before_sweep
= TRUE
;
5875 heap
->compact_cursor
= list_tail(&heap
->pages
, struct heap_page
, page_node
);
5876 heap
->compact_cursor_index
= 0;
5879 if (gc_prof_enabled(objspace
)) {
5880 gc_profile_record
*record
= gc_prof_record(objspace
);
5881 record
->moved_objects
= objspace
->rcompactor
.total_moved
;
5884 memset(objspace
->rcompactor
.considered_count_table
, 0, T_MASK
* sizeof(size_t));
5885 memset(objspace
->rcompactor
.moved_count_table
, 0, T_MASK
* sizeof(size_t));
5887 /* Set up read barrier for pages containing MOVED objects */
5892 gc_sweep(rb_objspace_t
*objspace
)
5894 const unsigned int immediate_sweep
= objspace
->flags
.immediate_sweep
;
5896 gc_report(1, objspace
, "gc_sweep: immediate: %d\n", immediate_sweep
);
5898 if (immediate_sweep
) {
5899 #if !GC_ENABLE_LAZY_SWEEP
5900 gc_prof_sweep_timer_start(objspace
);
5902 gc_sweep_start(objspace
);
5903 if (objspace
->flags
.during_compacting
) {
5904 gc_compact_start(objspace
);
5907 gc_sweep_rest(objspace
);
5908 #if !GC_ENABLE_LAZY_SWEEP
5909 gc_prof_sweep_timer_stop(objspace
);
5913 struct heap_page
*page
= NULL
;
5914 gc_sweep_start(objspace
);
5916 if (ruby_enable_autocompact
&& is_full_marking(objspace
)) {
5917 gc_compact_start(objspace
);
5920 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5921 list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools
[i
])->pages
), page
, page_node
) {
5922 page
->flags
.before_sweep
= TRUE
;
5926 /* Sweep every size pool. */
5927 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
5928 rb_size_pool_t
*size_pool
= &size_pools
[i
];
5929 gc_sweep_step(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
5934 rb_size_pool_t
*size_pool
= &size_pools
[0];
5935 gc_heap_prepare_minimum_pages(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
/* Marking - Marking stack */
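/* Descriptive note: the mark stack is a linked list of fixed-size
 * stack_chunk_t blocks.  Popped chunks are kept in a small cache
 * (stack->cache) so that marking does not have to call malloc()/free()
 * for every chunk overflow, and shrink_stack_chunk_cache() trims the
 * cache when it stays mostly unused. */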
static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *res;

    res = malloc(sizeof(stack_chunk_t));
    if (!res)
        rb_memerror();

    return res;
}

static int
is_mark_stack_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}
static size_t
mark_stack_size(mark_stack_t *stack)
{
    size_t size = stack->index;
    stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;

    while (chunk) {
        size += stack->limit;
        chunk = chunk->next;
    }
    return size;
}
5973 add_stack_chunk_cache(mark_stack_t
*stack
, stack_chunk_t
*chunk
)
5975 chunk
->next
= stack
->cache
;
5976 stack
->cache
= chunk
;
5977 stack
->cache_size
++;
5981 shrink_stack_chunk_cache(mark_stack_t
*stack
)
5983 stack_chunk_t
*chunk
;
5985 if (stack
->unused_cache_size
> (stack
->cache_size
/2)) {
5986 chunk
= stack
->cache
;
5987 stack
->cache
= stack
->cache
->next
;
5988 stack
->cache_size
--;
5991 stack
->unused_cache_size
= stack
->cache_size
;
5995 push_mark_stack_chunk(mark_stack_t
*stack
)
5997 stack_chunk_t
*next
;
5999 GC_ASSERT(stack
->index
== stack
->limit
);
6001 if (stack
->cache_size
> 0) {
6002 next
= stack
->cache
;
6003 stack
->cache
= stack
->cache
->next
;
6004 stack
->cache_size
--;
6005 if (stack
->unused_cache_size
> stack
->cache_size
)
6006 stack
->unused_cache_size
= stack
->cache_size
;
6009 next
= stack_chunk_alloc();
6011 next
->next
= stack
->chunk
;
6012 stack
->chunk
= next
;
6017 pop_mark_stack_chunk(mark_stack_t
*stack
)
6019 stack_chunk_t
*prev
;
6021 prev
= stack
->chunk
->next
;
6022 GC_ASSERT(stack
->index
== 0);
6023 add_stack_chunk_cache(stack
, stack
->chunk
);
6024 stack
->chunk
= prev
;
6025 stack
->index
= stack
->limit
;
6029 free_stack_chunks(mark_stack_t
*stack
)
6031 stack_chunk_t
*chunk
= stack
->chunk
;
6032 stack_chunk_t
*next
= NULL
;
6034 while (chunk
!= NULL
) {
6042 push_mark_stack(mark_stack_t
*stack
, VALUE data
)
6045 switch (BUILTIN_TYPE(obj
)) {
6066 if (stack
->index
== stack
->limit
) {
6067 push_mark_stack_chunk(stack
);
6069 stack
->chunk
->data
[stack
->index
++] = data
;
6079 rb_bug("push_mark_stack() called for broken object");
6083 UNEXPECTED_NODE(push_mark_stack
);
6087 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6088 BUILTIN_TYPE(obj
), (void *)data
,
6089 is_pointer_to_heap(&rb_objspace
, (void *)data
) ? "corrupted object" : "non object");
6093 pop_mark_stack(mark_stack_t
*stack
, VALUE
*data
)
6095 if (is_mark_stack_empty(stack
)) {
6098 if (stack
->index
== 1) {
6099 *data
= stack
->chunk
->data
[--stack
->index
];
6100 pop_mark_stack_chunk(stack
);
6103 *data
= stack
->chunk
->data
[--stack
->index
];
6109 init_mark_stack(mark_stack_t
*stack
)
6113 MEMZERO(stack
, mark_stack_t
, 1);
6114 stack
->index
= stack
->limit
= STACK_CHUNK_SIZE
;
6116 for (i
=0; i
< 4; i
++) {
6117 add_stack_chunk_cache(stack
, stack_chunk_alloc());
6119 stack
->unused_cache_size
= stack
->cache_size
;
6124 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6126 #define STACK_START (ec->machine.stack_start)
6127 #define STACK_END (ec->machine.stack_end)
6128 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6130 #if STACK_GROW_DIRECTION < 0
6131 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6132 #elif STACK_GROW_DIRECTION > 0
6133 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6135 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6136 : (size_t)(STACK_END - STACK_START + 1))
6138 #if !STACK_GROW_DIRECTION
6139 int ruby_stack_grow_direction
;
6141 ruby_get_stack_grow_direction(volatile VALUE
*addr
)
6144 SET_MACHINE_STACK_END(&end
);
6146 if (end
> addr
) return ruby_stack_grow_direction
= 1;
6147 return ruby_stack_grow_direction
= -1;
6152 ruby_stack_length(VALUE
**p
)
6154 rb_execution_context_t
*ec
= GET_EC();
6156 if (p
) *p
= STACK_UPPER(STACK_END
, STACK_START
, STACK_END
);
6157 return STACK_LENGTH
;
6160 #define PREVENT_STACK_OVERFLOW 1
6161 #ifndef PREVENT_STACK_OVERFLOW
6162 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6163 # define PREVENT_STACK_OVERFLOW 1
6165 # define PREVENT_STACK_OVERFLOW 0
6168 #if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6170 stack_check(rb_execution_context_t
*ec
, int water_mark
)
6174 size_t length
= STACK_LENGTH
;
6175 size_t maximum_length
= STACK_LEVEL_MAX
- water_mark
;
6177 return length
> maximum_length
;
6180 #define stack_check(ec, water_mark) FALSE
6183 #define STACKFRAME_FOR_CALL_CFUNC 2048
6185 MJIT_FUNC_EXPORTED
int
6186 rb_ec_stack_check(rb_execution_context_t
*ec
)
6188 return stack_check(ec
, STACKFRAME_FOR_CALL_CFUNC
);
6192 ruby_stack_check(void)
6194 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC
);
6197 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t
*objspace
, register const VALUE
*x
, register long n
, void (*cb
)(rb_objspace_t
*, VALUE
)));
6199 each_location(rb_objspace_t
*objspace
, register const VALUE
*x
, register long n
, void (*cb
)(rb_objspace_t
*, VALUE
))
6210 gc_mark_locations(rb_objspace_t
*objspace
, const VALUE
*start
, const VALUE
*end
, void (*cb
)(rb_objspace_t
*, VALUE
))
6214 if (end
<= start
) return;
6216 each_location(objspace
, start
, n
, cb
);
6220 rb_gc_mark_locations(const VALUE
*start
, const VALUE
*end
)
6222 gc_mark_locations(&rb_objspace
, start
, end
, gc_mark_maybe
);
6226 gc_mark_values(rb_objspace_t
*objspace
, long n
, const VALUE
*values
)
6230 for (i
=0; i
<n
; i
++) {
6231 gc_mark(objspace
, values
[i
]);
6236 rb_gc_mark_values(long n
, const VALUE
*values
)
6239 rb_objspace_t
*objspace
= &rb_objspace
;
6241 for (i
=0; i
<n
; i
++) {
6242 gc_mark_and_pin(objspace
, values
[i
]);
6247 gc_mark_stack_values(rb_objspace_t
*objspace
, long n
, const VALUE
*values
)
6251 for (i
=0; i
<n
; i
++) {
6252 if (is_markable_object(objspace
, values
[i
])) {
6253 gc_mark_and_pin(objspace
, values
[i
]);
6259 rb_gc_mark_vm_stack_values(long n
, const VALUE
*values
)
6261 rb_objspace_t
*objspace
= &rb_objspace
;
6262 gc_mark_stack_values(objspace
, n
, values
);
6266 mark_value(st_data_t key
, st_data_t value
, st_data_t data
)
6268 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6269 gc_mark(objspace
, (VALUE
)value
);
6274 mark_value_pin(st_data_t key
, st_data_t value
, st_data_t data
)
6276 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6277 gc_mark_and_pin(objspace
, (VALUE
)value
);
6282 mark_tbl_no_pin(rb_objspace_t
*objspace
, st_table
*tbl
)
6284 if (!tbl
|| tbl
->num_entries
== 0) return;
6285 st_foreach(tbl
, mark_value
, (st_data_t
)objspace
);
6289 mark_tbl(rb_objspace_t
*objspace
, st_table
*tbl
)
6291 if (!tbl
|| tbl
->num_entries
== 0) return;
6292 st_foreach(tbl
, mark_value_pin
, (st_data_t
)objspace
);
6296 mark_key(st_data_t key
, st_data_t value
, st_data_t data
)
6298 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6299 gc_mark_and_pin(objspace
, (VALUE
)key
);
6304 mark_set(rb_objspace_t
*objspace
, st_table
*tbl
)
6307 st_foreach(tbl
, mark_key
, (st_data_t
)objspace
);
6311 pin_value(st_data_t key
, st_data_t value
, st_data_t data
)
6313 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6314 gc_mark_and_pin(objspace
, (VALUE
)value
);
6319 mark_finalizer_tbl(rb_objspace_t
*objspace
, st_table
*tbl
)
6322 st_foreach(tbl
, pin_value
, (st_data_t
)objspace
);
6326 rb_mark_set(st_table
*tbl
)
6328 mark_set(&rb_objspace
, tbl
);
6332 mark_keyvalue(st_data_t key
, st_data_t value
, st_data_t data
)
6334 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6336 gc_mark(objspace
, (VALUE
)key
);
6337 gc_mark(objspace
, (VALUE
)value
);
6342 pin_key_pin_value(st_data_t key
, st_data_t value
, st_data_t data
)
6344 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6346 gc_mark_and_pin(objspace
, (VALUE
)key
);
6347 gc_mark_and_pin(objspace
, (VALUE
)value
);
6352 pin_key_mark_value(st_data_t key
, st_data_t value
, st_data_t data
)
6354 rb_objspace_t
*objspace
= (rb_objspace_t
*)data
;
6356 gc_mark_and_pin(objspace
, (VALUE
)key
);
6357 gc_mark(objspace
, (VALUE
)value
);
6362 mark_hash(rb_objspace_t
*objspace
, VALUE hash
)
6364 if (rb_hash_compare_by_id_p(hash
)) {
6365 rb_hash_stlike_foreach(hash
, pin_key_mark_value
, (st_data_t
)objspace
);
6368 rb_hash_stlike_foreach(hash
, mark_keyvalue
, (st_data_t
)objspace
);
6371 if (RHASH_AR_TABLE_P(hash
)) {
6372 if (LIKELY(during_gc
) && RHASH_TRANSIENT_P(hash
)) {
6373 rb_transient_heap_mark(hash
, RHASH_AR_TABLE(hash
));
6377 VM_ASSERT(!RHASH_TRANSIENT_P(hash
));
6379 gc_mark(objspace
, RHASH(hash
)->ifnone
);
6383 mark_st(rb_objspace_t
*objspace
, st_table
*tbl
)
6386 st_foreach(tbl
, pin_key_pin_value
, (st_data_t
)objspace
);
6390 rb_mark_hash(st_table
*tbl
)
6392 mark_st(&rb_objspace
, tbl
);
6396 mark_method_entry(rb_objspace_t
*objspace
, const rb_method_entry_t
*me
)
6398 const rb_method_definition_t
*def
= me
->def
;
6400 gc_mark(objspace
, me
->owner
);
6401 gc_mark(objspace
, me
->defined_class
);
6404 switch (def
->type
) {
6405 case VM_METHOD_TYPE_ISEQ
:
6406 if (def
->body
.iseq
.iseqptr
) gc_mark(objspace
, (VALUE
)def
->body
.iseq
.iseqptr
);
6407 gc_mark(objspace
, (VALUE
)def
->body
.iseq
.cref
);
6409 if (def
->iseq_overload
&& me
->defined_class
) {
6410 // it can be a key of "overloaded_cme" table
6411 // so it should be pinned.
6412 gc_mark_and_pin(objspace
, (VALUE
)me
);
6415 case VM_METHOD_TYPE_ATTRSET
:
6416 case VM_METHOD_TYPE_IVAR
:
6417 gc_mark(objspace
, def
->body
.attr
.location
);
6419 case VM_METHOD_TYPE_BMETHOD
:
6420 gc_mark(objspace
, def
->body
.bmethod
.proc
);
6421 if (def
->body
.bmethod
.hooks
) rb_hook_list_mark(def
->body
.bmethod
.hooks
);
6423 case VM_METHOD_TYPE_ALIAS
:
6424 gc_mark(objspace
, (VALUE
)def
->body
.alias
.original_me
);
6426 case VM_METHOD_TYPE_REFINED
:
6427 gc_mark(objspace
, (VALUE
)def
->body
.refined
.orig_me
);
6428 gc_mark(objspace
, (VALUE
)def
->body
.refined
.owner
);
6430 case VM_METHOD_TYPE_CFUNC
:
6431 case VM_METHOD_TYPE_ZSUPER
:
6432 case VM_METHOD_TYPE_MISSING
:
6433 case VM_METHOD_TYPE_OPTIMIZED
:
6434 case VM_METHOD_TYPE_UNDEF
:
6435 case VM_METHOD_TYPE_NOTIMPLEMENTED
:
static enum rb_id_table_iterator_result
mark_method_entry_i(VALUE me, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    gc_mark(objspace, me);
    return ID_TABLE_CONTINUE;
}

static void
mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
    }
}
static enum rb_id_table_iterator_result
mark_const_entry_i(VALUE value, void *data)
{
    const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
    rb_objspace_t *objspace = data;

    gc_mark(objspace, ce->value);
    gc_mark(objspace, ce->file);
    return ID_TABLE_CONTINUE;
}

static void
mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
}
6476 #if STACK_GROW_DIRECTION < 0
6477 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6478 #elif STACK_GROW_DIRECTION > 0
6479 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6481 #define GET_STACK_BOUNDS(start, end, appendix) \
6482 ((STACK_END < STACK_START) ? \
6483 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6486 static void each_stack_location(rb_objspace_t
*objspace
, const rb_execution_context_t
*ec
,
6487 const VALUE
*stack_start
, const VALUE
*stack_end
, void (*cb
)(rb_objspace_t
*, VALUE
));
6489 #ifndef __EMSCRIPTEN__
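/* Descriptive note: mark_current_machine_context() conservatively marks the
 * CPU registers (spilled into a jmp_buf by rb_setjmp) together with the
 * machine stack, using gc_mark_maybe() so that arbitrary words that merely
 * look like heap pointers are pinned rather than misinterpreted. */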
6491 mark_current_machine_context(rb_objspace_t
*objspace
, rb_execution_context_t
*ec
)
6495 VALUE v
[sizeof(rb_jmp_buf
) / (sizeof(VALUE
))];
6496 } save_regs_gc_mark
;
6497 VALUE
*stack_start
, *stack_end
;
6499 FLUSH_REGISTER_WINDOWS
;
6500 memset(&save_regs_gc_mark
, 0, sizeof(save_regs_gc_mark
));
6501 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6502 rb_setjmp(save_regs_gc_mark
.j
);
6504 /* SET_STACK_END must be called in this function because
6505 * the stack frame of this function may contain
6506 * callee save registers and they should be marked. */
6508 GET_STACK_BOUNDS(stack_start
, stack_end
, 1);
6510 each_location(objspace
, save_regs_gc_mark
.v
, numberof(save_regs_gc_mark
.v
), gc_mark_maybe
);
6512 each_stack_location(objspace
, ec
, stack_start
, stack_end
, gc_mark_maybe
);
6516 static VALUE
*rb_emscripten_stack_range_tmp
[2];
6519 rb_emscripten_mark_locations(void *begin
, void *end
)
6521 rb_emscripten_stack_range_tmp
[0] = begin
;
6522 rb_emscripten_stack_range_tmp
[1] = end
;
6526 mark_current_machine_context(rb_objspace_t
*objspace
, rb_execution_context_t
*ec
)
6528 emscripten_scan_stack(rb_emscripten_mark_locations
);
6529 each_stack_location(objspace
, ec
, rb_emscripten_stack_range_tmp
[0], rb_emscripten_stack_range_tmp
[1], gc_mark_maybe
);
6531 emscripten_scan_registers(rb_emscripten_mark_locations
);
6532 each_stack_location(objspace
, ec
, rb_emscripten_stack_range_tmp
[0], rb_emscripten_stack_range_tmp
[1], gc_mark_maybe
);
6537 each_machine_stack_value(const rb_execution_context_t
*ec
, void (*cb
)(rb_objspace_t
*, VALUE
))
6539 rb_objspace_t
*objspace
= &rb_objspace
;
6540 VALUE
*stack_start
, *stack_end
;
6542 GET_STACK_BOUNDS(stack_start
, stack_end
, 0);
6543 each_stack_location(objspace
, ec
, stack_start
, stack_end
, cb
);
6547 rb_gc_mark_machine_stack(const rb_execution_context_t
*ec
)
6549 each_machine_stack_value(ec
, gc_mark_maybe
);
6553 each_stack_location(rb_objspace_t
*objspace
, const rb_execution_context_t
*ec
,
6554 const VALUE
*stack_start
, const VALUE
*stack_end
, void (*cb
)(rb_objspace_t
*, VALUE
))
6557 gc_mark_locations(objspace
, stack_start
, stack_end
, cb
);
6559 #if defined(__mc68000__)
6560 gc_mark_locations(objspace
,
6561 (VALUE
*)((char*)stack_start
+ 2),
6562 (VALUE
*)((char*)stack_end
- 2), cb
);
6567 rb_mark_tbl(st_table
*tbl
)
6569 mark_tbl(&rb_objspace
, tbl
);
6573 rb_mark_tbl_no_pin(st_table
*tbl
)
6575 mark_tbl_no_pin(&rb_objspace
, tbl
);
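/* Descriptive note: gc_mark_maybe() treats its argument as a *potential*
 * object reference.  It only marks (and pins) the value when it actually
 * points into the GC heap and the slot holds a live object; anything else is
 * ignored, which is what makes conservative stack scanning safe. */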
6579 gc_mark_maybe(rb_objspace_t
*objspace
, VALUE obj
)
6581 (void)VALGRIND_MAKE_MEM_DEFINED(&obj
, sizeof(obj
));
6583 if (is_pointer_to_heap(objspace
, (void *)obj
)) {
6584 void *ptr
= __asan_region_is_poisoned((void *)obj
, SIZEOF_VALUE
);
6585 asan_unpoison_object(obj
, false);
6587 /* Garbage can live on the stack, so do not mark or pin */
6588 switch (BUILTIN_TYPE(obj
)) {
6593 gc_mark_and_pin(objspace
, obj
);
6598 GC_ASSERT(BUILTIN_TYPE(obj
) == T_NONE
);
6599 asan_poison_object(obj
);
6605 rb_gc_mark_maybe(VALUE obj
)
6607 gc_mark_maybe(&rb_objspace
, obj
);
6611 gc_mark_set(rb_objspace_t
*objspace
, VALUE obj
)
6613 ASSERT_vm_locking();
6614 if (RVALUE_MARKED(obj
)) return 0;
6615 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj
), obj
);
6620 gc_remember_unprotected(rb_objspace_t
*objspace
, VALUE obj
)
6622 struct heap_page
*page
= GET_HEAP_PAGE(obj
);
6623 bits_t
*uncollectible_bits
= &page
->uncollectible_bits
[0];
6625 if (!MARKED_IN_BITMAP(uncollectible_bits
, obj
)) {
6626 page
->flags
.has_uncollectible_shady_objects
= TRUE
;
6627 MARK_IN_BITMAP(uncollectible_bits
, obj
);
6628 objspace
->rgengc
.uncollectible_wb_unprotected_objects
++;
6630 #if RGENGC_PROFILE > 0
6631 objspace
->profile
.total_remembered_shady_object_count
++;
6632 #if RGENGC_PROFILE >= 2
6633 objspace
->profile
.remembered_shady_object_count_types
[BUILTIN_TYPE(obj
)]++;
6644 rgengc_check_relation(rb_objspace_t
*objspace
, VALUE obj
)
6646 const VALUE old_parent
= objspace
->rgengc
.parent_object
;
6648 if (old_parent
) { /* parent object is old */
6649 if (RVALUE_WB_UNPROTECTED(obj
)) {
6650 if (gc_remember_unprotected(objspace
, obj
)) {
6651 gc_report(2, objspace
, "relation: (O->S) %s -> %s\n", obj_info(old_parent
), obj_info(obj
));
6655 if (!RVALUE_OLD_P(obj
)) {
6656 if (RVALUE_MARKED(obj
)) {
6657 /* An object pointed from an OLD object should be OLD. */
6658 gc_report(2, objspace
, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent
), obj_info(obj
));
6659 RVALUE_AGE_SET_OLD(objspace
, obj
);
6660 if (is_incremental_marking(objspace
)) {
6661 if (!RVALUE_MARKING(obj
)) {
6662 gc_grey(objspace
, obj
);
6666 rgengc_remember(objspace
, obj
);
6670 gc_report(2, objspace
, "relation: (O->Y) %s -> %s\n", obj_info(old_parent
), obj_info(obj
));
6671 RVALUE_AGE_SET_CANDIDATE(objspace
, obj
);
6677 GC_ASSERT(old_parent
== objspace
->rgengc
.parent_object
);
6681 gc_grey(rb_objspace_t
*objspace
, VALUE obj
)
6683 #if RGENGC_CHECK_MODE
6684 if (RVALUE_MARKED(obj
) == FALSE
) rb_bug("gc_grey: %s is not marked.", obj_info(obj
));
6685 if (RVALUE_MARKING(obj
) == TRUE
) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj
));
6688 #if GC_ENABLE_INCREMENTAL_MARK
6689 if (is_incremental_marking(objspace
)) {
6690 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj
), obj
);
6694 push_mark_stack(&objspace
->mark_stack
, obj
);
6698 gc_aging(rb_objspace_t
*objspace
, VALUE obj
)
6700 struct heap_page
*page
= GET_HEAP_PAGE(obj
);
6702 GC_ASSERT(RVALUE_MARKING(obj
) == FALSE
);
6703 check_rvalue_consistency(obj
);
6705 if (!RVALUE_PAGE_WB_UNPROTECTED(page
, obj
)) {
6706 if (!RVALUE_OLD_P(obj
)) {
6707 gc_report(3, objspace
, "gc_aging: YOUNG: %s\n", obj_info(obj
));
6708 RVALUE_AGE_INC(objspace
, obj
);
6710 else if (is_full_marking(objspace
)) {
6711 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page
, obj
) == FALSE
);
6712 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace
, page
, obj
);
6715 check_rvalue_consistency(obj
);
6717 objspace
->marked_slots
++;
6720 NOINLINE(static void gc_mark_ptr(rb_objspace_t
*objspace
, VALUE obj
));
6721 static void reachable_objects_from_callback(VALUE obj
);
6724 gc_mark_ptr(rb_objspace_t
*objspace
, VALUE obj
)
6726 if (LIKELY(during_gc
)) {
6727 rgengc_check_relation(objspace
, obj
);
6728 if (!gc_mark_set(objspace
, obj
)) return; /* already marked */
6730 if (0) { // for debug GC marking miss
6731 if (objspace
->rgengc
.parent_object
) {
6732 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
6733 (void *)obj
, obj_type_name(obj
),
6734 (void *)objspace
->rgengc
.parent_object
, obj_type_name(objspace
->rgengc
.parent_object
));
6737 RUBY_DEBUG_LOG("%p (%s)", (void *)obj
, obj_type_name(obj
));
6741 if (UNLIKELY(RB_TYPE_P(obj
, T_NONE
))) {
6743 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
6745 gc_aging(objspace
, obj
);
6746 gc_grey(objspace
, obj
);
6749 reachable_objects_from_callback(obj
);
6754 gc_pin(rb_objspace_t
*objspace
, VALUE obj
)
6756 GC_ASSERT(is_markable_object(objspace
, obj
));
6757 if (UNLIKELY(objspace
->flags
.during_compacting
)) {
6758 if (LIKELY(during_gc
)) {
6759 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj
), obj
);
6765 gc_mark_and_pin(rb_objspace_t
*objspace
, VALUE obj
)
6767 if (!is_markable_object(objspace
, obj
)) return;
6768 gc_pin(objspace
, obj
);
6769 gc_mark_ptr(objspace
, obj
);
6773 gc_mark(rb_objspace_t
*objspace
, VALUE obj
)
6775 if (!is_markable_object(objspace
, obj
)) return;
6776 gc_mark_ptr(objspace
, obj
);
6780 rb_gc_mark_movable(VALUE ptr
)
6782 gc_mark(&rb_objspace
, ptr
);
6786 rb_gc_mark(VALUE ptr
)
6788 gc_mark_and_pin(&rb_objspace
, ptr
);
/* CAUTION: THIS FUNCTION WORKS *ONLY BEFORE* SWEEPING.
 * This function is only for GC_END_MARK timing.
 */

int
rb_objspace_marked_object_p(VALUE obj)
{
    return RVALUE_MARKED(obj) ? TRUE : FALSE;
}
static inline void
gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_OLD_P(obj)) {
        objspace->rgengc.parent_object = obj;
    }
    else {
        objspace->rgengc.parent_object = Qfalse;
    }
}

static void
gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            const rb_env_t *env = (const rb_env_t *)obj;

            if (LIKELY(env->ep)) {
                // just after newobj(), ep can be NULL here
                GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
                GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
                gc_mark_values(objspace, (long)env->env_size, env->env);
                VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
                gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
                gc_mark(objspace, (VALUE)env->iseq);
            }
        }
        return;
      case imemo_cref:
        gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
        gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
        gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
        return;
      case imemo_svar:
        gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
        gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
        return;
      case imemo_throw_data:
        gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        return;
      case imemo_ifunc:
        gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
        return;
      case imemo_memo:
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
        gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
        gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
        return;
      case imemo_ment:
        mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        return;
      case imemo_iseq:
        rb_iseq_mark((rb_iseq_t *)obj);
        return;
      case imemo_tmpbuf:
        {
            const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
            do {
                rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
            } while ((m = m->next) != NULL);
        }
        return;
      case imemo_ast:
        rb_ast_mark(&RANY(obj)->as.imemo.ast);
        return;
      case imemo_parser_strterm:
        rb_strterm_mark(obj);
        return;
      case imemo_callinfo:
        return;
      case imemo_callcache:
        {
            const struct rb_callcache *cc = (const struct rb_callcache *)obj;
            // should not mark klass here
            gc_mark(objspace, (VALUE)vm_cc_cme(cc));
        }
        return;
      case imemo_constcache:
        {
            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
            gc_mark(objspace, ice->value);
        }
        return;
#if VM_CHECK_MODE > 0
      default:
        VM_UNREACHABLE(gc_mark_imemo);
#endif
    }
}
static void
gc_mark_children(rb_objspace_t *objspace, VALUE obj)
{
    register RVALUE *any = RANY(obj);
    gc_mark_set_parent(objspace, obj);

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_mark_generic_ivar(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_FLOAT:
      case T_BIGNUM:
      case T_SYMBOL:
        /* Not immediates, but does not have references and singleton
         * class. */
        return;

      case T_NIL:
      case T_FIXNUM:
        rb_bug("rb_gc_mark() called for broken object");
        break;

      case T_NODE:
        UNEXPECTED_NODE(rb_gc_mark);
        break;

      case T_IMEMO:
        gc_mark_imemo(objspace, obj);
        return;

      default:
        break;
    }

    gc_mark(objspace, any->as.basic.klass);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;

        mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        cc_table_mark(objspace, obj);
        mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
        mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            mark_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER(obj)) {
            gc_mark(objspace, RCLASS_SUPER(obj));
        }
        if (!RCLASS_EXT(obj)) break;
        mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        cc_table_mark(objspace, obj);
        break;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            VALUE root = any->as.array.as.heap.aux.shared_root;
            gc_mark(objspace, root);
        }
        else {
            long i, len = RARRAY_LEN(obj);
            const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
            for (i=0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc)) {
                if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
                    RARRAY_TRANSIENT_P(obj)) {
                    rb_transient_heap_mark(obj, ptr);
                }
            }
        }
        break;

      case T_HASH:
        mark_hash(objspace, obj);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            gc_mark(objspace, any->as.string.as.heap.aux.shared);
        }
        break;

      case T_DATA:
        {
            void *const ptr = DATA_PTR(obj);
            if (ptr) {
                RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
                    any->as.typeddata.type->function.dmark :
                    any->as.data.dmark;
                if (mark_func) (*mark_func)(ptr);
            }
        }
        break;

      case T_OBJECT:
        {
            const VALUE * const ptr = ROBJECT_IVPTR(obj);

            uint32_t i, len = ROBJECT_NUMIV(obj);
            for (i = 0; i < len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc) &&
                ROBJ_TRANSIENT_P(obj)) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            gc_mark(objspace, any->as.file.fptr->self);
            gc_mark(objspace, any->as.file.fptr->pathv);
            gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
            gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
            gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            gc_mark(objspace, any->as.file.fptr->encs.ecopts);
            gc_mark(objspace, any->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        gc_mark(objspace, any->as.regexp.src);
        break;

      case T_MATCH:
        gc_mark(objspace, any->as.match.regexp);
        if (any->as.match.str) {
            gc_mark(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        gc_mark(objspace, any->as.rational.num);
        gc_mark(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        gc_mark(objspace, any->as.complex.real);
        gc_mark(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i;
            const long len = RSTRUCT_LEN(obj);
            const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);

            for (i=0; i<len; i++) {
                gc_mark(objspace, ptr[i]);
            }

            if (LIKELY(during_gc) &&
                RSTRUCT_TRANSIENT_P(obj)) {
                rb_transient_heap_mark(obj, ptr);
            }
        }
        break;

      default:
#if GC_DEBUG
        rb_gcdebug_print_obj_condition((VALUE)obj);
#endif
        if (BUILTIN_TYPE(obj) == T_MOVED)  rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
        rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
               BUILTIN_TYPE(obj), (void *)any,
               is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
    }
}
/*
 * incremental: 0 -> not incremental (do all)
 * incremental: n -> mark at most `n' objects
 */
static int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
#if GC_ENABLE_INCREMENTAL_MARK
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;
#endif

    while (pop_mark_stack(mstack, &obj)) {
        if (obj == Qundef) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
        }
        gc_mark_children(objspace, obj);

#if GC_ENABLE_INCREMENTAL_MARK
        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
        else {
            /* just ignore marking bits */
        }
#endif
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
{
    return gc_mark_stacked_objects(objspace, TRUE, count);
}

static int
gc_mark_stacked_objects_all(rb_objspace_t *objspace)
{
    return gc_mark_stacked_objects(objspace, FALSE, 0);
}
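
/* Incremental marking drains the mark stack in bounded steps: with
 * incremental == TRUE the loop above stops once the number of popped objects
 * plus the newly marked slots exceeds `count', returning FALSE so the caller
 * can resume on a later step; with incremental == FALSE it drains the whole
 * stack.  A caller-side sketch of that contract (simplified, not the exact
 * code from this file):
 *
 *     while (gc_mark_stacked_objects_incremental(objspace, step_slots) == FALSE) {
 *         // yield to the mutator, then continue marking on the next step
 *     }
 */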
#if PRINT_ROOT_TICKS
#define MAX_TICKS 0x100
static tick_t mark_ticks[MAX_TICKS];
static const char *mark_ticks_categories[MAX_TICKS];

static void
show_mark_ticks(void)
{
    int i;
    fprintf(stderr, "mark ticks result:\n");
    for (i=0; i<MAX_TICKS; i++) {
        const char *category = mark_ticks_categories[i];
        if (category) {
            fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
        }
        else {
            break;
        }
    }
}
#endif /* PRINT_ROOT_TICKS */

static void
gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
{
    struct gc_list *list;
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

#if PRINT_ROOT_TICKS
    tick_t start_tick = tick();
    int tick_count = 0;
    const char *prev_category = 0;

    if (mark_ticks_categories[0] == 0) {
        atexit(show_mark_ticks);
    }
#endif

    if (categoryp) *categoryp = "xxx";

    objspace->rgengc.parent_object = Qfalse;

#if PRINT_ROOT_TICKS
#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
    if (prev_category) { \
        tick_t t = tick(); \
        mark_ticks[tick_count] = t - start_tick; \
        mark_ticks_categories[tick_count] = prev_category; \
        tick_count++; \
    } \
    prev_category = category; \
    start_tick = tick(); \
} while (0)
#else /* PRINT_ROOT_TICKS */
#define MARK_CHECKPOINT_PRINT_TICK(category)
#endif

#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
    MARK_CHECKPOINT_PRINT_TICK(category); \
} while (0)

    MARK_CHECKPOINT("vm");
    SET_STACK_END;
    rb_vm_mark(vm);
    if (vm->self) gc_mark(objspace, vm->self);

    MARK_CHECKPOINT("finalizers");
    mark_finalizer_tbl(objspace, finalizer_table);

    MARK_CHECKPOINT("machine_context");
    mark_current_machine_context(objspace, ec);

    /* mark protected global variables */
    MARK_CHECKPOINT("global_list");
    for (list = global_list; list; list = list->next) {
        gc_mark_maybe(objspace, *list->varptr);
    }

    MARK_CHECKPOINT("end_proc");
    rb_mark_end_proc();

    MARK_CHECKPOINT("global_tbl");
    rb_gc_mark_global_tbl();

    MARK_CHECKPOINT("object_id");
    rb_gc_mark(objspace->next_object_id);
    mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */

#if GC_DEBUG_STRESS_TO_CLASS
    if (stress_to_class) rb_gc_mark(stress_to_class);
#endif

    MARK_CHECKPOINT("finish");
#undef MARK_CHECKPOINT
}
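
/* Root marking proceeds category by category -- "vm", "finalizers",
 * "machine_context", "global_list", "end_proc", "global_tbl" and "object_id"
 * above -- and MARK_CHECKPOINT records the current category in *categoryp, so
 * a crash during root marking can report which root set was being scanned
 * (and, with PRINT_ROOT_TICKS, how long each category took).
 */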
7240 #if RGENGC_CHECK_MODE >= 4
7242 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7243 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7244 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7252 static struct reflist
*
7253 reflist_create(VALUE obj
)
7255 struct reflist
*refs
= xmalloc(sizeof(struct reflist
));
7257 refs
->list
= ALLOC_N(VALUE
, refs
->size
);
7258 refs
->list
[0] = obj
;
7264 reflist_destruct(struct reflist
*refs
)
7271 reflist_add(struct reflist
*refs
, VALUE obj
)
7273 if (refs
->pos
== refs
->size
) {
7275 SIZED_REALLOC_N(refs
->list
, VALUE
, refs
->size
, refs
->size
/2);
7278 refs
->list
[refs
->pos
++] = obj
;
7282 reflist_dump(struct reflist
*refs
)
7285 for (i
=0; i
<refs
->pos
; i
++) {
7286 VALUE obj
= refs
->list
[i
];
7287 if (IS_ROOTSIG(obj
)) { /* root */
7288 fprintf(stderr
, "<root@%s>", GET_ROOTSIG(obj
));
7291 fprintf(stderr
, "<%s>", obj_info(obj
));
7293 if (i
+1 < refs
->pos
) fprintf(stderr
, ", ");
7298 reflist_referred_from_machine_context(struct reflist
*refs
)
7301 for (i
=0; i
<refs
->pos
; i
++) {
7302 VALUE obj
= refs
->list
[i
];
7303 if (IS_ROOTSIG(obj
) && strcmp(GET_ROOTSIG(obj
), "machine_context") == 0) return 1;
7309 rb_objspace_t
*objspace
;
7315 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7317 struct st_table
*references
;
7318 const char *category
;
7320 mark_stack_t mark_stack
;
7324 allrefs_add(struct allrefs
*data
, VALUE obj
)
7326 struct reflist
*refs
;
7329 if (st_lookup(data
->references
, obj
, &r
)) {
7330 refs
= (struct reflist
*)r
;
7331 reflist_add(refs
, data
->root_obj
);
7335 refs
= reflist_create(data
->root_obj
);
7336 st_insert(data
->references
, obj
, (st_data_t
)refs
);
7342 allrefs_i(VALUE obj
, void *ptr
)
7344 struct allrefs
*data
= (struct allrefs
*)ptr
;
7346 if (allrefs_add(data
, obj
)) {
7347 push_mark_stack(&data
->mark_stack
, obj
);
7352 allrefs_roots_i(VALUE obj
, void *ptr
)
7354 struct allrefs
*data
= (struct allrefs
*)ptr
;
7355 if (strlen(data
->category
) == 0) rb_bug("!!!");
7356 data
->root_obj
= MAKE_ROOTSIG(data
->category
);
7358 if (allrefs_add(data
, obj
)) {
7359 push_mark_stack(&data
->mark_stack
, obj
);
7362 #define PUSH_MARK_FUNC_DATA(v) do { \
7363 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7364 GET_RACTOR()->mfd = (v);
7366 #define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7369 objspace_allrefs(rb_objspace_t
*objspace
)
7371 struct allrefs data
;
7372 struct gc_mark_func_data_struct mfd
;
7374 int prev_dont_gc
= dont_gc_val();
7377 data
.objspace
= objspace
;
7378 data
.references
= st_init_numtable();
7379 init_mark_stack(&data
.mark_stack
);
7381 mfd
.mark_func
= allrefs_roots_i
;
7384 /* traverse root objects */
7385 PUSH_MARK_FUNC_DATA(&mfd
);
7386 GET_RACTOR()->mfd
= &mfd
;
7387 gc_mark_roots(objspace
, &data
.category
);
7388 POP_MARK_FUNC_DATA();
7390 /* traverse rest objects reachable from root objects */
7391 while (pop_mark_stack(&data
.mark_stack
, &obj
)) {
7392 rb_objspace_reachable_objects_from(data
.root_obj
= obj
, allrefs_i
, &data
);
7394 free_stack_chunks(&data
.mark_stack
);
7396 dont_gc_set(prev_dont_gc
);
7397 return data
.references
;
7401 objspace_allrefs_destruct_i(st_data_t key
, st_data_t value
, st_data_t ptr
)
7403 struct reflist
*refs
= (struct reflist
*)value
;
7404 reflist_destruct(refs
);
7409 objspace_allrefs_destruct(struct st_table
*refs
)
7411 st_foreach(refs
, objspace_allrefs_destruct_i
, 0);
7412 st_free_table(refs
);
7415 #if RGENGC_CHECK_MODE >= 5
7417 allrefs_dump_i(st_data_t k
, st_data_t v
, st_data_t ptr
)
7419 VALUE obj
= (VALUE
)k
;
7420 struct reflist
*refs
= (struct reflist
*)v
;
7421 fprintf(stderr
, "[allrefs_dump_i] %s <- ", obj_info(obj
));
7423 fprintf(stderr
, "\n");
7428 allrefs_dump(rb_objspace_t
*objspace
)
7430 VALUE size
= objspace
->rgengc
.allrefs_table
->num_entries
;
7431 fprintf(stderr
, "[all refs] (size: %"PRIuVALUE
")\n", size
);
7432 st_foreach(objspace
->rgengc
.allrefs_table
, allrefs_dump_i
, 0);
7437 gc_check_after_marks_i(st_data_t k
, st_data_t v
, st_data_t ptr
)
7440 struct reflist
*refs
= (struct reflist
*)v
;
7441 rb_objspace_t
*objspace
= (rb_objspace_t
*)ptr
;
7443 /* object should be marked or oldgen */
7444 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj
), obj
)) {
7445 fprintf(stderr
, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj
));
7446 fprintf(stderr
, "gc_check_after_marks_i: %p is referred from ", (void *)obj
);
7449 if (reflist_referred_from_machine_context(refs
)) {
7450 fprintf(stderr
, " (marked from machine stack).\n");
7451 /* marked from machine context can be false positive */
7454 objspace
->rgengc
.error_count
++;
7455 fprintf(stderr
, "\n");
7462 gc_marks_check(rb_objspace_t
*objspace
, st_foreach_callback_func
*checker_func
, const char *checker_name
)
7464 size_t saved_malloc_increase
= objspace
->malloc_params
.increase
;
7465 #if RGENGC_ESTIMATE_OLDMALLOC
7466 size_t saved_oldmalloc_increase
= objspace
->rgengc
.oldmalloc_increase
;
7468 VALUE already_disabled
= rb_objspace_gc_disable(objspace
);
7470 objspace
->rgengc
.allrefs_table
= objspace_allrefs(objspace
);
7473 st_foreach(objspace
->rgengc
.allrefs_table
, checker_func
, (st_data_t
)objspace
);
7476 if (objspace
->rgengc
.error_count
> 0) {
7477 #if RGENGC_CHECK_MODE >= 5
7478 allrefs_dump(objspace
);
7480 if (checker_name
) rb_bug("%s: GC has problem.", checker_name
);
7483 objspace_allrefs_destruct(objspace
->rgengc
.allrefs_table
);
7484 objspace
->rgengc
.allrefs_table
= 0;
7486 if (already_disabled
== Qfalse
) rb_objspace_gc_enable(objspace
);
7487 objspace
->malloc_params
.increase
= saved_malloc_increase
;
7488 #if RGENGC_ESTIMATE_OLDMALLOC
7489 objspace
->rgengc
.oldmalloc_increase
= saved_oldmalloc_increase
;
7492 #endif /* RGENGC_CHECK_MODE >= 4 */
7494 struct verify_internal_consistency_struct
{
7495 rb_objspace_t
*objspace
;
7497 size_t live_object_count
;
7498 size_t zombie_object_count
;
7501 size_t old_object_count
;
7502 size_t remembered_shady_count
;
7506 check_generation_i(const VALUE child
, void *ptr
)
7508 struct verify_internal_consistency_struct
*data
= (struct verify_internal_consistency_struct
*)ptr
;
7509 const VALUE parent
= data
->parent
;
7511 if (RGENGC_CHECK_MODE
) GC_ASSERT(RVALUE_OLD_P(parent
));
7513 if (!RVALUE_OLD_P(child
)) {
7514 if (!RVALUE_REMEMBERED(parent
) &&
7515 !RVALUE_REMEMBERED(child
) &&
7516 !RVALUE_UNCOLLECTIBLE(child
)) {
7517 fprintf(stderr
, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent
), obj_info(child
));
7524 check_color_i(const VALUE child
, void *ptr
)
7526 struct verify_internal_consistency_struct
*data
= (struct verify_internal_consistency_struct
*)ptr
;
7527 const VALUE parent
= data
->parent
;
7529 if (!RVALUE_WB_UNPROTECTED(parent
) && RVALUE_WHITE_P(child
)) {
7530 fprintf(stderr
, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7531 obj_info(parent
), obj_info(child
));
7537 check_children_i(const VALUE child
, void *ptr
)
7539 struct verify_internal_consistency_struct
*data
= (struct verify_internal_consistency_struct
*)ptr
;
7540 if (check_rvalue_consistency_force(child
, FALSE
) != 0) {
7541 fprintf(stderr
, "check_children_i: %s has error (referenced from %s)",
7542 obj_info(child
), obj_info(data
->parent
));
7543 rb_print_backtrace(); /* C backtrace will help to debug */
7550 verify_internal_consistency_i(void *page_start
, void *page_end
, size_t stride
,
7551 struct verify_internal_consistency_struct
*data
)
7554 rb_objspace_t
*objspace
= data
->objspace
;
7556 for (obj
= (VALUE
)page_start
; obj
!= (VALUE
)page_end
; obj
+= stride
) {
7557 void *poisoned
= asan_poisoned_object_p(obj
);
7558 asan_unpoison_object(obj
, false);
7560 if (is_live_object(objspace
, obj
)) {
7562 data
->live_object_count
++;
7565 /* Normally, we don't expect T_MOVED objects to be in the heap.
7566 * But they can stay alive on the stack, */
7567 if (!gc_object_moved_p(objspace
, obj
)) {
7568 /* moved slots don't have children */
7569 rb_objspace_reachable_objects_from(obj
, check_children_i
, (void *)data
);
7572 /* check health of children */
7573 if (RVALUE_OLD_P(obj
)) data
->old_object_count
++;
7574 if (RVALUE_WB_UNPROTECTED(obj
) && RVALUE_UNCOLLECTIBLE(obj
)) data
->remembered_shady_count
++;
7576 if (!is_marking(objspace
) && RVALUE_OLD_P(obj
)) {
7577 /* reachable objects from an oldgen object should be old or (young with remember) */
7579 rb_objspace_reachable_objects_from(obj
, check_generation_i
, (void *)data
);
7582 if (is_incremental_marking(objspace
)) {
7583 if (RVALUE_BLACK_P(obj
)) {
7584 /* reachable objects from black objects should be black or grey objects */
7586 rb_objspace_reachable_objects_from(obj
, check_color_i
, (void *)data
);
7591 if (BUILTIN_TYPE(obj
) == T_ZOMBIE
) {
7592 GC_ASSERT((RBASIC(obj
)->flags
& ~FL_SEEN_OBJ_ID
) == T_ZOMBIE
);
7593 data
->zombie_object_count
++;
7597 GC_ASSERT(BUILTIN_TYPE(obj
) == T_NONE
);
7598 asan_poison_object(obj
);
7606 gc_verify_heap_page(rb_objspace_t
*objspace
, struct heap_page
*page
, VALUE obj
)
7609 unsigned int has_remembered_shady
= FALSE
;
7610 unsigned int has_remembered_old
= FALSE
;
7611 int remembered_old_objects
= 0;
7612 int free_objects
= 0;
7613 int zombie_objects
= 0;
7614 int stride
= page
->slot_size
/ sizeof(RVALUE
);
7616 for (i
=0; i
<page
->total_slots
; i
+=stride
) {
7617 VALUE val
= (VALUE
)&page
->start
[i
];
7618 void *poisoned
= asan_poisoned_object_p(val
);
7619 asan_unpoison_object(val
, false);
7621 if (RBASIC(val
) == 0) free_objects
++;
7622 if (BUILTIN_TYPE(val
) == T_ZOMBIE
) zombie_objects
++;
7623 if (RVALUE_PAGE_UNCOLLECTIBLE(page
, val
) && RVALUE_PAGE_WB_UNPROTECTED(page
, val
)) {
7624 has_remembered_shady
= TRUE
;
7626 if (RVALUE_PAGE_MARKING(page
, val
)) {
7627 has_remembered_old
= TRUE
;
7628 remembered_old_objects
++;
7632 GC_ASSERT(BUILTIN_TYPE(val
) == T_NONE
);
7633 asan_poison_object(val
);
7637 if (!is_incremental_marking(objspace
) &&
7638 page
->flags
.has_remembered_objects
== FALSE
&& has_remembered_old
== TRUE
) {
7640 for (i
=0; i
<page
->total_slots
; i
++) {
7641 VALUE val
= (VALUE
)&page
->start
[i
];
7642 if (RVALUE_PAGE_MARKING(page
, val
)) {
7643 fprintf(stderr
, "marking -> %s\n", obj_info(val
));
7646 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7647 (void *)page
, remembered_old_objects
, obj
? obj_info(obj
) : "");
7650 if (page
->flags
.has_uncollectible_shady_objects
== FALSE
&& has_remembered_shady
== TRUE
) {
7651 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7652 (void *)page
, obj
? obj_info(obj
) : "");
7656 /* free_slots may not equal to free_objects */
7657 if (page
->free_slots
!= free_objects
) {
7658 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page
, page
->free_slots
, free_objects
);
7661 if (page
->final_slots
!= zombie_objects
) {
7662 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page
, page
->final_slots
, zombie_objects
);
7665 return remembered_old_objects
;
7669 gc_verify_heap_pages_(rb_objspace_t
*objspace
, struct list_head
*head
)
7671 int remembered_old_objects
= 0;
7672 struct heap_page
*page
= 0;
7674 list_for_each(head
, page
, page_node
) {
7675 asan_unpoison_memory_region(&page
->freelist
, sizeof(RVALUE
*), false);
7676 RVALUE
*p
= page
->freelist
;
7678 VALUE vp
= (VALUE
)p
;
7680 asan_unpoison_object(vp
, false);
7681 if (BUILTIN_TYPE(vp
) != T_NONE
) {
7682 fprintf(stderr
, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp
));
7684 p
= p
->as
.free
.next
;
7685 asan_poison_object(prev
);
7687 asan_poison_memory_region(&page
->freelist
, sizeof(RVALUE
*));
7689 if (page
->flags
.has_remembered_objects
== FALSE
) {
7690 remembered_old_objects
+= gc_verify_heap_page(objspace
, page
, Qfalse
);
7694 return remembered_old_objects
;
7698 gc_verify_heap_pages(rb_objspace_t
*objspace
)
7700 int remembered_old_objects
= 0;
7701 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
7702 remembered_old_objects
+= gc_verify_heap_pages_(objspace
, &(SIZE_POOL_EDEN_HEAP(&size_pools
[i
])->pages
));
7703 remembered_old_objects
+= gc_verify_heap_pages_(objspace
, &(SIZE_POOL_TOMB_HEAP(&size_pools
[i
])->pages
));
7705 return remembered_old_objects
;
7710 * GC.verify_internal_consistency -> nil
7712 * Verify internal consistency.
7714 * This method is implementation specific.
7715 * Now this method checks generational consistency
7716 * if RGenGC is supported.
7719 gc_verify_internal_consistency_m(VALUE dummy
)
7721 gc_verify_internal_consistency(&rb_objspace
);
7726 gc_verify_internal_consistency_(rb_objspace_t
*objspace
)
7728 struct verify_internal_consistency_struct data
= {0};
7730 data
.objspace
= objspace
;
7731 gc_report(5, objspace
, "gc_verify_internal_consistency: start\n");
7733 /* check relations */
7734 for (size_t i
= 0; i
< heap_allocated_pages
; i
++) {
7735 struct heap_page
*page
= heap_pages_sorted
[i
];
7736 short slot_size
= page
->slot_size
;
7738 uintptr_t start
= (uintptr_t)page
->start
;
7739 uintptr_t end
= start
+ page
->total_slots
* slot_size
;
7741 verify_internal_consistency_i((void *)start
, (void *)end
, slot_size
, &data
);
7744 if (data
.err_count
!= 0) {
7745 #if RGENGC_CHECK_MODE >= 5
7746 objspace
->rgengc
.error_count
= data
.err_count
;
7747 gc_marks_check(objspace
, NULL
, NULL
);
7748 allrefs_dump(objspace
);
7750 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7753 /* check heap_page status */
7754 gc_verify_heap_pages(objspace
);
7756 /* check counters */
7758 if (!is_lazy_sweeping(objspace
) &&
7760 ruby_single_main_ractor
!= NULL
) {
7761 if (objspace_live_slots(objspace
) != data
.live_object_count
) {
7762 fprintf(stderr
, "heap_pages_final_slots: %"PRIdSIZE
", "
7763 "objspace->profile.total_freed_objects: %"PRIdSIZE
"\n",
7764 heap_pages_final_slots
, objspace
->profile
.total_freed_objects
);
7765 rb_bug("inconsistent live slot number: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
7766 objspace_live_slots(objspace
), data
.live_object_count
);
7770 if (!is_marking(objspace
)) {
7771 if (objspace
->rgengc
.old_objects
!= data
.old_object_count
) {
7772 rb_bug("inconsistent old slot number: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
7773 objspace
->rgengc
.old_objects
, data
.old_object_count
);
7775 if (objspace
->rgengc
.uncollectible_wb_unprotected_objects
!= data
.remembered_shady_count
) {
7776 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE
", but %"PRIuSIZE
".",
7777 objspace
->rgengc
.uncollectible_wb_unprotected_objects
, data
.remembered_shady_count
);
7782 size_t list_count
= 0;
7785 VALUE z
= heap_pages_deferred_final
;
7788 z
= RZOMBIE(z
)->next
;
7792 if (heap_pages_final_slots
!= data
.zombie_object_count
||
7793 heap_pages_final_slots
!= list_count
) {
7795 rb_bug("inconsistent finalizing object count:\n"
7796 " expect %"PRIuSIZE
"\n"
7797 " but %"PRIuSIZE
" zombies\n"
7798 " heap_pages_deferred_final list has %"PRIuSIZE
" items.",
7799 heap_pages_final_slots
,
7800 data
.zombie_object_count
,
7805 gc_report(5, objspace
, "gc_verify_internal_consistency: OK\n");
7809 gc_verify_internal_consistency(rb_objspace_t
*objspace
)
7813 rb_vm_barrier(); // stop other ractors
7815 unsigned int prev_during_gc
= during_gc
;
7816 during_gc
= FALSE
; // stop gc here
7818 gc_verify_internal_consistency_(objspace
);
7820 during_gc
= prev_during_gc
;
7826 rb_gc_verify_internal_consistency(void)
7828 gc_verify_internal_consistency(&rb_objspace
);
7832 gc_verify_transient_heap_internal_consistency(VALUE dmy
)
7834 rb_transient_heap_verify();
7841 gc_marks_start(rb_objspace_t
*objspace
, int full_mark
)
7844 gc_report(1, objspace
, "gc_marks_start: (%s)\n", full_mark
? "full" : "minor");
7845 gc_mode_transition(objspace
, gc_mode_marking
);
7848 #if GC_ENABLE_INCREMENTAL_MARK
7849 objspace
->rincgc
.step_slots
= (objspace
->marked_slots
* 2) / ((objspace
->rincgc
.pooled_slots
/ HEAP_PAGE_OBJ_LIMIT
) + 1);
7851 if (0) fprintf(stderr
, "objspace->marked_slots: %"PRIdSIZE
", "
7852 "objspace->rincgc.pooled_page_num: %"PRIdSIZE
", "
7853 "objspace->rincgc.step_slots: %"PRIdSIZE
", \n",
7854 objspace
->marked_slots
, objspace
->rincgc
.pooled_slots
, objspace
->rincgc
.step_slots
);
7856 objspace
->flags
.during_minor_gc
= FALSE
;
7857 if (ruby_enable_autocompact
) {
7858 objspace
->flags
.during_compacting
|= TRUE
;
7860 objspace
->profile
.major_gc_count
++;
7861 objspace
->rgengc
.uncollectible_wb_unprotected_objects
= 0;
7862 objspace
->rgengc
.old_objects
= 0;
7863 objspace
->rgengc
.last_major_gc
= objspace
->profile
.count
;
7864 objspace
->marked_slots
= 0;
7866 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
7867 rgengc_mark_and_rememberset_clear(objspace
, SIZE_POOL_EDEN_HEAP(&size_pools
[i
]));
7871 objspace
->flags
.during_minor_gc
= TRUE
;
7872 objspace
->marked_slots
=
7873 objspace
->rgengc
.old_objects
+ objspace
->rgengc
.uncollectible_wb_unprotected_objects
; /* uncollectible objects are marked already */
7874 objspace
->profile
.minor_gc_count
++;
7876 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
7877 rgengc_rememberset_mark(objspace
, SIZE_POOL_EDEN_HEAP(&size_pools
[i
]));
7881 gc_mark_roots(objspace
, NULL
);
7883 gc_report(1, objspace
, "gc_marks_start: (%s) end, stack in %"PRIdSIZE
"\n",
7884 full_mark
? "full" : "minor", mark_stack_size(&objspace
->mark_stack
));
7887 #if GC_ENABLE_INCREMENTAL_MARK
7889 gc_marks_wb_unprotected_objects_plane(rb_objspace_t
*objspace
, uintptr_t p
, bits_t bits
)
7894 gc_report(2, objspace
, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE
)p
));
7895 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE
)p
));
7896 GC_ASSERT(RVALUE_MARKED((VALUE
)p
));
7897 gc_mark_children(objspace
, (VALUE
)p
);
7899 p
+= sizeof(RVALUE
);
7906 gc_marks_wb_unprotected_objects(rb_objspace_t
*objspace
, rb_heap_t
*heap
)
7908 struct heap_page
*page
= 0;
7910 list_for_each(&heap
->pages
, page
, page_node
) {
7911 bits_t
*mark_bits
= page
->mark_bits
;
7912 bits_t
*wbun_bits
= page
->wb_unprotected_bits
;
7913 RVALUE
*p
= page
->start
;
7916 bits_t bits
= mark_bits
[0] & wbun_bits
[0];
7917 bits
>>= NUM_IN_PAGE(p
);
7918 gc_marks_wb_unprotected_objects_plane(objspace
, (uintptr_t)p
, bits
);
7919 p
+= (BITS_BITLENGTH
- NUM_IN_PAGE(p
));
7921 for (j
=1; j
<HEAP_PAGE_BITMAP_LIMIT
; j
++) {
7922 bits_t bits
= mark_bits
[j
] & wbun_bits
[j
];
7924 gc_marks_wb_unprotected_objects_plane(objspace
, (uintptr_t)p
, bits
);
7925 p
+= BITS_BITLENGTH
;
7929 gc_mark_stacked_objects_all(objspace
);
7932 static struct heap_page
*
7933 heap_move_pooled_pages_to_free_pages(rb_heap_t
*heap
)
7935 struct heap_page
*page
= heap
->pooled_pages
;
7938 heap
->pooled_pages
= page
->free_next
;
7939 heap_add_freepage(heap
, page
);
7947 gc_marks_finish(rb_objspace_t
*objspace
)
7949 #if GC_ENABLE_INCREMENTAL_MARK
7950 /* finish incremental GC */
7951 if (is_incremental_marking(objspace
)) {
7952 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
7953 rb_heap_t
*heap
= SIZE_POOL_EDEN_HEAP(&size_pools
[i
]);
7954 if (heap
->pooled_pages
) {
7955 heap_move_pooled_pages_to_free_pages(heap
);
7956 gc_report(1, objspace
, "gc_marks_finish: pooled pages are exists. retry.\n");
7957 return FALSE
; /* continue marking phase */
7961 if (RGENGC_CHECK_MODE
&& is_mark_stack_empty(&objspace
->mark_stack
) == 0) {
7962 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE
").",
7963 mark_stack_size(&objspace
->mark_stack
));
7966 gc_mark_roots(objspace
, 0);
7968 if (is_mark_stack_empty(&objspace
->mark_stack
) == FALSE
) {
7969 gc_report(1, objspace
, "gc_marks_finish: not empty (%"PRIdSIZE
"). retry.\n",
7970 mark_stack_size(&objspace
->mark_stack
));
7974 #if RGENGC_CHECK_MODE >= 2
7975 if (gc_verify_heap_pages(objspace
) != 0) {
7976 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7980 objspace
->flags
.during_incremental_marking
= FALSE
;
7981 /* check children of all marked wb-unprotected objects */
7982 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
7983 gc_marks_wb_unprotected_objects(objspace
, SIZE_POOL_EDEN_HEAP(&size_pools
[i
]));
7986 #endif /* GC_ENABLE_INCREMENTAL_MARK */
7988 #if RGENGC_CHECK_MODE >= 2
7989 gc_verify_internal_consistency(objspace
);
7992 if (is_full_marking(objspace
)) {
7993 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
7994 const double r
= gc_params
.oldobject_limit_factor
;
7995 objspace
->rgengc
.uncollectible_wb_unprotected_objects_limit
= (size_t)(objspace
->rgengc
.uncollectible_wb_unprotected_objects
* r
);
7996 objspace
->rgengc
.old_objects_limit
= (size_t)(objspace
->rgengc
.old_objects
* r
);
7999 #if RGENGC_CHECK_MODE >= 4
8001 gc_marks_check(objspace
, gc_check_after_marks_i
, "after_marks");
8006 /* decide full GC is needed or not */
8007 size_t total_slots
= heap_allocatable_slots(objspace
) + heap_eden_total_slots(objspace
);
8008 size_t sweep_slots
= total_slots
- objspace
->marked_slots
; /* will be swept slots */
8009 size_t max_free_slots
= (size_t)(total_slots
* gc_params
.heap_free_slots_max_ratio
);
8010 size_t min_free_slots
= (size_t)(total_slots
* gc_params
.heap_free_slots_min_ratio
);
8011 int full_marking
= is_full_marking(objspace
);
8012 const int r_cnt
= GET_VM()->ractor
.cnt
;
8013 const int r_mul
= r_cnt
> 8 ? 8 : r_cnt
; // upto 8
8015 GC_ASSERT(heap_eden_total_slots(objspace
) >= objspace
->marked_slots
);
8017 /* setup free-able page counts */
8018 if (max_free_slots
< gc_params
.heap_init_slots
* r_mul
) {
8019 max_free_slots
= gc_params
.heap_init_slots
* r_mul
;
8022 if (sweep_slots
> max_free_slots
) {
8023 heap_pages_freeable_pages
= (sweep_slots
- max_free_slots
) / HEAP_PAGE_OBJ_LIMIT
;
8026 heap_pages_freeable_pages
= 0;
8029 /* check free_min */
8030 if (min_free_slots
< gc_params
.heap_free_slots
* r_mul
) {
8031 min_free_slots
= gc_params
.heap_free_slots
* r_mul
;
8034 if (sweep_slots
< min_free_slots
) {
8035 if (!full_marking
) {
8036 if (objspace
->profile
.count
- objspace
->rgengc
.last_major_gc
< RVALUE_OLD_AGE
) {
8037 full_marking
= TRUE
;
8038 /* do not update last_major_gc, because full marking is not done. */
8039 /* goto increment; */
8042 gc_report(1, objspace
, "gc_marks_finish: next is full GC!!)\n");
8043 objspace
->rgengc
.need_major_gc
|= GPR_FLAG_MAJOR_BY_NOFREE
;
8050 gc_report(1, objspace
, "gc_marks_finish: heap_set_increment!!\n");
8051 rb_size_pool_t
*size_pool
= &size_pools
[0];
8052 size_pool_allocatable_pages_set(objspace
, size_pool
, heap_extend_pages(objspace
, sweep_slots
, total_slots
, heap_allocated_pages
+ heap_allocatable_pages(objspace
)));
8054 heap_increment(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
8060 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8061 const double r
= gc_params
.oldobject_limit_factor
;
8062 objspace
->rgengc
.uncollectible_wb_unprotected_objects_limit
= (size_t)(objspace
->rgengc
.uncollectible_wb_unprotected_objects
* r
);
8063 objspace
->rgengc
.old_objects_limit
= (size_t)(objspace
->rgengc
.old_objects
* r
);
8066 if (objspace
->rgengc
.uncollectible_wb_unprotected_objects
> objspace
->rgengc
.uncollectible_wb_unprotected_objects_limit
) {
8067 objspace
->rgengc
.need_major_gc
|= GPR_FLAG_MAJOR_BY_SHADY
;
8069 if (objspace
->rgengc
.old_objects
> objspace
->rgengc
.old_objects_limit
) {
8070 objspace
->rgengc
.need_major_gc
|= GPR_FLAG_MAJOR_BY_OLDGEN
;
8072 if (RGENGC_FORCE_MAJOR_GC
) {
8073 objspace
->rgengc
.need_major_gc
= GPR_FLAG_MAJOR_BY_FORCE
;
8076 gc_report(1, objspace
, "gc_marks_finish (marks %"PRIdSIZE
" objects, "
8077 "old %"PRIdSIZE
" objects, total %"PRIdSIZE
" slots, "
8078 "sweep %"PRIdSIZE
" slots, increment: %"PRIdSIZE
", next GC: %s)\n",
8079 objspace
->marked_slots
, objspace
->rgengc
.old_objects
, heap_eden_total_slots(objspace
), sweep_slots
, heap_allocatable_pages(objspace
),
8080 objspace
->rgengc
.need_major_gc
? "major" : "minor");
8083 rb_transient_heap_finish_marking();
8084 rb_ractor_finish_marking();
8086 gc_event_hook(objspace
, RUBY_INTERNAL_EVENT_GC_END_MARK
, 0);
8091 #if GC_ENABLE_INCREMENTAL_MARK
8093 gc_marks_step(rb_objspace_t
*objspace
, size_t slots
)
8095 GC_ASSERT(is_marking(objspace
));
8097 if (gc_mark_stacked_objects_incremental(objspace
, slots
)) {
8098 if (gc_marks_finish(objspace
)) {
8103 if (0) fprintf(stderr
, "objspace->marked_slots: %"PRIdSIZE
"\n", objspace
->marked_slots
);
8108 gc_marks_rest(rb_objspace_t
*objspace
)
8110 gc_report(1, objspace
, "gc_marks_rest\n");
8112 #if GC_ENABLE_INCREMENTAL_MARK
8113 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
8114 SIZE_POOL_EDEN_HEAP(&size_pools
[i
])->pooled_pages
= NULL
;
8118 if (is_incremental_marking(objspace
)) {
8120 while (gc_mark_stacked_objects_incremental(objspace
, INT_MAX
) == FALSE
);
8121 } while (gc_marks_finish(objspace
) == FALSE
);
8124 gc_mark_stacked_objects_all(objspace
);
8125 gc_marks_finish(objspace
);
8133 gc_marks_continue(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
, rb_heap_t
*heap
)
8135 GC_ASSERT(dont_gc_val() == FALSE
);
8136 #if GC_ENABLE_INCREMENTAL_MARK
8138 unsigned int lock_lev
;
8139 gc_enter(objspace
, gc_enter_event_mark_continue
, &lock_lev
);
8144 if (heap
->pooled_pages
) {
8145 while (heap
->pooled_pages
&& slots
< HEAP_PAGE_OBJ_LIMIT
) {
8146 struct heap_page
*page
= heap_move_pooled_pages_to_free_pages(heap
);
8147 slots
+= page
->free_slots
;
8149 from
= "pooled-pages";
8151 else if (heap_increment(objspace
, size_pool
, heap
)) {
8152 slots
= heap
->free_pages
->free_slots
;
8153 from
= "incremented-pages";
8157 gc_report(2, objspace
, "gc_marks_continue: provide %d slots from %s.\n",
8159 gc_marks_step(objspace
, objspace
->rincgc
.step_slots
);
8162 gc_report(2, objspace
, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE
").\n",
8163 mark_stack_size(&objspace
->mark_stack
));
8164 gc_marks_rest(objspace
);
8167 gc_exit(objspace
, gc_enter_event_mark_continue
, &lock_lev
);
8172 gc_marks(rb_objspace_t
*objspace
, int full_mark
)
8174 gc_prof_mark_timer_start(objspace
);
8178 gc_marks_start(objspace
, full_mark
);
8179 if (!is_incremental_marking(objspace
)) {
8180 gc_marks_rest(objspace
);
8183 #if RGENGC_PROFILE > 0
8184 if (gc_prof_record(objspace
)) {
8185 gc_profile_record
*record
= gc_prof_record(objspace
);
8186 record
->old_objects
= objspace
->rgengc
.old_objects
;
8189 gc_prof_mark_timer_stop(objspace
);
8195 gc_report_body(int level
, rb_objspace_t
*objspace
, const char *fmt
, ...)
8197 if (level
<= RGENGC_DEBUG
) {
8201 const char *status
= " ";
8204 status
= is_full_marking(objspace
) ? "+" : "-";
8207 if (is_lazy_sweeping(objspace
)) {
8210 if (is_incremental_marking(objspace
)) {
8215 va_start(args
, fmt
);
8216 vsnprintf(buf
, 1024, fmt
, args
);
8219 fprintf(out
, "%s|", status
);
8224 /* bit operations */
8227 rgengc_remembersetbits_get(rb_objspace_t
*objspace
, VALUE obj
)
8229 return RVALUE_REMEMBERED(obj
);
8233 rgengc_remembersetbits_set(rb_objspace_t
*objspace
, VALUE obj
)
8235 struct heap_page
*page
= GET_HEAP_PAGE(obj
);
8236 bits_t
*bits
= &page
->marking_bits
[0];
8238 GC_ASSERT(!is_incremental_marking(objspace
));
8240 if (MARKED_IN_BITMAP(bits
, obj
)) {
8244 page
->flags
.has_remembered_objects
= TRUE
;
8245 MARK_IN_BITMAP(bits
, obj
);
8252 /* return FALSE if already remembered */
8254 rgengc_remember(rb_objspace_t
*objspace
, VALUE obj
)
8256 gc_report(6, objspace
, "rgengc_remember: %s %s\n", obj_info(obj
),
8257 rgengc_remembersetbits_get(objspace
, obj
) ? "was already remembered" : "is remembered now");
8259 check_rvalue_consistency(obj
);
8261 if (RGENGC_CHECK_MODE
) {
8262 if (RVALUE_WB_UNPROTECTED(obj
)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj
));
8265 #if RGENGC_PROFILE > 0
8266 if (!rgengc_remembered(objspace
, obj
)) {
8267 if (RVALUE_WB_UNPROTECTED(obj
) == 0) {
8268 objspace
->profile
.total_remembered_normal_object_count
++;
8269 #if RGENGC_PROFILE >= 2
8270 objspace
->profile
.remembered_normal_object_count_types
[BUILTIN_TYPE(obj
)]++;
8274 #endif /* RGENGC_PROFILE > 0 */
8276 return rgengc_remembersetbits_set(objspace
, obj
);
8280 rgengc_remembered_sweep(rb_objspace_t
*objspace
, VALUE obj
)
8282 int result
= rgengc_remembersetbits_get(objspace
, obj
);
8283 check_rvalue_consistency(obj
);
8288 rgengc_remembered(rb_objspace_t
*objspace
, VALUE obj
)
8290 gc_report(6, objspace
, "rgengc_remembered: %s\n", obj_info(obj
));
8291 return rgengc_remembered_sweep(objspace
, obj
);
8294 #ifndef PROFILE_REMEMBERSET_MARK
8295 #define PROFILE_REMEMBERSET_MARK 0
8299 rgengc_rememberset_mark_plane(rb_objspace_t
*objspace
, uintptr_t p
, bits_t bitset
)
8304 VALUE obj
= (VALUE
)p
;
8305 gc_report(2, objspace
, "rgengc_rememberset_mark: mark %s\n", obj_info(obj
));
8306 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj
));
8307 GC_ASSERT(RVALUE_OLD_P(obj
) || RVALUE_WB_UNPROTECTED(obj
));
8309 gc_mark_children(objspace
, obj
);
8311 p
+= sizeof(RVALUE
);
8318 rgengc_rememberset_mark(rb_objspace_t
*objspace
, rb_heap_t
*heap
)
8321 struct heap_page
*page
= 0;
8322 #if PROFILE_REMEMBERSET_MARK
8323 int has_old
= 0, has_shady
= 0, has_both
= 0, skip
= 0;
8325 gc_report(1, objspace
, "rgengc_rememberset_mark: start\n");
8327 list_for_each(&heap
->pages
, page
, page_node
) {
8328 if (page
->flags
.has_remembered_objects
| page
->flags
.has_uncollectible_shady_objects
) {
8329 RVALUE
*p
= page
->start
;
8330 bits_t bitset
, bits
[HEAP_PAGE_BITMAP_LIMIT
];
8331 bits_t
*marking_bits
= page
->marking_bits
;
8332 bits_t
*uncollectible_bits
= page
->uncollectible_bits
;
8333 bits_t
*wb_unprotected_bits
= page
->wb_unprotected_bits
;
8334 #if PROFILE_REMEMBERSET_MARK
8335 if (page
->flags
.has_remembered_objects
&& page
->flags
.has_uncollectible_shady_objects
) has_both
++;
8336 else if (page
->flags
.has_remembered_objects
) has_old
++;
8337 else if (page
->flags
.has_uncollectible_shady_objects
) has_shady
++;
8339 for (j
=0; j
<HEAP_PAGE_BITMAP_LIMIT
; j
++) {
8340 bits
[j
] = marking_bits
[j
] | (uncollectible_bits
[j
] & wb_unprotected_bits
[j
]);
8341 marking_bits
[j
] = 0;
8343 page
->flags
.has_remembered_objects
= FALSE
;
8346 bitset
>>= NUM_IN_PAGE(p
);
8347 rgengc_rememberset_mark_plane(objspace
, (uintptr_t)p
, bitset
);
8348 p
+= (BITS_BITLENGTH
- NUM_IN_PAGE(p
));
8350 for (j
=1; j
< HEAP_PAGE_BITMAP_LIMIT
; j
++) {
8352 rgengc_rememberset_mark_plane(objspace
, (uintptr_t)p
, bitset
);
8353 p
+= BITS_BITLENGTH
;
8356 #if PROFILE_REMEMBERSET_MARK
8363 #if PROFILE_REMEMBERSET_MARK
8364 fprintf(stderr
, "%d\t%d\t%d\t%d\n", has_both
, has_old
, has_shady
, skip
);
8366 gc_report(1, objspace
, "rgengc_rememberset_mark: finished\n");
8370 rgengc_mark_and_rememberset_clear(rb_objspace_t
*objspace
, rb_heap_t
*heap
)
8372 struct heap_page
*page
= 0;
8374 list_for_each(&heap
->pages
, page
, page_node
) {
8375 memset(&page
->mark_bits
[0], 0, HEAP_PAGE_BITMAP_SIZE
);
8376 memset(&page
->uncollectible_bits
[0], 0, HEAP_PAGE_BITMAP_SIZE
);
8377 memset(&page
->marking_bits
[0], 0, HEAP_PAGE_BITMAP_SIZE
);
8378 memset(&page
->pinned_bits
[0], 0, HEAP_PAGE_BITMAP_SIZE
);
8379 page
->flags
.has_uncollectible_shady_objects
= FALSE
;
8380 page
->flags
.has_remembered_objects
= FALSE
;
NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
        if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
    }

#if 1
    /* mark `a' and remember (default behavior) */
    if (!rgengc_remembered(objspace, a)) {
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            rgengc_remember(objspace, a);
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();
        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
    }
#else
    /* mark `b' and remember */
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
    if (RVALUE_WB_UNPROTECTED(b)) {
        gc_remember_unprotected(objspace, b);
    }
    else {
        RVALUE_AGE_SET_OLD(objspace, b);
        rgengc_remember(objspace, b);
    }

    gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
#endif

    check_rvalue_consistency(a);
    check_rvalue_consistency(b);
}

#if GC_ENABLE_INCREMENTAL_MARK
static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}

NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));

static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));

    if (RVALUE_BLACK_P(a)) {
        if (RVALUE_WHITE_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
            if (!RVALUE_WB_UNPROTECTED(b)) {
                gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
                RVALUE_AGE_SET_OLD(objspace, b);

                if (RVALUE_BLACK_P(b)) {
                    gc_grey(objspace, b);
                }
            }
            else {
                gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
                gc_remember_unprotected(objspace, b);
            }
        }

        if (UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
        }
    }
}
#else
#define gc_writebarrier_incremental(a, b, objspace)
#endif

void
rb_gc_writebarrier(VALUE a, VALUE b)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
    if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");

  retry:
    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
            // do nothing
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        bool retry = false;
        /* slow path */
        RB_VM_LOCK_ENTER_NO_BARRIER();
        {
            if (is_incremental_marking(objspace)) {
                gc_writebarrier_incremental(a, b, objspace);
            }
            else {
                retry = true;
            }
        }
        RB_VM_LOCK_LEAVE_NO_BARRIER();

        if (retry) goto retry;
    }
    return;
}

void
rb_gc_writebarrier_unprotect(VALUE obj)
{
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return;
    }
    else {
        rb_objspace_t *objspace = &rb_objspace;

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
                  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

        if (RVALUE_OLD_P(obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif /* RGENGC_PROFILE >= 2 */
#endif /* RGENGC_PROFILE */
        }
        else {
            RVALUE_AGE_RESET(obj);
        }

        RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
    }
}

/*
 * remember `obj' if needed.
 */
MJIT_FUNC_EXPORTED void
rb_gc_writebarrier_remember(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(obj)) {
            gc_grey(objspace, obj);
        }
    }
    else {
        if (RVALUE_OLD_P(obj)) {
            rgengc_remember(objspace, obj);
        }
    }
}
*rgengc_unprotect_logging_table
;
8569 rgengc_unprotect_logging_exit_func_i(st_data_t key
, st_data_t val
, st_data_t arg
)
8571 fprintf(stderr
, "%s\t%"PRIuVALUE
"\n", (char *)key
, (VALUE
)val
);
8576 rgengc_unprotect_logging_exit_func(void)
8578 st_foreach(rgengc_unprotect_logging_table
, rgengc_unprotect_logging_exit_func_i
, 0);
8582 rb_gc_unprotect_logging(void *objptr
, const char *filename
, int line
)
8584 VALUE obj
= (VALUE
)objptr
;
8586 if (rgengc_unprotect_logging_table
== 0) {
8587 rgengc_unprotect_logging_table
= st_init_strtable();
8588 atexit(rgengc_unprotect_logging_exit_func
);
8591 if (RVALUE_WB_UNPROTECTED(obj
) == 0) {
8596 snprintf(ptr
, 0x100 - 1, "%s|%s:%d", obj_info(obj
), filename
, line
);
8598 if (st_lookup(rgengc_unprotect_logging_table
, (st_data_t
)ptr
, &cnt
)) {
8602 ptr
= (strdup
)(buff
);
8603 if (!ptr
) rb_memerror();
8605 st_insert(rgengc_unprotect_logging_table
, (st_data_t
)ptr
, cnt
);
8610 rb_copy_wb_protected_attribute(VALUE dest
, VALUE obj
)
8612 rb_objspace_t
*objspace
= &rb_objspace
;
8614 if (RVALUE_WB_UNPROTECTED(obj
) && !RVALUE_WB_UNPROTECTED(dest
)) {
8615 if (!RVALUE_OLD_P(dest
)) {
8616 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest
), dest
);
8617 RVALUE_AGE_RESET_RAW(dest
);
8620 RVALUE_DEMOTE(objspace
, dest
);
8624 check_rvalue_consistency(dest
);
8627 /* RGENGC analysis information */
8630 rb_obj_rgengc_writebarrier_protected_p(VALUE obj
)
8632 return RBOOL(!RVALUE_WB_UNPROTECTED(obj
));
8636 rb_obj_rgengc_promoted_p(VALUE obj
)
8638 return RBOOL(OBJ_PROMOTED(obj
));
8642 rb_obj_gc_flags(VALUE obj
, ID
* flags
, size_t max
)
8645 static ID ID_marked
;
8646 static ID ID_wb_protected
, ID_old
, ID_marking
, ID_uncollectible
, ID_pinned
;
8649 #define I(s) ID_##s = rb_intern(#s);
8659 if (RVALUE_WB_UNPROTECTED(obj
) == 0 && n
<max
) flags
[n
++] = ID_wb_protected
;
8660 if (RVALUE_OLD_P(obj
) && n
<max
) flags
[n
++] = ID_old
;
8661 if (RVALUE_UNCOLLECTIBLE(obj
) && n
<max
) flags
[n
++] = ID_uncollectible
;
8662 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj
), obj
) && n
<max
) flags
[n
++] = ID_marking
;
8663 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj
), obj
) && n
<max
) flags
[n
++] = ID_marked
;
8664 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj
), obj
) && n
<max
) flags
[n
++] = ID_pinned
;
8671 rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t
*newobj_cache
)
8673 for (size_t size_pool_idx
= 0; size_pool_idx
< SIZE_POOL_COUNT
; size_pool_idx
++) {
8674 rb_ractor_newobj_size_pool_cache_t
*cache
= &newobj_cache
->size_pool_caches
[size_pool_idx
];
8676 struct heap_page
*page
= cache
->using_page
;
8677 RVALUE
*freelist
= cache
->freelist
;
8678 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page
, (void *)freelist
);
8680 heap_page_freelist_append(page
, freelist
);
8682 cache
->using_page
= NULL
;
8683 cache
->freelist
= NULL
;
8688 rb_gc_force_recycle(VALUE obj
)
8693 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8694 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8698 rb_gc_register_mark_object(VALUE obj
)
8700 if (!is_pointer_to_heap(&rb_objspace
, (void *)obj
))
8705 VALUE ary_ary
= GET_VM()->mark_object_ary
;
8706 VALUE ary
= rb_ary_last(0, 0, ary_ary
);
8708 if (NIL_P(ary
) || RARRAY_LEN(ary
) >= MARK_OBJECT_ARY_BUCKET_SIZE
) {
8709 ary
= rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE
);
8710 rb_ary_push(ary_ary
, ary
);
8713 rb_ary_push(ary
, obj
);
8719 rb_gc_register_address(VALUE
*addr
)
8721 rb_objspace_t
*objspace
= &rb_objspace
;
8722 struct gc_list
*tmp
;
8724 tmp
= ALLOC(struct gc_list
);
8725 tmp
->next
= global_list
;
8731 rb_gc_unregister_address(VALUE
*addr
)
8733 rb_objspace_t
*objspace
= &rb_objspace
;
8734 struct gc_list
*tmp
= global_list
;
8736 if (tmp
->varptr
== addr
) {
8737 global_list
= tmp
->next
;
8742 if (tmp
->next
->varptr
== addr
) {
8743 struct gc_list
*t
= tmp
->next
;
8745 tmp
->next
= tmp
->next
->next
;
8754 rb_global_variable(VALUE
*var
)
8756 rb_gc_register_address(var
);
8763 gc_stress_no_immediate_sweep
,
8764 gc_stress_full_mark_after_malloc
,
8768 #define gc_stress_full_mark_after_malloc_p() \
8769 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8772 heap_ready_to_gc(rb_objspace_t
*objspace
, rb_size_pool_t
*size_pool
, rb_heap_t
*heap
)
8774 if (!heap
->free_pages
) {
8775 if (!heap_increment(objspace
, size_pool
, heap
)) {
8776 size_pool_allocatable_pages_set(objspace
, size_pool
, 1);
8777 heap_increment(objspace
, size_pool
, heap
);
8783 ready_to_gc(rb_objspace_t
*objspace
)
8785 if (dont_gc_val() || during_gc
|| ruby_disable_gc
) {
8786 for (int i
= 0; i
< SIZE_POOL_COUNT
; i
++) {
8787 rb_size_pool_t
*size_pool
= &size_pools
[i
];
8788 heap_ready_to_gc(objspace
, size_pool
, SIZE_POOL_EDEN_HEAP(size_pool
));
8798 gc_reset_malloc_info(rb_objspace_t
*objspace
, bool full_mark
)
8800 gc_prof_set_malloc_info(objspace
);
8802 size_t inc
= ATOMIC_SIZE_EXCHANGE(malloc_increase
, 0);
8803 size_t old_limit
= malloc_limit
;
8805 if (inc
> malloc_limit
) {
8806 malloc_limit
= (size_t)(inc
* gc_params
.malloc_limit_growth_factor
);
8807 if (malloc_limit
> gc_params
.malloc_limit_max
) {
8808 malloc_limit
= gc_params
.malloc_limit_max
;
8812 malloc_limit
= (size_t)(malloc_limit
* 0.98); /* magic number */
8813 if (malloc_limit
< gc_params
.malloc_limit_min
) {
8814 malloc_limit
= gc_params
.malloc_limit_min
;
8819 if (old_limit
!= malloc_limit
) {
8820 fprintf(stderr
, "[%"PRIuSIZE
"] malloc_limit: %"PRIuSIZE
" -> %"PRIuSIZE
"\n",
8821 rb_gc_count(), old_limit
, malloc_limit
);
8824 fprintf(stderr
, "[%"PRIuSIZE
"] malloc_limit: not changed (%"PRIuSIZE
")\n",
8825 rb_gc_count(), malloc_limit
);
8830 /* reset oldmalloc info */
8831 #if RGENGC_ESTIMATE_OLDMALLOC
8833 if (objspace
->rgengc
.oldmalloc_increase
> objspace
->rgengc
.oldmalloc_increase_limit
) {
8834 objspace
->rgengc
.need_major_gc
|= GPR_FLAG_MAJOR_BY_OLDMALLOC
;
8835 objspace
->rgengc
.oldmalloc_increase_limit
=
8836 (size_t)(objspace
->rgengc
.oldmalloc_increase_limit
* gc_params
.oldmalloc_limit_growth_factor
);
8838 if (objspace
->rgengc
.oldmalloc_increase_limit
> gc_params
.oldmalloc_limit_max
) {
8839 objspace
->rgengc
.oldmalloc_increase_limit
= gc_params
.oldmalloc_limit_max
;
8843 if (0) fprintf(stderr
, "%"PRIdSIZE
"\t%d\t%"PRIuSIZE
"\t%"PRIuSIZE
"\t%"PRIdSIZE
"\n",
8845 objspace
->rgengc
.need_major_gc
,
8846 objspace
->rgengc
.oldmalloc_increase
,
8847 objspace
->rgengc
.oldmalloc_increase_limit
,
8848 gc_params
.oldmalloc_limit_max
);
8852 objspace
->rgengc
.oldmalloc_increase
= 0;
8854 if ((objspace
->profile
.latest_gc_info
& GPR_FLAG_MAJOR_BY_OLDMALLOC
) == 0) {
8855 objspace
->rgengc
.oldmalloc_increase_limit
=
8856 (size_t)(objspace
->rgengc
.oldmalloc_increase_limit
/ ((gc_params
.oldmalloc_limit_growth_factor
- 1)/10 + 1));
8857 if (objspace
->rgengc
.oldmalloc_increase_limit
< gc_params
.oldmalloc_limit_min
) {
8858 objspace
->rgengc
.oldmalloc_increase_limit
= gc_params
.oldmalloc_limit_min
;
8866 garbage_collect(rb_objspace_t
*objspace
, unsigned int reason
)
8872 #if GC_PROFILE_MORE_DETAIL
8873 objspace
->profile
.prepare_time
= getrusage_time();
8878 #if GC_PROFILE_MORE_DETAIL
8879 objspace
->profile
.prepare_time
= getrusage_time() - objspace
->profile
.prepare_time
;
8882 ret
= gc_start(objspace
, reason
);
8890 gc_start(rb_objspace_t
*objspace
, unsigned int reason
)
8892 unsigned int do_full_mark
= !!(reason
& GPR_FLAG_FULL_MARK
);
8893 #if GC_ENABLE_INCREMENTAL_MARK
8894 unsigned int immediate_mark
= reason
& GPR_FLAG_IMMEDIATE_MARK
;
8897 /* reason may be clobbered, later, so keep set immediate_sweep here */
8898 objspace
->flags
.immediate_sweep
= !!(reason
& GPR_FLAG_IMMEDIATE_SWEEP
);
8900 /* Explicitly enable compaction (GC.compact) */
8901 objspace
->flags
.during_compacting
= !!(reason
& GPR_FLAG_COMPACT
);
8903 if (!heap_allocated_pages
) return FALSE
; /* heap is not ready */
8904 if (!(reason
& GPR_FLAG_METHOD
) && !ready_to_gc(objspace
)) return TRUE
; /* GC is not allowed */
8906 GC_ASSERT(gc_mode(objspace
) == gc_mode_none
);
8907 GC_ASSERT(!is_lazy_sweeping(objspace
));
8908 GC_ASSERT(!is_incremental_marking(objspace
));
8910 unsigned int lock_lev
;
8911 gc_enter(objspace
, gc_enter_event_start
, &lock_lev
);
8913 #if RGENGC_CHECK_MODE >= 2
8914 gc_verify_internal_consistency(objspace
);
8917 if (ruby_gc_stressful
) {
8918 int flag
= FIXNUM_P(ruby_gc_stress_mode
) ? FIX2INT(ruby_gc_stress_mode
) : 0;
8920 if ((flag
& (1<<gc_stress_no_major
)) == 0) {
8921 do_full_mark
= TRUE
;
8924 objspace
->flags
.immediate_sweep
= !(flag
& (1<<gc_stress_no_immediate_sweep
));
8927 if (objspace
->rgengc
.need_major_gc
) {
8928 reason
|= objspace
->rgengc
.need_major_gc
;
8929 do_full_mark
= TRUE
;
8931 else if (RGENGC_FORCE_MAJOR_GC
) {
8932 reason
= GPR_FLAG_MAJOR_BY_FORCE
;
8933 do_full_mark
= TRUE
;
8936 objspace
->rgengc
.need_major_gc
= GPR_FLAG_NONE
;
8939 if (do_full_mark
&& (reason
& GPR_FLAG_MAJOR_MASK
) == 0) {
8940 reason
|= GPR_FLAG_MAJOR_BY_FORCE
; /* GC by CAPI, METHOD, and so on. */
8943 #if GC_ENABLE_INCREMENTAL_MARK
8944 if (!GC_ENABLE_INCREMENTAL_MARK
|| objspace
->flags
    if (objspace->flags.dont_incremental || immediate_mark) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }
#endif

    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
    objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace, do_full_mark);
    rb_transient_heap_start_marking(do_full_mark);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
    GC_ASSERT(during_gc);

    gc_prof_timer_start(objspace);
    gc_marks(objspace, do_full_mark);
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, gc_enter_event_start, &lock_lev);
    return TRUE;
}

static void
gc_rest(rb_objspace_t *objspace)
{
    int marking = is_incremental_marking(objspace);
    int sweeping = is_lazy_sweeping(objspace);

    if (marking || sweeping) {
        unsigned int lock_lev;
        gc_enter(objspace, gc_enter_event_rest, &lock_lev);

        if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

        if (is_incremental_marking(objspace)) {
            gc_marks_rest(objspace);
        }
        if (is_lazy_sweeping(objspace)) {
            gc_sweep_rest(objspace);
        }
        gc_exit(objspace, gc_enter_event_rest, &lock_lev);
    }
}

struct objspace_and_reason {
    rb_objspace_t *objspace;
    unsigned int reason;
};

static void
gc_current_status_fill(rb_objspace_t *objspace, char *buff)
{
    int i = 0;
    if (is_marking(objspace)) {
        buff[i++] = 'M';
        if (is_full_marking(objspace))        buff[i++] = 'F';
#if GC_ENABLE_INCREMENTAL_MARK
        if (is_incremental_marking(objspace)) buff[i++] = 'I';
#endif
    }
    else if (is_sweeping(objspace)) {
        buff[i++] = 'S';
        if (is_lazy_sweeping(objspace))       buff[i++] = 'L';
    }
    else {
        buff[i++] = 'N';
    }
    buff[i] = '\0';
}

static const char *
gc_current_status(rb_objspace_t *objspace)
{
    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
    return buff;
}

#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    if (direction == 0) { /* enter */
        enter_count++;
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);
#if 1
        /* [last mutator time] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;
#else
        /* [enter_tick] [gc time] [event] */
        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick,
                exit_tick - enter_tick,
                event,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
#endif
    }
}
#else /* PRINT_ENTER_EXIT_TICK */
static inline void
gc_record(rb_objspace_t *objspace, int direction, const char *event)
{
    /* null */
}
#endif /* PRINT_ENTER_EXIT_TICK */
static const char *
gc_enter_event_cstr(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:          return "start";
      case gc_enter_event_mark_continue:  return "mark_continue";
      case gc_enter_event_sweep_continue: return "sweep_continue";
      case gc_enter_event_rest:           return "rest";
      case gc_enter_event_finalizer:      return "finalizer";
      case gc_enter_event_rb_memerror:    return "rb_memerror";
    }
    return NULL;
}

static void
gc_enter_count(enum gc_enter_event event)
{
    switch (event) {
      case gc_enter_event_start:          RB_DEBUG_COUNTER_INC(gc_enter_start); break;
      case gc_enter_event_mark_continue:  RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
      case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
      case gc_enter_event_rest:           RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
      case gc_enter_event_finalizer:      RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
      case gc_enter_event_rb_memerror:    /* nothing */ break;
    }
}

#define MEASURE_GC (objspace->flags.measure_gc)

static bool
gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (!MEASURE_GC) return false;

    switch (event) {
      case gc_enter_event_start:
      case gc_enter_event_mark_continue:
      case gc_enter_event_sweep_continue:
      case gc_enter_event_rest:
        return true;

      default:
        // case gc_enter_event_finalizer:
        // case gc_enter_event_rb_memerror:
        return false;
    }
}

static bool current_process_time(struct timespec *ts);

static void
gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (gc_enter_event_measure_p(objspace, event)) {
        if (!current_process_time(&objspace->profile.start_time)) {
            objspace->profile.start_time.tv_sec = 0;
            objspace->profile.start_time.tv_nsec = 0;
        }
    }
}

static void
gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
{
    if (gc_enter_event_measure_p(objspace, event)) {
        struct timespec end_time;

        if ((objspace->profile.start_time.tv_sec > 0 ||
             objspace->profile.start_time.tv_nsec > 0) &&
            current_process_time(&end_time)) {

            if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
                return; /* the clock jumped backwards: ignore this sample */
            }
            else {
                uint64_t ns =
                    (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
                    (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
                objspace->profile.total_time_ns += ns;
            }
        }
    }
}

static void
gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    RB_VM_LOCK_ENTER_LEV(lock_lev);

    gc_enter_clock(objspace, event);

    switch (event) {
      case gc_enter_event_rest:
        if (!is_marking(objspace)) break;
        // fall through
      case gc_enter_event_start:
      case gc_enter_event_mark_continue:
        // stop other ractors
        rb_vm_barrier();
        break;
      default:
        break;
    }

    gc_enter_count(event);
    if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    mjit_gc_start_hook();

    during_gc = TRUE;
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
}

static void
gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
{
    GC_ASSERT(during_gc != 0);

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    during_gc = FALSE;

    mjit_gc_exit_hook();
    gc_exit_clock(objspace, event);
    RB_VM_LOCK_LEAVE_LEV(lock_lev);
}
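/*
 * Illustrative sketch (not part of gc.c): the timespec-delta folding that
 * gc_exit_clock() performs above, shown standalone.  `measured_total_ns` is a
 * hypothetical accumulator standing in for objspace->profile.total_time_ns.
 *
 *     #include <stdint.h>
 *     #include <time.h>
 *
 *     static uint64_t measured_total_ns;
 *
 *     static void
 *     accumulate_elapsed_ns(const struct timespec *start, const struct timespec *end)
 *     {
 *         if (end->tv_sec < start->tv_sec) return;   // clock went backwards: ignore
 *         measured_total_ns +=
 *             (uint64_t)(end->tv_sec - start->tv_sec) * (1000 * 1000 * 1000) +
 *             (end->tv_nsec - start->tv_nsec);
 *     }
 */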
static void *
gc_with_gvl(void *ptr)
{
    struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
}

static int
garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
{
    if (dont_gc_val()) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);
    }
    else {
        if (ruby_native_thread_p()) {
            struct objspace_and_reason oar;
            oar.objspace = objspace;
            oar.reason = reason;
            return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
            exit(EXIT_FAILURE);
        }
    }
}

static VALUE
gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_METHOD);

    /* For now, compact implies full mark / sweep, so ignore other flags */
    if (RTEST(compact)) {
        /* On platforms other than MinGW and Windows, the read barrier relies on
         * mprotect over mmap-aligned pages, so compaction must be disabled when
         * aligned mmap allocation is unavailable. */
#if !defined(__MINGW32__) && !defined(_WIN32)
        if (!USE_MMAP_ALIGNED_ALLOC) {
            rb_raise(rb_eNotImpError, "Compaction isn't available on this platform");
        }
#endif

        reason |= GPR_FLAG_COMPACT;
    }
    else {
        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
    }

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    return Qnil;
}
static int
gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(!SPECIAL_CONST_P(obj));

    switch (BUILTIN_TYPE(obj)) {
      case T_NONE:
      case T_NIL:
      case T_MOVED:
      case T_ZOMBIE:
        return FALSE;

      case T_SYMBOL:
        if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
            return FALSE;
        }
        /* fall through */
      case T_STRING:
      case T_OBJECT:
      case T_FLOAT:
      case T_IMEMO:
      case T_ARRAY:
      case T_BIGNUM:
      case T_ICLASS:
      case T_MODULE:
      case T_REGEXP:
      case T_DATA:
      case T_MATCH:
      case T_STRUCT:
      case T_HASH:
      case T_FILE:
      case T_COMPLEX:
      case T_RATIONAL:
      case T_CLASS:
        if (FL_TEST(obj, FL_FINALIZE)) {
            /* The finalizer table is a numtable. It looks up objects by address.
             * We can't mark the keys in the finalizer table because that would
             * prevent the objects from being collected. This check prevents
             * objects that are keys in the finalizer table from being moved
             * without directly pinning them. */
            if (st_is_member(finalizer_table, obj)) {
                return FALSE;
            }
        }
        GC_ASSERT(RVALUE_MARKED(obj));
        GC_ASSERT(!RVALUE_PINNED(obj));

        return TRUE;

      default:
        rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
        break;
    }

    return FALSE;
}
static VALUE
gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
{
    int marked;
    int wb_unprotected;
    int uncollectible;
    int marking;
    RVALUE *dest = (RVALUE *)free;
    RVALUE *src = (RVALUE *)scan;

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);

    GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));

    /* Save off bits for current object. */
    marked = rb_objspace_marked_object_p((VALUE)src);
    wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
    uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
    marking = RVALUE_MARKING((VALUE)src);

    /* Clear bits for eventual T_MOVED */
    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
    CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);

    if (FL_TEST((VALUE)src, FL_EXIVAR)) {
        /* Same deal as below. Generic ivars are held in st tables.
         * Resizing the table could cause a GC to happen and we can't allow it */
        VALUE already_disabled = rb_gc_disable_no_rest();
        rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    }

    st_data_t srcid = (st_data_t)src, id;

    /* If the source object's object_id has been seen, we need to update
     * the object to object id mapping. */
    if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
        /* inserting in the st table can cause the GC to run. We need to
         * prevent re-entry in to the GC since `gc_move` is running in the GC,
         * so temporarily disable the GC around the st table mutation */
        VALUE already_disabled = rb_gc_disable_no_rest();
        st_delete(objspace->obj_to_id_tbl, &srcid, 0);
        st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
        if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
    }

    /* Move the object */
    memcpy(dest, src, slot_size);
    memset(src, 0, slot_size);

    /* Set bits for object in new location */
    if (marking) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
    }

    if (marked) {
        MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
    }

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
    }

    /* Assign forwarding address */
    src->as.moved.flags = T_MOVED;
    src->as.moved.dummy = Qundef;
    src->as.moved.destination = (VALUE)dest;
    GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);

    return (VALUE)src;
}
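/*
 * Illustrative sketch (not part of gc.c): after gc_move() the source slot is a
 * T_MOVED cell whose `destination` field points at the new slot, so a stale
 * reference can be resolved with one extra load.  This mirrors what
 * rb_gc_location() does later in this file; `stale` is a hypothetical VALUE
 * captured before compaction.
 *
 *     VALUE resolved = stale;
 *     if (BUILTIN_TYPE(stale) == T_MOVED) {
 *         resolved = (VALUE)RMOVED(stale)->destination;  // follow the forwarding pointer
 *     }
 */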
static int
compare_free_slots(const void *left, const void *right, void *dummy)
{
    struct heap_page *left_page;
    struct heap_page *right_page;

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
}

static void
gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
{
    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
        rb_size_pool_t *size_pool = &size_pools[j];

        size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
        size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
        struct heap_page *page = 0, **page_list = malloc(size);
        size_t i = 0;

        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            page_list[i++] = page;
        }

        GC_ASSERT((size_t)i == total_pages);

        /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
         * head of the list, so empty pages will end up at the start of the heap */
        ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);

        /* Reset the eden heap */
        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);

        for (i = 0; i < total_pages; i++) {
            list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
            }
        }

        free(page_list);
    }
}
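/*
 * Illustrative sketch (not part of gc.c): the comparator contract used above.
 * Because compare_free_slots() returns (left->free_slots - right->free_slots),
 * ruby_qsort() orders pages from fewest to most free slots, and since
 * heap_add_page() pushes onto the head of the list, re-adding in that order
 * leaves the emptiest pages at the front of the heap.  The same idea with
 * plain ints and the C library qsort():
 *
 *     #include <stdlib.h>
 *
 *     static int
 *     cmp_int_asc(const void *a, const void *b)
 *     {
 *         return *(const int *)a - *(const int *)b;   // ascending, like free_slots
 *     }
 *
 *     // qsort(free_slot_counts, n, sizeof(int), cmp_int_asc);
 */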
static void
gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
{
    long i, len;

    if (FL_TEST(v, ELTS_SHARED))
        return;

    len = RARRAY_LEN(v);
    if (len > 0) {
        VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
        for (i = 0; i < len; i++) {
            UPDATE_IF_MOVED(objspace, ptr[i]);
        }
    }
}

static void
gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
{
    VALUE *ptr = ROBJECT_IVPTR(v);

    uint32_t i, len = ROBJECT_NUMIV(v);
    for (i = 0; i < len; i++) {
        UPDATE_IF_MOVED(objspace, ptr[i]);
    }
}

static int
hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*key)) {
        *key = rb_gc_location((VALUE)*key);
    }

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)key)) {
        return ST_REPLACE;
    }

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CHECK;
}

static int
hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ST_CONTINUE;
}

static int
hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
{
    rb_objspace_t *objspace;

    objspace = (rb_objspace_t *)argp;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ST_REPLACE;
    }
    return ST_CHECK;
}

static void
gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

static void
gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
{
    if (!tbl || tbl->num_entries == 0) return;

    if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
        rb_raise(rb_eRuntimeError, "hash modified during iteration");
    }
}

/* Update MOVED references in an st_table */
void
rb_gc_update_tbl_refs(st_table *ptr)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_update_table_refs(objspace, ptr);
}

static void
gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
{
    rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
}
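/*
 * Illustrative sketch (not part of gc.c): the two-callback protocol used with
 * st_foreach_with_replace() above.  The "foreach" callback inspects each entry
 * and returns ST_REPLACE when it wants the "replace" callback invoked with
 * pointers to the stored key/value; returning ST_CHECK or ST_CONTINUE leaves
 * the entry untouched.  A minimal pair that rewrites one hypothetical sentinel
 * value (assuming Ruby's st.h) might look like this:
 *
 *     static int
 *     want_replace(st_data_t key, st_data_t value, st_data_t arg, int error)
 *     {
 *         return (value == (st_data_t)0xdead) ? ST_REPLACE : ST_CHECK;
 *     }
 *
 *     static int
 *     do_replace(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
 *     {
 *         *value = (st_data_t)0xbeef;   // rewrite in place, key left alone
 *         return ST_CONTINUE;
 *     }
 *
 *     // st_foreach_with_replace(tbl, want_replace, do_replace, 0);
 */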
static void
gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
{
    rb_method_definition_t *def = me->def;

    UPDATE_IF_MOVED(objspace, me->owner);
    UPDATE_IF_MOVED(objspace, me->defined_class);

    if (def) {
        switch (def->type) {
          case VM_METHOD_TYPE_ISEQ:
            if (def->body.iseq.iseqptr) {
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
            }
            TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
            break;
          case VM_METHOD_TYPE_ATTRSET:
          case VM_METHOD_TYPE_IVAR:
            UPDATE_IF_MOVED(objspace, def->body.attr.location);
            break;
          case VM_METHOD_TYPE_BMETHOD:
            UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
            break;
          case VM_METHOD_TYPE_ALIAS:
            TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
            break;
          case VM_METHOD_TYPE_REFINED:
            TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
            UPDATE_IF_MOVED(objspace, def->body.refined.owner);
            break;
          case VM_METHOD_TYPE_CFUNC:
          case VM_METHOD_TYPE_ZSUPER:
          case VM_METHOD_TYPE_MISSING:
          case VM_METHOD_TYPE_OPTIMIZED:
          case VM_METHOD_TYPE_UNDEF:
          case VM_METHOD_TYPE_NOTIMPLEMENTED:
            break;
        }
    }
}

static void
gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
{
    long i;

    for (i=0; i<n; i++) {
        UPDATE_IF_MOVED(objspace, values[i]);
    }
}
static void
gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
{
    switch (imemo_type(obj)) {
      case imemo_env:
        {
            rb_env_t *env = (rb_env_t *)obj;
            if (LIKELY(env->ep)) {
                // ep can still be NULL here just after newobj().
                TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
                UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
                gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
            }
        }
        break;
      case imemo_cref:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
        TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
        break;
      case imemo_svar:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
        break;
      case imemo_throw_data:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
        break;
      case imemo_memo:
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
        UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
        break;
      case imemo_ment:
        gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
        break;
      case imemo_iseq:
        rb_iseq_update_references((rb_iseq_t *)obj);
        break;
      case imemo_ast:
        rb_ast_update_references((rb_ast_t *)obj);
        break;
      case imemo_callcache:
        {
            const struct rb_callcache *cc = (const struct rb_callcache *)obj;

            UPDATE_IF_MOVED(objspace, cc->klass);
            if (!is_live_object(objspace, cc->klass)) {
                *((VALUE *)(&cc->klass)) = (VALUE)0;
            }

            TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
            if (!is_live_object(objspace, (VALUE)cc->cme_)) {
                *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
            }
        }
        break;
      case imemo_constcache:
        {
            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
            UPDATE_IF_MOVED(objspace, ice->value);
        }
        break;
      case imemo_parser_strterm:
      case imemo_callinfo:
        break;
      default:
        rb_bug("not reachable %d", imemo_type(obj));
        break;
    }
}

static enum rb_id_table_iterator_result
check_id_table_move(ID id, VALUE value, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)value)) {
        return ID_TABLE_REPLACE;
    }

    return ID_TABLE_CONTINUE;
}
/* Returns the new location of an object, if it moved.  Otherwise returns
 * the existing location. */
VALUE
rb_gc_location(VALUE value)
{
    VALUE destination;

    if (!SPECIAL_CONST_P(value)) {
        void *poisoned = asan_poisoned_object_p(value);
        asan_unpoison_object(value, false);

        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
            GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
        }
        else {
            destination = value;
        }

        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
            asan_poison_object(value);
        }
    }
    else {
        destination = value;
    }

    return destination;
}
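/*
 * Usage sketch (not part of gc.c): a TypedData extension that wants its
 * references to survive compaction marks them with rb_gc_mark_movable() and
 * re-reads them through rb_gc_location() from its dcompact callback, which
 * gc_update_object_references() invokes for T_DATA objects.  `struct wrapper`
 * and `wrapper_type` are hypothetical names used only for illustration.
 *
 *     struct wrapper { VALUE held; };
 *
 *     static void
 *     wrapper_mark(void *ptr)
 *     {
 *         struct wrapper *w = ptr;
 *         rb_gc_mark_movable(w->held);          // allow the GC to move it
 *     }
 *
 *     static void
 *     wrapper_compact(void *ptr)
 *     {
 *         struct wrapper *w = ptr;
 *         w->held = rb_gc_location(w->held);    // pick up the new address
 *     }
 *
 *     static const rb_data_type_t wrapper_type = {
 *         "wrapper",
 *         { wrapper_mark, RUBY_TYPED_DEFAULT_FREE, NULL, wrapper_compact },
 *         0, 0, RUBY_TYPED_FREE_IMMEDIATELY
 *     };
 */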
static enum rb_id_table_iterator_result
update_id_table(ID *key, VALUE * value, void *data, int existing)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, (VALUE)*value)) {
        *value = rb_gc_location((VALUE)*value);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
    }
}

static enum rb_id_table_iterator_result
update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
{
    rb_objspace_t *objspace = (rb_objspace_t *)data;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    VM_ASSERT(vm_ccs_p(ccs));

    if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
        ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
    }

    for (int i=0; i<ccs->len; i++) {
        if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
            ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
        }
        if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
            ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
        }
    }

    // do not replace
    return ID_TABLE_CONTINUE;
}

static void
update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
    }
}

static enum rb_id_table_iterator_result
update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
{
    struct rb_cvar_class_tbl_entry *entry;

    entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;

    entry->class_value = rb_gc_location(entry->class_value);

    return ID_TABLE_CONTINUE;
}

static void
update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
{
    struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
    if (tbl) {
        rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
    }
}

static enum rb_id_table_iterator_result
update_const_table(VALUE value, void *data)
{
    rb_const_entry_t *ce = (rb_const_entry_t *)value;
    rb_objspace_t * objspace = (rb_objspace_t *)data;

    if (gc_object_moved_p(objspace, ce->value)) {
        ce->value = rb_gc_location(ce->value);
    }

    if (gc_object_moved_p(objspace, ce->file)) {
        ce->file = rb_gc_location(ce->file);
    }

    return ID_TABLE_CONTINUE;
}

static void
update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
{
    if (!tbl) return;
    rb_id_table_foreach_values(tbl, update_const_table, objspace);
}

static void
update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
{
    while (entry) {
        UPDATE_IF_MOVED(objspace, entry->klass);
        entry = entry->next;
    }
}

static int
update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
    UPDATE_IF_MOVED(objspace, ent->class_value);
    return ST_CONTINUE;
}

static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
    UPDATE_IF_MOVED(objspace, ext->origin_);
    UPDATE_IF_MOVED(objspace, ext->refined_class);
    update_subclass_entries(objspace, ext->subclasses);

    // ext->iv_index_tbl
    if (ext->iv_index_tbl) {
        st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
    }
}
static void
gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
{
    RVALUE *any = RANY(obj);

    gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        update_m_tbl(objspace, RCLASS_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        update_cvc_tbl(objspace, obj);

        gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));

        update_class_ext(objspace, RCLASS_EXT(obj));
        update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
        break;

      case T_ICLASS:
        if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
                !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
            update_m_tbl(objspace, RCLASS_M_TBL(obj));
        }
        if (RCLASS_SUPER((VALUE)obj)) {
            UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
        }
        if (!RCLASS_EXT(obj)) break;
        if (RCLASS_IV_TBL(obj)) {
            gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
        }
        update_class_ext(objspace, RCLASS_EXT(obj));
        update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
        update_cc_tbl(objspace, obj);
        break;

      case T_IMEMO:
        gc_ref_update_imemo(objspace, obj);
        return;

      case T_NIL:
      case T_FIXNUM:
      case T_MOVED:
      case T_NONE:
        /* These can't move */
        return;

      case T_ARRAY:
        if (FL_TEST(obj, ELTS_SHARED)) {
            UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
        }
        else {
            gc_ref_update_array(objspace, obj);
        }
        break;

      case T_HASH:
        gc_ref_update_hash(objspace, obj);
        UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
        break;

      case T_STRING:
        if (STR_SHARED_P(obj)) {
            VALUE orig_shared = any->as.string.as.heap.aux.shared;

            UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);

            VALUE shared = any->as.string.as.heap.aux.shared;
            if (STR_EMBED_P(shared)) {
                size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
                GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
                GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
                any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
            }
        }
        break;

      case T_DATA:
        /* Call the compaction callback, if it exists */
        {
            void *const ptr = DATA_PTR(obj);

            if (RTYPEDDATA_P(obj)) {
                RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
                if (compact_func) (*compact_func)(ptr);
            }
        }
        break;

      case T_OBJECT:
        gc_ref_update_object(objspace, obj);
        break;

      case T_FILE:
        if (any->as.file.fptr) {
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
            UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
        }
        break;

      case T_REGEXP:
        UPDATE_IF_MOVED(objspace, any->as.regexp.src);
        break;

      case T_SYMBOL:
        if (DYNAMIC_SYM_P((VALUE)any)) {
            UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
        }
        break;

      case T_MATCH:
        UPDATE_IF_MOVED(objspace, any->as.match.regexp);

        if (any->as.match.str) {
            UPDATE_IF_MOVED(objspace, any->as.match.str);
        }
        break;

      case T_RATIONAL:
        UPDATE_IF_MOVED(objspace, any->as.rational.num);
        UPDATE_IF_MOVED(objspace, any->as.rational.den);
        break;

      case T_COMPLEX:
        UPDATE_IF_MOVED(objspace, any->as.complex.real);
        UPDATE_IF_MOVED(objspace, any->as.complex.imag);
        break;

      case T_STRUCT:
        {
            long i, len = RSTRUCT_LEN(obj);
            VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);

            for (i = 0; i < len; i++) {
                UPDATE_IF_MOVED(objspace, ptr[i]);
            }
        }
        break;

      default:
        rb_gcdebug_print_obj_condition((VALUE)obj);
        rb_obj_info_dump(obj);
        rb_bug("unreachable");
        break;
    }

    UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);

    gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
}
static void
gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
{
    VALUE v = (VALUE)vstart;
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    page->flags.has_uncollectible_shady_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    /* For each object on the page */
    for (; v != (VALUE)vend; v += stride) {
        void *poisoned = asan_poisoned_object_p(v);
        asan_unpoison_object(v, false);

        switch (BUILTIN_TYPE(v)) {
          case T_NONE:
          case T_MOVED:
          case T_ZOMBIE:
            break;
          default:
            if (RVALUE_WB_UNPROTECTED(v)) {
                page->flags.has_uncollectible_shady_objects = TRUE;
            }
            if (RVALUE_PAGE_MARKING(page, v)) {
                page->flags.has_remembered_objects = TRUE;
            }
            if (page->flags.before_sweep) {
                if (RVALUE_MARKED(v)) {
                    gc_update_object_references(objspace, v);
                }
            }
            else {
                gc_update_object_references(objspace, v);
            }
        }

        if (poisoned) {
            asan_poison_object(v);
        }
    }
}

extern rb_symbols_t ruby_global_symbols;
#define global_symbols ruby_global_symbols

static void
gc_update_references(rb_objspace_t *objspace)
{
    rb_execution_context_t *ec = GET_EC();
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    struct heap_page *page = NULL;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        bool should_set_mark_bits = TRUE;
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        list_for_each(&heap->pages, page, page_node) {
            uintptr_t start = (uintptr_t)page->start;
            uintptr_t end = start + (page->total_slots * size_pool->slot_size);

            gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
            if (page == heap->sweeping_page) {
                should_set_mark_bits = FALSE;
            }
            if (should_set_mark_bits) {
                gc_setup_mark_bits(page);
            }
        }
    }
    rb_vm_update_references(vm);
    rb_transient_heap_update_references();
    rb_gc_update_global_tbl();
    global_symbols.ids = rb_gc_location(global_symbols.ids);
    global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
    gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
    gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
    gc_update_table_refs(objspace, global_symbols.str_sym);
    gc_update_table_refs(objspace, finalizer_table);
}
static VALUE
gc_compact_stats(rb_execution_context_t *ec, VALUE self)
{
    size_t i;
    rb_objspace_t *objspace = &rb_objspace;
    VALUE h = rb_hash_new();
    VALUE considered = rb_hash_new();
    VALUE moved = rb_hash_new();

    for (i=0; i<T_MASK; i++) {
        if (objspace->rcompactor.considered_count_table[i]) {
            rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
        }

        if (objspace->rcompactor.moved_count_table[i]) {
            rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
        }
    }

    rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
    rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);

    return h;
}

static void
root_obj_check_moved_i(const char *category, VALUE obj, void *data)
{
    if (gc_object_moved_p(&rb_objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
    }
}

static void
reachable_object_check_moved_i(VALUE ref, void *data)
{
    VALUE parent = (VALUE)data;
    if (gc_object_moved_p(&rb_objspace, ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
    }
}

static int
heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(&rb_objspace, v)) {
            /* Moved object still on the heap, something may have a reference. */
        }
        else {
            void *poisoned = asan_poisoned_object_p(v);
            asan_unpoison_object(v, false);

            switch (BUILTIN_TYPE(v)) {
              case T_NONE:
              case T_ZOMBIE:
                break;
              default:
                if (!rb_objspace_garbage_object_p(v)) {
                    rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
                }
            }

            if (poisoned) {
                GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
                asan_poison_object(v);
            }
        }
    }

    return 0;
}

static VALUE
gc_compact(rb_execution_context_t *ec, VALUE self)
{
    /* Run GC with compaction enabled */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    return gc_compact_stats(ec, self);
}
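/*
 * Usage sketch (not part of gc.c): the hash built by gc_compact_stats() above
 * has the shape {:considered => {type => count}, :moved => {type => count}},
 * where the type keys are produced by type_sym().  A C caller that obtains it
 * (for example via GC.compact) can drill in with rb_hash_aref; the :T_STRING
 * key below is illustrative:
 *
 *     VALUE stats = rb_funcall(rb_mGC, rb_intern("compact"), 0);
 *     VALUE moved = rb_hash_aref(stats, ID2SYM(rb_intern("moved")));
 *     VALUE moved_strings = rb_hash_aref(moved, ID2SYM(rb_intern("T_STRING")));
 */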
static VALUE
gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
{
    rb_objspace_t *objspace = &rb_objspace;

    /* Clear the heap. */
    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);

    RB_VM_LOCK_ENTER();
    {
        if (RTEST(double_heap)) {
            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                rb_size_pool_t *size_pool = &size_pools[i];
                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
                heap_add_pages(objspace, size_pool, heap, heap->total_pages);
            }
        }

        if (RTEST(toward_empty)) {
            gc_sort_heap_by_empty_slots(objspace);
        }
    }
    RB_VM_LOCK_LEAVE();

    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);

    objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
    objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);

    return gc_compact_stats(ec, self);
}

void
rb_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    unsigned int reason = GPR_DEFAULT_REASON;
    garbage_collect(objspace, reason);
}

int
rb_during_gc(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return during_gc;
}

#if RGENGC_PROFILE >= 2

static const char *type_name(int type, VALUE obj);

static void
gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
{
    VALUE result = rb_hash_new_with_size(T_MASK);
    int i;
    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);
        rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
    }
    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
}
#endif

size_t
rb_gc_count(void)
{
    return rb_objspace.profile.count;
}

static VALUE
gc_count(rb_execution_context_t *ec, VALUE self)
{
    return SIZET2NUM(rb_gc_count());
}
static VALUE
gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
{
    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    VALUE hash = Qnil, key = Qnil;
    VALUE major_by;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

    if (SYMBOL_P(hash_or_key)) {
        key = hash_or_key;
    }
    else if (RB_TYPE_P(hash_or_key, T_HASH)) {
        hash = hash_or_key;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
        S(major_by);
        S(gc_by);
        S(immediate_sweep);
        S(have_finalizer);
        S(state);

        S(stress);
        S(nofree);
        S(oldgen);
        S(shady);
        S(force);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc);
#endif
        S(newobj);
        S(malloc);
        S(method);
        S(capi);

        S(none);
        S(marking);
        S(sweeping);
#undef S
    }

#define SET(name, attr) \
    if (key == sym_##name) \
        return (attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);

    SET(gc_by,
        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI)   ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :
        Qnil
    );

    SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
    SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
    }
#undef SET

    if (!NIL_P(key)) {/* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return hash;
}

VALUE
rb_gc_latest_gc_info(VALUE key)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_info_decode(objspace, key, 0);
}

static VALUE
gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    return gc_info_decode(objspace, arg, 0);
}
enum gc_stat_sym {
    gc_stat_sym_count,
    gc_stat_sym_time,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_sorted_length,
    gc_stat_sym_heap_allocatable_pages,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_heap_tomb_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_compact_count,
    gc_stat_sym_read_barrier_faults,
    gc_stat_sym_total_moved_objects,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
#if RGENGC_PROFILE
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
#endif
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];

static void
setup_gc_stat_symbols(void)
{
    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(count);
        S(time);
        S(heap_allocated_pages);
        S(heap_sorted_length);
        S(heap_allocatable_pages);
        S(heap_available_slots);
        S(heap_live_slots);
        S(heap_free_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(heap_eden_pages);
        S(heap_tomb_pages);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(minor_gc_count);
        S(major_gc_count);
        S(compact_count);
        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
#if RGENGC_PROFILE
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef S
    }
}
static size_t
gc_stat_internal(VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM

    /* implementation dependent counters */
    SET(heap_allocated_pages, heap_allocated_pages);
    SET(heap_sorted_length, heap_pages_sorted_length);
    SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, heap_pages_final_slots);
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden_total_pages(objspace));
    SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
    SET(total_allocated_pages, objspace->profile.total_allocated_pages);
    SET(total_freed_pages, objspace->profile.total_freed_pages);
    SET(total_allocated_objects, objspace->total_allocated_objects);
    SET(total_freed_objects, objspace->profile.total_freed_objects);
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(read_barrier_faults, objspace->profile.read_barrier_faults);
    SET(total_moved_objects, objspace->rcompactor.total_moved);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

#if RGENGC_PROFILE
    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#endif /* RGENGC_PROFILE */
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
    if (hash != Qnil) {
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
    }
#endif

    return 0;
}

static VALUE
gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
{
    if (NIL_P(arg)) {
        arg = rb_hash_new();
    }
    else if (SYMBOL_P(arg)) {
        size_t value = gc_stat_internal(arg);
        return SIZET2NUM(value);
    }
    else if (RB_TYPE_P(arg, T_HASH)) {
        // ok
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol given");
    }

    gc_stat_internal(arg);
    return arg;
}

size_t
rb_gc_stat(VALUE key)
{
    if (SYMBOL_P(key)) {
        size_t value = gc_stat_internal(key);
        return value;
    }
    else {
        gc_stat_internal(key);
        return 0;
    }
}
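/*
 * Usage sketch (not part of gc.c): rb_gc_stat() is the C-level entry point for
 * the table above.  Passing a Symbol returns one counter directly; passing a
 * Hash fills it in place:
 *
 *     size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));
 *
 *     VALUE all = rb_hash_new();
 *     rb_gc_stat(all);   // every gc_stat_sym_* key is now present in `all`
 */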
enum gc_stat_heap_sym {
    gc_stat_heap_sym_slot_size,
    gc_stat_heap_sym_heap_allocatable_pages,
    gc_stat_heap_sym_heap_eden_pages,
    gc_stat_heap_sym_heap_eden_slots,
    gc_stat_heap_sym_heap_tomb_pages,
    gc_stat_heap_sym_heap_tomb_slots,
    gc_stat_heap_sym_last
};

static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];

static void
setup_gc_stat_heap_symbols(void)
{
    if (gc_stat_heap_symbols[0] == 0) {
#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(slot_size);
        S(heap_allocatable_pages);
        S(heap_eden_pages);
        S(heap_eden_slots);
        S(heap_tomb_pages);
        S(heap_tomb_slots);
#undef S
    }
}

static size_t
gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE hash = Qnil, key = Qnil;

    setup_gc_stat_heap_symbols();

    if (RB_TYPE_P(hash_or_sym, T_HASH)) {
        hash = hash_or_sym;
    }
    else if (SYMBOL_P(hash_or_sym)) {
        key = hash_or_sym;
    }
    else {
        rb_raise(rb_eTypeError, "non-hash or symbol argument");
    }

    if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
        rb_raise(rb_eArgError, "size pool index out of range");
    }

    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];

#define SET(name, attr) \
    if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
        return attr; \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));

    SET(slot_size, size_pool->slot_size);
    SET(heap_allocatable_pages, size_pool->allocatable_pages);
    SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
    SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
    SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
    SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
#undef SET

    if (!NIL_P(key)) { /* matched key should return above */
        rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
    }

    return 0;
}

static VALUE
gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
{
    if (NIL_P(heap_name)) {
        if (NIL_P(arg)) {
            arg = rb_hash_new();
        }
        else if (RB_TYPE_P(arg, T_HASH)) {
            // ok
        }
        else {
            rb_raise(rb_eTypeError, "non-hash given");
        }

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            VALUE hash = rb_hash_aref(arg, INT2FIX(i));
            if (NIL_P(hash)) {
                hash = rb_hash_new();
                rb_hash_aset(arg, INT2FIX(i), hash);
            }
            gc_stat_heap_internal(i, hash);
        }
    }
    else if (FIXNUM_P(heap_name)) {
        int size_pool_idx = FIX2INT(heap_name);

        if (NIL_P(arg)) {
            arg = rb_hash_new();
        }
        else if (SYMBOL_P(arg)) {
            size_t value = gc_stat_heap_internal(size_pool_idx, arg);
            return SIZET2NUM(value);
        }
        else if (RB_TYPE_P(arg, T_HASH)) {
            // ok
        }
        else {
            rb_raise(rb_eTypeError, "non-hash or symbol given");
        }

        gc_stat_heap_internal(size_pool_idx, arg);
    }
    else {
        rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
    }

    return arg;
}
static VALUE
gc_stress_get(rb_execution_context_t *ec, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return ruby_gc_stress_mode;
}

static void
gc_stress_set(rb_objspace_t *objspace, VALUE flag)
{
    objspace->flags.gc_stressful = RTEST(flag);
    objspace->gc_stress_mode = flag;
}

static VALUE
gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
{
    rb_objspace_t *objspace = &rb_objspace;
    gc_stress_set(objspace, flag);
    return flag;
}

VALUE
rb_gc_enable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_enable(objspace);
}

VALUE
rb_objspace_gc_enable(rb_objspace_t *objspace)
{
    int old = dont_gc_val();

    dont_gc_off();
    return RBOOL(old);
}

static VALUE
gc_enable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_enable();
}

VALUE
rb_gc_disable_no_rest(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable_no_rest(rb_objspace_t *objspace)
{
    int old = dont_gc_val();

    dont_gc_on();
    return RBOOL(old);
}

VALUE
rb_gc_disable(void)
{
    rb_objspace_t *objspace = &rb_objspace;
    return rb_objspace_gc_disable(objspace);
}

VALUE
rb_objspace_gc_disable(rb_objspace_t *objspace)
{
    gc_rest(objspace);
    return gc_disable_no_rest(objspace);
}

static VALUE
gc_disable(rb_execution_context_t *ec, VALUE _)
{
    return rb_gc_disable();
}

static VALUE
gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
{
    /* On platforms other than MinGW and Windows, the read barrier relies on
     * mprotect over mmap-aligned pages, so automatic compaction must be
     * disabled when aligned mmap allocation is unavailable. */
#if !defined(__MINGW32__) && !defined(_WIN32)
    if (!USE_MMAP_ALIGNED_ALLOC) {
        rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
    }
#endif

    ruby_enable_autocompact = RTEST(v);
    return v;
}

static VALUE
gc_get_auto_compact(rb_execution_context_t *ec, VALUE _)
{
    return RBOOL(ruby_enable_autocompact);
}
static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    const char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
        switch (*end) {
          case 'k': case 'K':
            unit = 1024;
            ++end;
            break;
          case 'm': case 'M':
            unit = 1024*1024;
            ++end;
            break;
          case 'g': case 'G':
            unit = 1024*1024*1024;
            ++end;
            break;
        }
        while (*end && isspace((unsigned char)*end)) end++;
        if (*end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }
        if (unit > 0) {
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
                return 0;
            }
            val *= unit;
        }
        if (val > 0 && (size_t)val > lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            }
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}

static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    const char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
                 val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
          accept:
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
            *default_value = val;
            return 1;
        }
    }
    return 0;
}

static void
gc_set_initial_pages(void)
{
    size_t min_pages;
    rb_objspace_t *objspace = &rb_objspace;

    min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;

    size_t pages_per_class = (min_pages - heap_eden_total_pages(objspace)) / SIZE_POOL_COUNT;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];

        heap_add_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool), pages_per_class);
    }

    heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), min_pages - heap_eden_total_pages(objspace));
}
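/*
 * Illustrative sketch (not part of gc.c): the unit handling that
 * get_envparam_size() applies above, reduced to its core.  "128", "128k",
 * "64M" and "1G" are all accepted, with the suffix scaling the parsed value:
 *
 *     #include <stdlib.h>
 *
 *     static long long
 *     parse_size_with_suffix(const char *s)
 *     {
 *         char *end;
 *         long long v = strtoll(s, &end, 0);
 *         switch (*end) {
 *           case 'k': case 'K': v *= 1024LL; break;
 *           case 'm': case 'M': v *= 1024LL * 1024; break;
 *           case 'g': case 'G': v *= 1024LL * 1024 * 1024; break;
 *         }
 *         return v;   // overflow and lower-bound checks omitted here
 *     }
 */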
/*
 * GC tuning environment variables
 *
 * * RUBY_GC_HEAP_INIT_SLOTS
 *   - Initial allocation slots.
 * * RUBY_GC_HEAP_FREE_SLOTS
 *   - Prepare at least this amount of slots after GC.
 *   - Allocate slots if there are not enough slots.
 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
 *   - Allocate slots by this factor.
 *   - (next slots number) = (current slots number) * (this factor)
 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
 *   - Allocation rate is limited to this number of slots.
 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
 *   - Allocate additional pages when the number of free slots is
 *     lower than the value (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
 *   - Allocate slots to satisfy this formula:
 *       free_slots = total_slots * goal_ratio
 *   - In other words, prepare (total_slots * goal_ratio) free slots.
 *   - If this value is 0.0, RUBY_GC_HEAP_GROWTH_FACTOR is used directly.
 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
 *   - Allow pages to be freed when the number of free slots is
 *     greater than the value (total_slots * (this ratio)).
 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
 *   - Do full GC when the number of old objects is more than R * N,
 *     where R is this factor and
 *     N is the number of old objects just after the last full GC.
 *
 * Obsolete names:
 *   * RUBY_FREE_MIN       -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
 *   * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
 *
 * * RUBY_GC_MALLOC_LIMIT
 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 *
 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
 */

void
ruby_gc_set_params(void)
{
    /* RUBY_GC_HEAP_FREE_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
        /* ok */
    }

    /* RUBY_GC_HEAP_INIT_SLOTS */
    if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
        gc_set_initial_pages();
    }

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
                        0.0, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        rb_objspace_t *objspace = &rb_objspace;
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
#endif
}
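/*
 * Usage example (not part of gc.c): these knobs are read by
 * ruby_gc_set_params() when the VM initializes, so they are set in the
 * environment before the process starts, for instance:
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *     RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO=0.30 \
 *     RUBY_GC_MALLOC_LIMIT=64m ruby app.rb
 *
 * The numeric values and the script name here are arbitrary illustrations,
 * not recommendations.
 */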
static void
reachable_objects_from_callback(VALUE obj)
{
    rb_ractor_t *cr = GET_RACTOR();
    cr->mfd->mark_func(obj, cr->mfd->data);
}

void
rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");

    if (is_markable_object(objspace, obj)) {
        rb_ractor_t *cr = GET_RACTOR();
        struct gc_mark_func_data_struct mfd = {
            .mark_func = func,
            .data = data,
        }, *prev_mfd = cr->mfd;

        cr->mfd = &mfd;
        gc_mark_children(objspace, obj);
        cr->mfd = prev_mfd;
    }
}

struct root_objects_data {
    const char *category;
    void (*func)(const char *category, VALUE, void *);
    void *data;
};

static void
root_objects_from(VALUE obj, void *ptr)
{
    const struct root_objects_data *data = (struct root_objects_data *)ptr;
    (*data->func)(data->category, obj, data->data);
}

void
rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace_reachable_objects_from_root(objspace, func, passing_data);
}

static void
objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
{
    if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");

    rb_ractor_t *cr = GET_RACTOR();
    struct root_objects_data data = {
        .func = func,
        .data = passing_data,
    };
    struct gc_mark_func_data_struct mfd = {
        .mark_func = root_objects_from,
        .data = &data,
    }, *prev_mfd = cr->mfd;

    cr->mfd = &mfd;
    gc_mark_roots(objspace, &data.category);
    cr->mfd = prev_mfd;
}
/*
  ------------------------ Extended allocator ------------------------
*/

struct gc_raise_tag {
    VALUE exc;
    const char *fmt;
    va_list *ap;
};

static void *
gc_vraise(void *ptr)
{
    struct gc_raise_tag *argv = ptr;
    rb_vraise(argv->exc, argv->fmt, *argv->ap);
    UNREACHABLE_RETURN(NULL);
}

static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    struct gc_raise_tag argv = {
        exc, fmt, &ap,
    };

    if (ruby_thread_has_gvl_p()) {
        gc_vraise(&argv);
    }
    else if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
    }
    else {
        /* Not in a ruby thread */
        fprintf(stderr, "%s", "[FATAL] ");
        vfprintf(stderr, fmt, ap);
    }

    va_end(ap);
}

static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);

static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

NORETURN(static void ruby_memerror(void));
RBIMPL_ATTR_MAYBE_UNUSED()
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
        }
    }
    exit(EXIT_FAILURE);
}

void
rb_memerror(void)
{
    rb_execution_context_t *ec = GET_EC();
    rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
    VALUE exc;

    if (0) {
        // Print out pid, sleep, so you can attach debugger to see what went wrong:
        fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
        sleep(60);
    }

    if (during_gc) {
        // TODO: OMG!! How to implement it?
        gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
    }

    exc = nomem_error;
    if (!exc ||
        rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        rb_ec_raised_clear(ec);
    }
    else {
        rb_ec_raised_set(ec, RAISED_NOMEMORY);
        exc = ruby_vm_special_exception_copy(exc);
    }
    ec->errinfo = exc;
    EC_JUMP_TAG(ec, TAG_RAISE);
}
static void *
rb_aligned_malloc(size_t alignment, size_t size)
{
    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);

        char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            return NULL;
        }

        char *aligned = ptr + alignment;
        aligned -= ((VALUE)aligned & (alignment - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + alignment);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for start");
            }
        }

        size_t end_out_of_range_size = alignment - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + size, end_out_of_range_size)) {
                rb_bug("rb_aligned_malloc: munmap failed for end");
            }
        }

        res = (void *)aligned;
    }
    else {
# if defined(HAVE_POSIX_MEMALIGN)
        if (posix_memalign(&res, alignment, size) != 0) {
            return NULL;
        }
# elif defined(HAVE_MEMALIGN)
        res = memalign(alignment, size);
# else
        char *aligned;
        res = malloc(alignment + size + sizeof(void*));
        aligned = (char*)res + alignment + sizeof(void*);
        aligned -= ((VALUE)aligned & (alignment - 1));
        ((void**)aligned)[-1] = res;
        res = (void*)aligned;
# endif
    }
#endif

    /* alignment must be a power of 2 */
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
    return res;
}
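
/*
 * Illustration only (not part of gc.c): a standalone sketch of the
 * plain-malloc fallback used above.  Over-allocate, round the pointer up to
 * the (power-of-two, pointer-sized) boundary, and stash the raw pointer one
 * word before the aligned block so it can be freed later.
 */
#include <stdint.h>
#include <stdlib.h>

static void *
aligned_malloc_fallback(size_t alignment, size_t size)
{
    void *raw = malloc(alignment + size + sizeof(void *));
    if (!raw) return NULL;

    uintptr_t aligned = (uintptr_t)raw + alignment + sizeof(void *);
    aligned &= ~(uintptr_t)(alignment - 1);  /* round down onto the boundary */
    ((void **)aligned)[-1] = raw;            /* remember the raw pointer */
    return (void *)aligned;
}

static void
aligned_free_fallback(void *ptr)
{
    free(((void **)ptr)[-1]);
}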
static void
rb_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#else
    if (USE_MMAP_ALIGNED_ALLOC) {
        GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(ptr, size)) {
            rb_bug("rb_aligned_free: munmap failed");
        }
    }
    else {
# if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
        free(ptr);
# else
        free(((void**)ptr)[-1]);
# endif
    }
#endif
}
static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}

enum memop_type {
    MEMOP_TYPE_MALLOC  = 0,
    MEMOP_TYPE_FREE,
    MEMOP_TYPE_REALLOC
};

static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
    }
}
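
/*
 * Illustration only (not part of gc.c): the same saturating-subtraction loop
 * written with C11 atomics instead of the ruby_atomic.h macros, so the
 * compare-and-swap retry pattern is easier to see in isolation.
 */
#include <stdatomic.h>

static void
sub_nounderflow_c11(_Atomic size_t *var, size_t sub)
{
    size_t val = atomic_load(var);
    if (sub == 0) return;

    for (;;) {
        size_t dec = sub < val ? sub : val;  /* clamp so the counter never wraps below zero */
        if (atomic_compare_exchange_weak(var, &val, val - dec)) break;
        /* on failure, `val` was reloaded by compare_exchange; retry with the fresh value */
    }
}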
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
{
    if (ruby_gc_stressful && ruby_native_thread_p()) {
        unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                               GPR_FLAG_STRESS | GPR_FLAG_MALLOC);

        if (gc_stress_full_mark_after_malloc_p()) {
            reason |= GPR_FLAG_FULL_MARK;
        }
        garbage_collect_with_gvl(objspace, reason);
    }
}
static bool
objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   mem,
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free  " :
                   type == MEMOP_TYPE_REALLOC ? "realloc": "error",
                   new_size, old_size);
    return false;
}
static bool
objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
    if (new_size > old_size) {
        ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
    }
    else {
        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
    }

    if (type == MEMOP_TYPE_MALLOC) {
      retry:
        if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
            if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
                gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                goto retry;
            }
            garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
        }
    }

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {
        ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
    }
    else {
        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
    }

    switch (type) {
      case MEMOP_TYPE_MALLOC:
        ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
        break;
      case MEMOP_TYPE_FREE:
        {
            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
            }
#if MALLOC_ALLOCATED_SIZE_CHECK
            else {
                GC_ASSERT(objspace->malloc_params.allocations > 0);
            }
#endif
        }
        break;
      case MEMOP_TYPE_REALLOC: /* ignore */ break;
    }
#endif
    return true;
}

#define objspace_malloc_increase(...) \
    for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
         !malloc_increase_done; \
         malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
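
/*
 * Illustration only (not part of gc.c): the objspace_malloc_increase() macro
 * above is a "for-based wrapper": the first controlling expression runs
 * before the block at the use site, the third runs after it, and the block
 * itself executes exactly once.  The names below (WITH_TRACE, trace_begin,
 * trace_end) are hypothetical and exist only to demonstrate the pattern.
 */
#include <stdbool.h>
#include <stdio.h>

static bool trace_begin(const char *what) { fprintf(stderr, "begin %s\n", what); return false; }
static bool trace_end(const char *what)   { fprintf(stderr, "end %s\n", what);   return true;  }

#define WITH_TRACE(what) \
    for (bool done = trace_begin(what); !done; done = trace_end(what))

static void
with_trace_demo(void)
{
    WITH_TRACE("allocation") {
        /* the wrapped statement block runs once, between begin and end */
        fprintf(stderr, "...doing work...\n");
    }
}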
struct malloc_obj_info { /* 4 words */
    size_t size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    size_t gen;
    const char *file;
    size_t line;
#endif
};

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
const char *ruby_malloc_info_file;
int ruby_malloc_info_line;
#endif

static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
{
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif

    return size;
}

static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = size;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = objspace->profile.count;
        info->file = ruby_malloc_info_file;
        info->line = info->file ? ruby_malloc_info_line : 0;
#endif
        mem = info + 1;
    }
#endif

    return mem;
}
#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR
#endif

#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
#define TRY_WITH_GC(siz, expr) do {                          \
        const gc_profile_record_flag gpr =                   \
            GPR_FLAG_FULL_MARK      |                        \
            GPR_FLAG_IMMEDIATE_MARK |                        \
            GPR_FLAG_IMMEDIATE_SWEEP|                        \
            GPR_FLAG_MALLOC;                                 \
        objspace_malloc_gc_stress(objspace);                 \
                                                             \
        if (LIKELY((expr))) {                                \
            /* Success on 1st try */                         \
        }                                                    \
        else if (!garbage_collect_with_gvl(objspace, gpr)) { \
            /* @shyouhei thinks this doesn't happen */       \
            rb_bug("TRY_WITH_GC: could not GC");             \
        }                                                    \
        else if ((expr)) {                                   \
            /* Success on 2nd try */                         \
        }                                                    \
        else {                                               \
            rb_bug("TRY_WITH_GC: could not allocate:"        \
                   "%"PRIdSIZE" bytes for %s",               \
                   siz, # expr);                             \
        }                                                    \
    } while (0)
#else
#define TRY_WITH_GC(siz, alloc) do { \
        objspace_malloc_gc_stress(objspace); \
        if (!(alloc) && \
            (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
                GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
                GPR_FLAG_MALLOC) || \
             !(alloc))) { \
            ruby_memerror(); \
        } \
    } while (0)
#endif
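
/*
 * Illustration only (not part of gc.c): the allocate / collect / retry shape
 * of TRY_WITH_GC, written as a plain function.  collect_garbage() is a
 * hypothetical stand-in for garbage_collect_with_gvl(); a real collector
 * would free memory here.
 */
#include <stdio.h>
#include <stdlib.h>

static int collect_garbage(void) { return 1; } /* hypothetical stub */

static void *
try_with_gc_sketch(size_t size)
{
    void *mem = malloc(size);
    if (mem) return mem;                 /* success on 1st try */

    if (!collect_garbage()) {
        fprintf(stderr, "could not reclaim memory\n");
        abort();
    }

    mem = malloc(size);                  /* success on 2nd try, or give up */
    if (!mem) {
        fprintf(stderr, "could not allocate %"PRIdSIZE" bytes\n", size);
        abort();
    }
    return mem;
}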
/* these shouldn't be called directly.
 * objspace_* functions do not check allocation size.
 */
static void *
objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);
}

static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    return size_mul_or_raise(count, elsize, rb_eArgError);
}
static void *
objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
{
    void *mem;

    if (!ptr) return objspace_xmalloc0(objspace, new_size);

    /*
     * The behavior of realloc(ptr, 0) is implementation defined.
     * Therefore we don't use realloc(ptr, 0) for portability reason.
     * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
     */
    if (new_size == 0) {
        if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
            /*
             * - OpenBSD's malloc(3) man page says that when 0 is passed, it
             *   returns a non-NULL pointer to an access-protected memory page.
             *   The returned pointer cannot be read / written at all, but
             *   still be a valid argument of free().
             *
             *   https://man.openbsd.org/malloc.3
             *
             * - Linux's malloc(3) man page says that it _might_ perhaps return
             *   a non-NULL pointer when its argument is 0.  That return value
             *   is safe (and is expected) to be passed to free().
             *
             *   http://man7.org/linux/man-pages/man3/malloc.3.html
             *
             * - As I read the implementation jemalloc's malloc() returns fully
             *   normal 16 bytes memory region when its argument is 0.
             *
             * - As I read the implementation musl libc's malloc() returns
             *   fully normal 32 bytes memory region when its argument is 0.
             *
             * - Other malloc implementations can also return non-NULL.
             */
            objspace_xfree(objspace, ptr, old_size);
            return mem;
        }
        else {
            /*
             * It is dangerous to return NULL here, because that could lead to
             * RCE.  Fallback to 1 byte instead of zero.
             *
             * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
             */
            new_size = 1;
        }
    }

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
        new_size += sizeof(struct malloc_obj_info);
        ptr = info;
        old_size = info->size;
    }
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);
    TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    {
        struct malloc_obj_info *info = mem;
        info->size = new_size;
        mem = info + 1;
    }
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
    return mem;
}
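
/*
 * Illustration only (not part of gc.c): a simplified sketch of the
 * realloc(ptr, 0) policy explained above.  The real objspace_xrealloc()
 * additionally swaps in a fresh zero-size allocation when it can; this
 * sketch only shows the "never pass 0 to realloc" rule.
 */
#include <stdlib.h>

static void *
xrealloc_portable_sketch(void *ptr, size_t new_size)
{
    if (new_size == 0) new_size = 1; /* realloc(ptr, 0) is implementation defined */
    return realloc(ptr, new_size);
}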
#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS

#define MALLOC_INFO_GEN_SIZE 100
#define MALLOC_INFO_SIZE_SIZE 10
static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
static st_table *malloc_info_file_table;

static int
mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
{
    const char *file = (void *)key;
    const size_t *data = (void *)val;

    fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);

    return ST_CONTINUE;
}

__attribute__((destructor))
void
rb_malloc_info_show_results(void)
{
    int i;

    fprintf(stderr, "* malloc_info gen statistics\n");
    for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
        if (i == MALLOC_INFO_GEN_SIZE-1) {
            fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
        else {
            fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
        }
    }

    fprintf(stderr, "* malloc_info size statistics\n");
    for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
        int s = 16 << i;
        fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
    }
    fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);

    if (malloc_info_file_table) {
        fprintf(stderr, "* malloc_info file statistics\n");
        st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
    }
}
#else
void
rb_malloc_info_show_results(void)
{
}
#endif
static void
objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
{
    if (!ptr) {
        /*
         * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
         * its first version.  We would better follow.
         */
        return;
    }
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
    old_size = info->size;

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    {
        int gen = (int)(objspace->profile.count - info->gen);
        int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
        int i;

        malloc_info_gen_cnt[gen_index]++;
        malloc_info_gen_size[gen_index] += info->size;

        for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
            size_t s = 16 << i;
            if (info->size <= s) {
                malloc_info_size[i]++;
                goto found;
            }
        }
        malloc_info_size[i]++;
      found:;

        {
            st_data_t key = (st_data_t)info->file, d;
            size_t *data;

            if (malloc_info_file_table == NULL) {
                malloc_info_file_table = st_init_numtable_with_size(1024);
            }
            if (st_lookup(malloc_info_file_table, key, &d)) {
                /* hit */
                data = (size_t *)d;
            }
            else {
                data = malloc(xmalloc2_size(2, sizeof(size_t)));
                if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
                data[0] = data[1] = 0;
                st_insert(malloc_info_file_table, key, (st_data_t)data);
            }
            data[0]++;
            data[1] += info->size;
        }

        if (0 && gen >= 2) { /* verbose output */
            if (info->file) {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
                        info->size, gen, info->file, info->line);
            }
            else {
                fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
                        info->size, gen);
            }
        }
    }
#endif
#endif
    old_size = objspace_malloc_size(objspace, ptr, old_size);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
        free(ptr);
        RB_DEBUG_COUNTER_INC(heap_xfree);
    }
}
static void *
ruby_xmalloc0(size_t size)
{
    return objspace_xmalloc0(&rb_objspace, size);
}

void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }
    return ruby_xmalloc0(size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
}

static void *
objspace_xcalloc(rb_objspace_t *objspace, size_t size)
{
    void *mem;

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);
}

void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
}

#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
}

void *
ruby_xrealloc_body(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
}

void *
ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (x) {
        objspace_xfree(&rb_objspace, x, size);
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}

void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
/* Mimic ruby_xmalloc, but need not rb_objspace.
 * should return pointer suitable for ruby_xfree
 */
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
        info->gen = 0;
        info->file = NULL;
        info->line = 0;
#endif
        mem = info + 1;
    }
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}

void *
rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
{
    void *ptr;
    VALUE imemo;
    rb_imemo_tmpbuf_t *tmpbuf;

    /* Keep the order; allocate an empty imemo first then xmalloc, to
     * get rid of potential memory leak */
    imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
    *store = imemo;
    ptr = ruby_xmalloc0(size);
    tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
    tmpbuf->ptr = ptr;
    tmpbuf->cnt = cnt;
    return ptr;
}

void *
rb_alloc_tmp_buffer(volatile VALUE *store, long len)
{
    long cnt;

    if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
        rb_raise(rb_eArgError, "negative buffer size (or size too big)");
    }

    return rb_alloc_tmp_buffer_with_count(store, len, cnt);
}

void
rb_free_tmp_buffer(volatile VALUE *store)
{
    rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
    if (s) {
        void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
        s->cnt = 0;
        ruby_xfree(ptr);
    }
}
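
/*
 * Illustration only (not part of gc.c): how a C extension normally reaches
 * this tmp-buffer machinery, through the public ALLOCV_N / ALLOCV_END macros
 * (which end up in rb_alloc_tmp_buffer / rb_free_tmp_buffer above).  The
 * function sum_longs is a hypothetical example; it relies only on the public
 * C API already visible in this file.
 */
static VALUE
sum_longs(VALUE self, VALUE ary)
{
    long n = RARRAY_LEN(ary);
    VALUE tmp;
    long *buf = ALLOCV_N(long, tmp, n);   /* may heap-allocate, GC-protected via the hidden imemo */
    long i, total = 0;

    for (i = 0; i < n; i++) buf[i] = NUM2LONG(RARRAY_AREF(ary, i));
    for (i = 0; i < n; i++) total += buf[i];

    ALLOCV_END(tmp);                      /* releases the buffer via rb_free_tmp_buffer */
    return LONG2NUM(total);
}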
#if MALLOC_ALLOCATED_SIZE
/*
 *  call-seq:
 *     GC.malloc_allocated_size -> Integer
 *
 *  Returns the size of memory allocated by malloc().
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocated_size(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocated_size);
}

/*
 *  call-seq:
 *     GC.malloc_allocations -> Integer
 *
 *  Returns the number of malloc() allocations.
 *
 *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
 */

static VALUE
gc_malloc_allocations(VALUE self)
{
    return UINT2NUM(rb_objspace.malloc_params.allocations);
}
#endif

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
    }
}
/*
  ------------------------------ WeakMap ------------------------------
*/

struct weakmap {
    st_table *obj2wmap;		/* obj -> [ref,...] */
    st_table *wmap2obj;		/* ref -> obj */
    VALUE final;
};

#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0

#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
static int
wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (!is_live_object(objspace, obj)) return ST_DELETE;
    return ST_CONTINUE;
}
#endif

static void
wmap_compact(void *ptr)
{
    struct weakmap *w = ptr;
    if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
    if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
    w->final = rb_gc_location(w->final);
}

static void
wmap_mark(void *ptr)
{
    struct weakmap *w = ptr;
#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
    if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
#endif
    rb_gc_mark_movable(w->final);
}

static int
wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
    return ST_CONTINUE;
}

static void
wmap_free(void *ptr)
{
    struct weakmap *w = ptr;
    st_foreach(w->obj2wmap, wmap_free_map, 0);
    st_free_table(w->obj2wmap);
    st_free_table(w->wmap2obj);
}

static int
wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
{
    VALUE *ptr = (VALUE *)val;
    *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
    return ST_CONTINUE;
}

static size_t
wmap_memsize(const void *ptr)
{
    size_t size;
    const struct weakmap *w = ptr;
    size = sizeof(*w);
    size += st_memsize(w->obj2wmap);
    size += st_memsize(w->wmap2obj);
    st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
    return size;
}

static const rb_data_type_t weakmap_type = {
    "weakmap",
    {
        wmap_mark,
        wmap_free,
        wmap_memsize,
        wmap_compact,
    },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));

static VALUE
wmap_allocate(VALUE klass)
{
    struct weakmap *w;
    VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
    w->obj2wmap = rb_init_identtable();
    w->wmap2obj = rb_init_identtable();
    w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
    return obj;
}

static int
wmap_live_p(rb_objspace_t *objspace, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) return TRUE;
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *poisoned = asan_unpoison_object_temporary(obj);

        enum ruby_value_type t = BUILTIN_TYPE(obj);
        int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
                   is_live_object(objspace, obj));

        if (poisoned) {
            asan_poison_object(obj);
        }

        return ret;
    }
    else {
        return TRUE;
    }
}

static int
wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    VALUE wmap, *ptr, size, i, j;
    if (!existing) return ST_STOP;
    wmap = (VALUE)arg, ptr = (VALUE *)*value;
    for (i = j = 1, size = ptr[0]; i <= size; ++i) {
        if (ptr[i] != wmap) {
            ptr[j++] = ptr[i];
        }
    }
    if (j == 1) {
        ruby_sized_xfree(ptr, i * sizeof(VALUE));
        return ST_DELETE;
    }
    if (j < i) {
        SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
        ptr[0] = j;
        *value = (st_data_t)ptr;
    }
    return ST_CONTINUE;
}

/* :nodoc: */
static VALUE
wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
{
    st_data_t orig, wmap, data;
    VALUE obj, *rids, i, size;
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    /* Get reference from object id. */
    if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
        rb_bug("wmap_finalize: objid is not found.");
    }

    /* obj is original referenced object and/or weak reference. */
    orig = (st_data_t)obj;
    if (st_delete(w->obj2wmap, &orig, &data)) {
        rids = (VALUE *)data;
        size = *rids++;
        for (i = 0; i < size; ++i) {
            wmap = (st_data_t)rids[i];
            st_delete(w->wmap2obj, &wmap, NULL);
        }
        ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
    }

    wmap = (st_data_t)obj;
    if (st_delete(w->wmap2obj, &wmap, &orig)) {
        wmap = (st_data_t)obj;
        st_update(w->obj2wmap, orig, wmap_final_func, wmap);
    }
    return self;
}

struct wmap_iter_arg {
    rb_objspace_t *objspace;
    VALUE value;
};

static VALUE
wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) {
        return rb_str_append(str, rb_inspect(obj));
    }
    else if (wmap_live_p(objspace, obj)) {
        return rb_str_append(str, rb_any_to_s(obj));
    }
    else {
        return rb_str_catf(str, "#<collected:%p>", (void*)obj);
    }
}

static int
wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE str = argp->value;
    VALUE k = (VALUE)key, v = (VALUE)val;

    if (RSTRING_PTR(str)[0] == '#') {
        rb_str_cat2(str, ", ");
    }
    else {
        rb_str_cat2(str, ": ");
        RSTRING_PTR(str)[0] = '#';
    }
    wmap_inspect_append(objspace, str, k);
    rb_str_cat2(str, " => ");
    wmap_inspect_append(objspace, str, v);

    return ST_CONTINUE;
}

static VALUE
wmap_inspect(VALUE self)
{
    VALUE str;
    VALUE c = rb_class_name(CLASS_OF(self));
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);

    args.objspace = &rb_objspace;
    args.value = str;
    st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);

    RSTRING_PTR(str)[0] = '#';
    rb_str_cat2(str, ">");

    return str;
}

static int
wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield_values(2, (VALUE)key, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
    return self;
}

static int
wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield((VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_key(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
    return self;
}

static int
wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
{
    rb_objspace_t *objspace = (rb_objspace_t *)arg;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_yield(obj);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_each_value(VALUE self)
{
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
    return self;
}

static int
wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_ary_push(ary, (VALUE)key);
    }
    return ST_CONTINUE;
}

/* Iterates over keys and objects in a weakly referenced object */
static VALUE
wmap_keys(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
    return args.value;
}

static int
wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
{
    struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
    rb_objspace_t *objspace = argp->objspace;
    VALUE ary = argp->value;
    VALUE obj = (VALUE)val;
    if (wmap_live_p(objspace, obj)) {
        rb_ary_push(ary, obj);
    }
    return ST_CONTINUE;
}

/* Iterates over values and objects in a weakly referenced object */
static VALUE
wmap_values(VALUE self)
{
    struct weakmap *w;
    struct wmap_iter_arg args;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    args.objspace = &rb_objspace;
    args.value = rb_ary_new();
    st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
    return args.value;
}

static int
wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
{
    VALUE size, *ptr, *optr;
    if (existing) {
        size = (ptr = optr = (VALUE *)*val)[0];
        ++size;
        SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
    }
    else {
        optr = 0;
        size = 1;
        ptr = ruby_xmalloc0(2 * sizeof(VALUE));
    }
    ptr[0] = size;
    ptr[size] = (VALUE)arg;
    if (ptr == optr) return ST_STOP;
    *val = (st_data_t)ptr;
    return ST_CONTINUE;
}

/* Creates a weak reference from the given key to the given value */
static VALUE
wmap_aset(VALUE self, VALUE key, VALUE value)
{
    struct weakmap *w;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    if (FL_ABLE(value)) {
        define_final0(value, w->final);
    }
    if (FL_ABLE(key)) {
        define_final0(key, w->final);
    }

    st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
    st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
    return nonspecial_obj_id(value);
}

/* Retrieves a weakly referenced object with the given key */
static VALUE
wmap_lookup(VALUE self, VALUE key)
{
    st_data_t data;
    VALUE obj;
    struct weakmap *w;
    rb_objspace_t *objspace = &rb_objspace;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
    obj = (VALUE)data;
    if (!wmap_live_p(objspace, obj)) return Qundef;
    return obj;
}

/* Retrieves a weakly referenced object with the given key */
static VALUE
wmap_aref(VALUE self, VALUE key)
{
    VALUE obj = wmap_lookup(self, key);
    return obj != Qundef ? obj : Qnil;
}

/* Returns +true+ if +key+ is registered */
static VALUE
wmap_has_key(VALUE self, VALUE key)
{
    return RBOOL(wmap_lookup(self, key) != Qundef);
}

/* Returns the number of referenced objects */
static VALUE
wmap_size(VALUE self)
{
    struct weakmap *w;
    st_index_t n;

    TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
    n = w->wmap2obj->num_entries;
#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
    return ULONG2NUM(n);
#else
    return ULL2NUM(n);
#endif
}
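
/*
 * Illustration only (not part of gc.c): exercising ObjectSpace::WeakMap from
 * C through the public API.  rb_funcall ends up in wmap_aset / wmap_aref
 * above; the function weakmap_demo is a hypothetical example.
 */
static VALUE
weakmap_demo(VALUE self)
{
    VALUE wmap_class = rb_path2class("ObjectSpace::WeakMap");
    VALUE wmap = rb_class_new_instance(0, NULL, wmap_class);
    VALUE key = rb_str_new_cstr("key");
    VALUE val = rb_str_new_cstr("value");

    rb_funcall(wmap, rb_intern("[]="), 2, key, val);   /* wmap_aset */
    return rb_funcall(wmap, rb_intern("[]"), 1, key);  /* wmap_aref: Qnil once val is collected */
}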
/*
  ------------------------------ GC profiler ------------------------------
*/

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100

static bool
current_process_time(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    {
        static int try_clock_gettime = 1;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
            return true;
        }
        else {
            try_clock_gettime = 0;
        }
    }
#endif

#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        struct timeval time;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            ts->tv_sec = time.tv_sec;
            ts->tv_nsec = (int32_t)time.tv_usec * 1000;
            return true;
        }
    }
#endif

#ifdef _WIN32
    {
        FILETIME creation_time, exit_time, kernel_time, user_time;
        ULARGE_INTEGER ui;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
            ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
            ts->tv_sec  = (time_t)(ui.QuadPart / PER100NSEC);
            return true;
        }
    }
#endif

    return false;
}

static double
getrusage_time(void)
{
    struct timespec ts;
    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
    else {
        return 0.0;
    }
}
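
/*
 * Illustration only (not part of gc.c): the measurement getrusage_time()
 * prefers, taken directly from clock_gettime() and returned in seconds.
 * Guarded the same way, since CLOCK_PROCESS_CPUTIME_ID is not universal.
 */
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
static double
process_cpu_seconds_example(void)
{
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9; /* CPU time consumed by this process */
    }
    return 0.0;
}
#endif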
static inline void
gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
{
    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;
        gc_profile_record *record;

        /* create new record */
        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
        }
        if (index >= objspace->profile.size) {
            void *ptr;
            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
            if (!ptr) rb_memerror();
            objspace->profile.records = ptr;
        }
        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");
        }
        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
        MEMZERO(record, gc_profile_record, 1);

        /* setup before-GC parameter */
        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
#ifdef RUSAGE_SELF
        {
            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;
            }
        }
#endif
#endif
    }
}

static inline void
gc_prof_timer_start(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;
#endif
        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();
    }
}

static double
elapsed_time_from(double time)
{
    double now = getrusage_time();
    if (now > time) {
        return now - time;
    }
    else {
        return 0;
    }
}

static inline void
gc_prof_timer_stop(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;
    }
}

#define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

static inline void
gc_prof_mark_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();
    }
#endif
}

static inline void
gc_prof_mark_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
    }
#endif
}

static inline void
gc_prof_sweep_timer_start(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
            objspace->profile.gc_sweep_start_time = getrusage_time();
        }
    }
}

static inline void
gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
{
    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {
        double sweep_time;
        gc_profile_record *record = gc_prof_record(objspace);

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
            /* need to accumulate GC time for lazy sweep after gc() */
            record->gc_time += sweep_time;
        }
        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        }

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
    }
}

static inline void
gc_prof_set_malloc_info(rb_objspace_t *objspace)
{
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;
    }
#endif
}

static inline void
gc_prof_set_heap_info(rb_objspace_t *objspace)
{
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;
#endif

        record->heap_total_objects = total;
        record->heap_use_size = live * sizeof(RVALUE);
        record->heap_total_size = total * sizeof(RVALUE);
    }
}
/*
 *  call-seq:
 *    GC::Profiler.clear          -> nil
 *
 *  Clears the GC profiler data.
 *
 */

static VALUE
gc_profile_clear(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;
    if (p) {
        free(p);
    }
    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.raw_data	-> [Hash, ...]
 *
 *  Returns an Array of individual raw profile data Hashes ordered
 *  from earliest to latest by +:GC_INVOKE_TIME+.
 *
 *  For example:
 *
 *    [
 *	{
 *	   :GC_TIME=>1.3000000000000858e-05,
 *	   :GC_INVOKE_TIME=>0.010634999999999999,
 *	   :HEAP_USE_SIZE=>289640,
 *	   :HEAP_TOTAL_SIZE=>588960,
 *	   :HEAP_TOTAL_OBJECTS=>14724,
 *	   :GC_IS_MARKED=>false
 *	},
 *      # ...
 *    ]
 *
 *  The keys mean:
 *
 *  +:GC_TIME+::
 *	Time elapsed in seconds for this GC run
 *  +:GC_INVOKE_TIME+::
 *	Time elapsed in seconds from startup to when the GC was invoked
 *  +:HEAP_USE_SIZE+::
 *	Total bytes of heap used
 *  +:HEAP_TOTAL_SIZE+::
 *	Total size of heap in bytes
 *  +:HEAP_TOTAL_OBJECTS+::
 *	Total number of objects
 *  +:GC_IS_MARKED+::
 *	Returns +true+ if the GC is in mark phase
 *
 *  If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
 *  to the following hash keys:
 *
 *  +:GC_MARK_TIME+::
 *  +:GC_SWEEP_TIME+::
 *  +:ALLOCATE_INCREASE+::
 *  +:ALLOCATE_LIMIT+::
 *  +:HEAP_USE_PAGES+::
 *  +:HEAP_LIVE_OBJECTS+::
 *  +:HEAP_FREE_OBJECTS+::
 *  +:HAVE_FINALIZE+::
 *
 */

static VALUE
gc_profile_record_get(VALUE _)
{
    VALUE prof;
    VALUE gc_profile = rb_ary_new();
    size_t i;
    rb_objspace_t *objspace = (&rb_objspace);

    if (!objspace->profile.run) {
        return Qnil;
    }

    for (i =0; i < objspace->profile.next_index; i++) {
        gc_profile_record *record = &objspace->profile.records[i];

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
#endif

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#endif
        rb_ary_push(gc_profile, prof);
    }

    return gc_profile;
}
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

static char *
gc_profile_dump_major_reason(unsigned int flags, char *buff)
{
    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
    int i = 0;

    if (reason == GPR_FLAG_NONE) {
        buff[0] = '-';
        buff[1] = 0;
    }
    else {
#define C(x, s) \
  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
      buff[i++] = #x[0]; \
      if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
      buff[i] = 0; \
  }
        C(NOFREE, N);
        C(OLDGEN, O);
        C(SHADY,  S);
#if RGENGC_ESTIMATE_OLDMALLOC
        C(OLDMALLOC, M);
#endif
#undef C
    }
    return buff;
}
#endif

static void
gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
{
    rb_objspace_t *objspace = &rb_objspace;
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count /* > 1 */) {
        size_t i;
        const gc_profile_record *record;

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
                          "More detail.\n" \
                          "Prepare Time = Previously GC's rest sweep time\n"
                          "Index Flags          Allocate Inc.  Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                          "  Allocated Size"
#endif
                          "  Use Page     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
#if RGENGC_PROFILE
                          " OldgenObj RemNormObj RemShadObj"
#endif
#if GC_PROFILE_DETAIL_MEMORY
                          " MaxRSS(KB) MinorFLT MajorFLT"
#endif
                          "\n";
        append(out, rb_str_new_cstr(str));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %15"PRIuSIZE
#endif
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if RGENGC_PROFILE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   "%11ld %8ld %8ld"
#endif
                                   "\n",
                                   i+1,
                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects
#if RGENGC_PROFILE
                                   ,
                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#endif
#if GC_PROFILE_DETAIL_MEMORY
                                   ,
                                   record->maxrss / 1024,
                                   record->minflt,
                                   record->majflt
#endif
                       ));
        }
#endif
    }
}
/*
 *  call-seq:
 *     GC::Profiler.result  -> String
 *
 *  Returns a profile data report such as:
 *
 *    GC 1 invokes.
 *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC time(ms)
 *        1               0.012               159240               212940                10647         0.00000000000001530000
 */

static VALUE
gc_profile_result(VALUE _)
{
    VALUE str = rb_str_buf_new(0);
    gc_profile_dump_on(str, rb_str_buf_append);
    return str;
}

/*
 *  call-seq:
 *     GC::Profiler.report
 *     GC::Profiler.report(io)
 *
 *  Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
 *
 */

static VALUE
gc_profile_report(int argc, VALUE *argv, VALUE self)
{
    VALUE out;

    out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
    gc_profile_dump_on(out, rb_io_write);

    return Qnil;
}

/*
 *  call-seq:
 *     GC::Profiler.total_time	-> float
 *
 *  The total time used for garbage collection in seconds
 */

static VALUE
gc_profile_total_time(VALUE self)
{
    double time = 0;
    rb_objspace_t *objspace = &rb_objspace;

    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t i;
        size_t count = objspace->profile.next_index;

        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;
        }
    }
    return DBL2NUM(time);
}

/*
 *  call-seq:
 *    GC::Profiler.enabled?	-> true or false
 *
 *  The current status of GC profile mode.
 */

static VALUE
gc_profile_enable_get(VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    return RBOOL(objspace->profile.run);
}

/*
 *  call-seq:
 *    GC::Profiler.enable	-> nil
 *
 *  Starts the GC profiler.
 *
 */

static VALUE
gc_profile_enable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;
    return Qnil;
}

/*
 *  call-seq:
 *    GC::Profiler.disable	-> nil
 *
 *  Stops the GC profiler.
 *
 */

static VALUE
gc_profile_disable(VALUE _)
{
    rb_objspace_t *objspace = &rb_objspace;

    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;
    return Qnil;
}
/*
  ------------------------------ DEBUG ------------------------------
*/

static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
#define TYPE_NAME(t) case (t): return #t;
        TYPE_NAME(T_NONE);
        TYPE_NAME(T_OBJECT);
        TYPE_NAME(T_CLASS);
        TYPE_NAME(T_MODULE);
        TYPE_NAME(T_FLOAT);
        TYPE_NAME(T_STRING);
        TYPE_NAME(T_REGEXP);
        TYPE_NAME(T_ARRAY);
        TYPE_NAME(T_HASH);
        TYPE_NAME(T_STRUCT);
        TYPE_NAME(T_BIGNUM);
        TYPE_NAME(T_FILE);
        TYPE_NAME(T_MATCH);
        TYPE_NAME(T_COMPLEX);
        TYPE_NAME(T_RATIONAL);
        TYPE_NAME(T_NIL);
        TYPE_NAME(T_TRUE);
        TYPE_NAME(T_FALSE);
        TYPE_NAME(T_SYMBOL);
        TYPE_NAME(T_FIXNUM);
        TYPE_NAME(T_UNDEF);
        TYPE_NAME(T_IMEMO);
        TYPE_NAME(T_ICLASS);
        TYPE_NAME(T_MOVED);
        TYPE_NAME(T_ZOMBIE);
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        return "T_DATA";
#undef TYPE_NAME
    }
    return "unknown";
}

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ:           return "iseq";
      case VM_METHOD_TYPE_ATTRSET:        return "attrest";
      case VM_METHOD_TYPE_IVAR:           return "ivar";
      case VM_METHOD_TYPE_BMETHOD:        return "bmethod";
      case VM_METHOD_TYPE_ALIAS:          return "alias";
      case VM_METHOD_TYPE_REFINED:        return "refined";
      case VM_METHOD_TYPE_CFUNC:          return "cfunc";
      case VM_METHOD_TYPE_ZSUPER:         return "zsuper";
      case VM_METHOD_TYPE_MISSING:        return "missing";
      case VM_METHOD_TYPE_OPTIMIZED:      return "optimized";
      case VM_METHOD_TYPE_UNDEF:          return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}
# define ARY_SHARED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary),ELTS_SHARED)!=0)
# define ARY_EMBED_P(ary) \
    (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
     FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)

static void
rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        VALUE n = iseq->body->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(iseq->body->location.label),
                 RSTRING_PTR(path),
                 n ? FIX2INT(n) : 0 );
    }
}

static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{
    int pos = 0;
    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
    if (SPECIAL_CONST_P(obj)) {
        APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));

        if (FIXNUM_P(obj)) {
            APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
        }
        else if (SYMBOL_P(obj)) {
            APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
        }
    }
    else {
#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int type = BUILTIN_TYPE(obj);
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s%s] %s ",
                     (void *)obj, age,
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj),  "L"),
                     C(RVALUE_MARK_BITMAP(obj),           "M"),
                     C(RVALUE_PIN_BITMAP(obj),            "P"),
                     C(RVALUE_MARKING_BITMAP(obj),        "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     C(rb_objspace_garbage_object_p(obj), "G"),
                     obj_type_name(obj)));
        }
        else {
            /* fake */
            APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
                     (void *)obj, age,
                     obj_type_name(obj)));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPENDF((BUFF_ARGS, "(temporary internal)"));
        }
        else {
            if (RTEST(RBASIC(obj)->klass)) {
                VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
                }
            }
        }

#if GC_DEBUG
        APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
#endif

        switch (type) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;
          case T_ARRAY:
            if (FL_TEST(obj, ELTS_SHARED)) {
                APPENDF((BUFF_ARGS, "shared -> %s",
                         rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
            }
            else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
                APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj)));
            }
            else {
                APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         C(RARRAY_TRANSIENT_P(obj), "T"),
                         RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
            }
            break;
          case T_STRING:
            if (STR_SHARED_P(obj)) {
                APPENDF((BUFF_ARGS, " [shared] len: %ld", RSTRING_LEN(obj)));
            }
            else {
                if (STR_EMBED_P(obj)) APPENDF((BUFF_ARGS, " [embed]"));

                APPENDF((BUFF_ARGS, " len: %ld, capa: %ld", RSTRING_LEN(obj), rb_str_capacity(obj)));
            }
            APPENDF((BUFF_ARGS, " \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj)));
            break;
          case T_SYMBOL:
            {
                VALUE fstr = RSYMBOL(obj)->fstr;
                ID id = RSYMBOL(obj)->id;
                if (RB_TYPE_P(fstr, T_STRING)) {
                    APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));
                }
                else {
                    APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));
                }
            }
            break;
          case T_MOVED:
            APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
            break;
          case T_HASH:
            APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
                     RHASH_SIZE(obj)));
            break;
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
                }
                else {
                    APPENDF((BUFF_ARGS, "(annon)"));
                }
            }
            break;
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
                }
            }
            break;
          case T_OBJECT:
            {
                uint32_t len = ROBJECT_NUMIV(obj);

                if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
                    APPENDF((BUFF_ARGS, "(embed) len:%d", len));
                }
                else {
                    VALUE *ptr = ROBJECT_IVPTR(obj);
                    APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
                }
            }
            break;
          case T_DATA:
            {
                const struct rb_block *block;
                const rb_iseq_t *iseq;
                if (rb_obj_is_proc(obj) &&
                    (block = vm_proc_block(obj)) != NULL &&
                    (vm_block_type(block) == block_type_iseq) &&
                    (iseq = vm_block_iseq(block)) != NULL) {
                    rb_raw_iseq_info(BUFF_ARGS, iseq);
                }
                else if (rb_ractor_p(obj)) {
                    rb_ractor_t *r = (void *)DATA_PTR(obj);
                    if (r) {
                        APPENDF((BUFF_ARGS, "r:%d", r->pub.id));
                    }
                }
                else {
                    const char * const type_name = rb_objspace_data_type_name(obj);
                    if (type_name) {
                        APPENDF((BUFF_ARGS, "%s", type_name));
                    }
                }
            }
            break;
          case T_IMEMO:
            {
                APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));

                switch (imemo_type(obj)) {
                  case imemo_ment:
                    {
                        const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;

                        APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
                                 rb_id2name(me->called_id),
                                 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ?  "pub" :
                                 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                                 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                                 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                                 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                                 me->def ? rb_method_type_name(me->def->type) : "NULL",
                                 me->def ? me->def->alias_count : -1,
                                 (void *)me->owner, // obj_info(me->owner),
                                 (void *)me->defined_class)); //obj_info(me->defined_class)));

                        if (me->def) {
                            switch (me->def->type) {
                              case VM_METHOD_TYPE_ISEQ:
                                APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));
                                break;
                              default:
                                break;
                            }
                        }
                    }
                    break;
                  case imemo_iseq:
                    {
                        const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                        rb_raw_iseq_info(BUFF_ARGS, iseq);
                    }
                    break;
                  case imemo_callinfo:
                    {
                        const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
                        APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
                                 rb_id2name(vm_ci_mid(ci)),
                                 vm_ci_flag(ci),
                                 vm_ci_argc(ci),
                                 vm_ci_kwarg(ci) ? "available" : "NULL"));
                    }
                    break;
                  case imemo_callcache:
                    {
                        const struct rb_callcache *cc = (const struct rb_callcache *)obj;
                        VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
                        const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

                        APPENDF((BUFF_ARGS, "(klass:%s cme:%s%s (%p) call:%p",
                                 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                                 cme ? rb_id2name(cme->called_id) : "<NULL>",
                                 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                                 (void *)cme,
                                 (void *)vm_cc_call(cc)));
                    }
                    break;
                  default:
                    break;
                }
            }
            break;
          default:
            break;
        }
#undef TF
#undef C
    }
  end:
    if (poisoned) {
        asan_poison_object(obj);
    }

    return buff;
#undef APPENDF
#undef BUFF_ARGS
}
#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

static const char *
obj_info(VALUE obj)
{
    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;
    }

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif

MJIT_FUNC_EXPORTED const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}

void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}

#if GC_DEBUG

void
rb_gcdebug_print_obj_condition(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

    if (BUILTIN_TYPE(obj) == T_MOVED) {
        fprintf(stderr, "moved?: true\n");
    }
    else {
        fprintf(stderr, "moved?: false\n");
    }
    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");
    }
    else {
        fprintf(stderr, "pointer to heap?: false\n");
        return;
    }

    fprintf(stderr, "marked?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned?      : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age?         : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old?         : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
    }
    else {
        fprintf(stderr, "lazy sweeping?: false\n");
    }
}

static VALUE
gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
{
    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
    return Qnil;
}

void
rb_gcdebug_sentinel(VALUE obj, const char *name)
{
    rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
}

#endif /* GC_DEBUG */
#if GC_DEBUG_STRESS_TO_CLASS
/*
 *  call-seq:
 *    GC.add_stress_to_class(class[, ...])
 *
 *  Raises NoMemoryError when allocating an instance of the given classes.
 */
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;

    if (!stress_to_class) {
        stress_to_class = rb_ary_tmp_new(argc);
    }
    rb_ary_cat(stress_to_class, argv, argc);
    return self;
}

/*
 *  call-seq:
 *    GC.remove_stress_to_class(class[, ...])
 *
 *  No longer raises NoMemoryError when allocating an instance of the
 *  given classes.
 */
static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = &rb_objspace;
    int i;

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);
        }
        if (RARRAY_LEN(stress_to_class) == 0) {
            stress_to_class = 0;
        }
    }
    return Qnil;
}
#endif /* GC_DEBUG_STRESS_TO_CLASS */
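/*
 * A minimal usage sketch for the two debug-only methods above, which exist
 * only when the interpreter is built with GC_DEBUG_STRESS_TO_CLASS
 * (the class name is purely illustrative):
 *
 *     GC.add_stress_to_class(MyHeavyClass)     # MyHeavyClass.new now raises NoMemoryError
 *     GC.remove_stress_to_class(MyHeavyClass)  # back to normal allocation
 */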
/*
 *  Document-module: ObjectSpace
 *
 *  The ObjectSpace module contains a number of routines
 *  that interact with the garbage collection facility and allow you to
 *  traverse all living objects with an iterator.
 *
 *  ObjectSpace also provides support for object finalizers, procs that will be
 *  called when a specific object is about to be destroyed by garbage
 *  collection. See the documentation for
 *  <code>ObjectSpace.define_finalizer</code> for important information on
 *  how to use this method correctly.
 *
 *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *  _produces:_
 *
 *     Finalizer two on 537763470
 *     Finalizer one on 537763480
 */
/*
 *  Document-class: ObjectSpace::WeakMap
 *
 *  An ObjectSpace::WeakMap object holds references to
 *  any objects, but those objects can get garbage collected.
 *
 *  This class is mostly used internally by WeakRef, please use
 *  +lib/weakref.rb+ for the public interface.
 */
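/*
 *  A minimal usage sketch (illustrative), based on the methods registered
 *  on ObjectSpace::WeakMap in Init_GC below:
 *
 *     map = ObjectSpace::WeakMap.new
 *     key = Object.new
 *     map[key] = Object.new    # both key and value are weakly referenced
 *     map[key]                 # => the value, while it is still alive
 *     map.key?(key)            # => true
 *
 *  Entries disappear once the referenced objects are garbage collected.
 */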
/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *    require 'rdoc/rdoc'
 *    GC::Profiler.report
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */
#include "gc.rbinc"

void
Init_GC(void)
{
    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    rb_mGC = rb_define_module("GC");

    gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
    OBJ_FREEZE(gc_constants);
    /* internal constants */
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
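    /*
     * The frozen hash built above is visible from Ruby; a minimal sketch of
     * inspecting it (the values differ per build and platform):
     *
     *     GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]     # => e.g. 40
     *     GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]  # => e.g. 16384
     */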
    rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
    rb_mObjSpace = rb_define_module("ObjectSpace");

    rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);

    rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
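    /*
     * A minimal sketch of the module functions registered above
     * (illustrative):
     *
     *     ObjectSpace.each_object(String) { |s| p s }   # iterate live objects of a class
     *     ObjectSpace.count_objects[:TOTAL]             # heap slot statistics
     *     ObjectSpace._id2ref(obj.object_id)            # => obj
     */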
    VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
    rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
    rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
    rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
    rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
    rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
    rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
    rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
    rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
    rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
    rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
    rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
    rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
    rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
    rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
    rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
    rb_include_module(rb_cWeakMap, rb_mEnumerable);
    /* internal methods */
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
    rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
#if MALLOC_ALLOCATED_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif

#if GC_DEBUG_STRESS_TO_CLASS
    rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
    rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
#endif
    /* GC build options */
    VALUE opts;
    rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
    OPT(RGENGC_CHECK_MODE);
    OPT(RGENGC_PROFILE);
    OPT(RGENGC_ESTIMATE_OLDMALLOC);
    OPT(GC_PROFILE_MORE_DETAIL);
    OPT(GC_ENABLE_LAZY_SWEEP);
    OPT(CALC_EXACT_MALLOC_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE_CHECK);
    OPT(GC_PROFILE_DETAIL_MEMORY);
#undef OPT
}
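/*
 * A minimal sketch (illustrative): the OPTS array built above lists the
 * names of the compile-time GC options that were enabled for this binary,
 * so from Ruby one can check, e.g.
 *
 *     GC::OPTS  # => ["GC_ENABLE_LAZY_SWEEP", ...]
 */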
#ifdef ruby_xmalloc
#undef ruby_xmalloc
#endif
#ifdef ruby_xmalloc2
#undef ruby_xmalloc2
#endif
#ifdef ruby_xcalloc
#undef ruby_xcalloc
#endif
#ifdef ruby_xrealloc
#undef ruby_xrealloc
#endif
#ifdef ruby_xrealloc2
#undef ruby_xrealloc2
#endif
void *
ruby_xmalloc(size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc_body(size);
}

void *
ruby_xmalloc2(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xmalloc2_body(n, size);
}

void *
ruby_xcalloc(size_t n, size_t size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xcalloc_body(n, size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc_body(ptr, new_size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
{
#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;
#endif
    return ruby_xrealloc2_body(ptr, n, new_size);
}
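/*
 * A minimal usage sketch (illustrative; len is a hypothetical size): C
 * extensions normally reach these allocators through the xmalloc()/
 * xrealloc()/xfree() macros from ruby.h. Unlike plain malloc(), they raise
 * NoMemoryError instead of returning NULL and keep the GC's malloc
 * accounting up to date:
 *
 *     char *buf = xmalloc(len);
 *     buf = xrealloc(buf, len * 2);
 *     xfree(buf);
 */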