3 ** Copyright (C) 2005-2025 Mike Pall. See Copyright Notice in luajit.h
5 ** Major portions taken verbatim or adapted from the Lua interpreter.
6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
28 #include "lj_dispatch.h"
30 #include "lj_vmevent.h"
32 #define GCSTEPSIZE 1024u
34 #define GCSWEEPCOST 10
35 #define GCFINALIZECOST 100
37 /* Macros to set GCobj colors and flags. */
38 #define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
39 #define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK)
40 #define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED)
42 /* -- Mark phase ---------------------------------------------------------- */
44 /* Mark a TValue (if needed). */
45 #define gc_marktv(g, tv) \
46 { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \
47 "TValue and GC type mismatch"); \
48 if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
50 /* Mark a GCobj (if needed). */
51 #define gc_markobj(g, o) \
52 { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
54 /* Mark a string object. */
55 #define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
57 /* Mark a white GCobj. */
58 static void gc_mark(global_State
*g
, GCobj
*o
)
61 lj_assertG(iswhite(o
), "mark of non-white object");
62 lj_assertG(!isdead(g
, o
), "mark of dead object");
64 if (LJ_UNLIKELY(gct
== ~LJ_TUDATA
)) {
65 GCtab
*mt
= tabref(gco2ud(o
)->metatable
);
66 gray2black(o
); /* Userdata are never gray. */
67 if (mt
) gc_markobj(g
, mt
);
68 gc_markobj(g
, tabref(gco2ud(o
)->env
));
69 if (LJ_HASBUFFER
&& gco2ud(o
)->udtype
== UDTYPE_BUFFER
) {
70 SBufExt
*sbx
= (SBufExt
*)uddata(gco2ud(o
));
71 if (sbufiscow(sbx
) && gcref(sbx
->cowref
))
72 gc_markobj(g
, gcref(sbx
->cowref
));
73 if (gcref(sbx
->dict_str
))
74 gc_markobj(g
, gcref(sbx
->dict_str
));
75 if (gcref(sbx
->dict_mt
))
76 gc_markobj(g
, gcref(sbx
->dict_mt
));
78 } else if (LJ_UNLIKELY(gct
== ~LJ_TUPVAL
)) {
79 GCupval
*uv
= gco2uv(o
);
80 gc_marktv(g
, uvval(uv
));
82 gray2black(o
); /* Closed upvalues are never gray. */
83 } else if (gct
!= ~LJ_TSTR
&& gct
!= ~LJ_TCDATA
) {
84 lj_assertG(gct
== ~LJ_TFUNC
|| gct
== ~LJ_TTAB
||
85 gct
== ~LJ_TTHREAD
|| gct
== ~LJ_TPROTO
|| gct
== ~LJ_TTRACE
,
86 "bad GC type %d", gct
);
87 setgcrefr(o
->gch
.gclist
, g
->gc
.gray
);
88 setgcref(g
->gc
.gray
, o
);
93 static void gc_mark_gcroot(global_State
*g
)
96 for (i
= 0; i
< GCROOT_MAX
; i
++)
97 if (gcref(g
->gcroot
[i
]) != NULL
)
98 gc_markobj(g
, gcref(g
->gcroot
[i
]));
101 /* Start a GC cycle and mark the root set. */
102 static void gc_mark_start(global_State
*g
)
104 setgcrefnull(g
->gc
.gray
);
105 setgcrefnull(g
->gc
.grayagain
);
106 setgcrefnull(g
->gc
.weak
);
107 gc_markobj(g
, mainthread(g
));
108 gc_markobj(g
, tabref(mainthread(g
)->env
));
109 gc_marktv(g
, &g
->registrytv
);
111 g
->gc
.state
= GCSpropagate
;
114 /* Mark open upvalues. */
115 static void gc_mark_uv(global_State
*g
)
118 for (uv
= uvnext(&g
->uvhead
); uv
!= &g
->uvhead
; uv
= uvnext(uv
)) {
119 lj_assertG(uvprev(uvnext(uv
)) == uv
&& uvnext(uvprev(uv
)) == uv
,
120 "broken upvalue chain");
121 if (isgray(obj2gco(uv
)))
122 gc_marktv(g
, uvval(uv
));
126 /* Mark userdata in mmudata list. */
127 static void gc_mark_mmudata(global_State
*g
)
129 GCobj
*root
= gcref(g
->gc
.mmudata
);
134 makewhite(g
, u
); /* Could be from previous GC. */
140 /* Separate userdata objects to be finalized to mmudata list. */
141 size_t lj_gc_separateudata(global_State
*g
, int all
)
144 GCRef
*p
= &mainthread(g
)->nextgc
;
146 while ((o
= gcref(*p
)) != NULL
) {
147 if (!(iswhite(o
) || all
) || isfinalized(gco2ud(o
))) {
148 p
= &o
->gch
.nextgc
; /* Nothing to do. */
149 } else if (!lj_meta_fastg(g
, tabref(gco2ud(o
)->metatable
), MM_gc
)) {
150 markfinalized(o
); /* Done, as there's no __gc metamethod. */
152 } else { /* Otherwise move userdata to be finalized to mmudata list. */
153 m
+= sizeudata(gco2ud(o
));
156 if (gcref(g
->gc
.mmudata
)) { /* Link to end of mmudata list. */
157 GCobj
*root
= gcref(g
->gc
.mmudata
);
158 setgcrefr(o
->gch
.nextgc
, root
->gch
.nextgc
);
159 setgcref(root
->gch
.nextgc
, o
);
160 setgcref(g
->gc
.mmudata
, o
);
161 } else { /* Create circular list. */
162 setgcref(o
->gch
.nextgc
, o
);
163 setgcref(g
->gc
.mmudata
, o
);
170 /* -- Propagation phase --------------------------------------------------- */
172 /* Traverse a table. */
173 static int gc_traverse_tab(global_State
*g
, GCtab
*t
)
177 GCtab
*mt
= tabref(t
->metatable
);
180 mode
= lj_meta_fastg(g
, mt
, MM_mode
);
181 if (mode
&& tvisstr(mode
)) { /* Valid __mode field? */
182 const char *modestr
= strVdata(mode
);
184 while ((c
= *modestr
++)) {
185 if (c
== 'k') weak
|= LJ_GC_WEAKKEY
;
186 else if (c
== 'v') weak
|= LJ_GC_WEAKVAL
;
188 if (weak
) { /* Weak tables are cleared in the atomic phase. */
190 if (gcref(g
->gcroot
[GCROOT_FFI_FIN
]) == obj2gco(t
)) {
191 weak
= (int)(~0u & ~LJ_GC_WEAKVAL
);
195 t
->marked
= (uint8_t)((t
->marked
& ~LJ_GC_WEAK
) | weak
);
196 setgcrefr(t
->gclist
, g
->gc
.weak
);
197 setgcref(g
->gc
.weak
, obj2gco(t
));
201 if (weak
== LJ_GC_WEAK
) /* Nothing to mark if both keys/values are weak. */
203 if (!(weak
& LJ_GC_WEAKVAL
)) { /* Mark array part. */
204 MSize i
, asize
= t
->asize
;
205 for (i
= 0; i
< asize
; i
++)
206 gc_marktv(g
, arrayslot(t
, i
));
208 if (t
->hmask
> 0) { /* Mark hash part. */
209 Node
*node
= noderef(t
->node
);
210 MSize i
, hmask
= t
->hmask
;
211 for (i
= 0; i
<= hmask
; i
++) {
213 if (!tvisnil(&n
->val
)) { /* Mark non-empty slot. */
214 lj_assertG(!tvisnil(&n
->key
), "mark of nil key in non-empty slot");
215 if (!(weak
& LJ_GC_WEAKKEY
)) gc_marktv(g
, &n
->key
);
216 if (!(weak
& LJ_GC_WEAKVAL
)) gc_marktv(g
, &n
->val
);
223 /* Traverse a function. */
224 static void gc_traverse_func(global_State
*g
, GCfunc
*fn
)
226 gc_markobj(g
, tabref(fn
->c
.env
));
229 lj_assertG(fn
->l
.nupvalues
<= funcproto(fn
)->sizeuv
,
230 "function upvalues out of range");
231 gc_markobj(g
, funcproto(fn
));
232 for (i
= 0; i
< fn
->l
.nupvalues
; i
++) /* Mark Lua function upvalues. */
233 gc_markobj(g
, &gcref(fn
->l
.uvptr
[i
])->uv
);
236 for (i
= 0; i
< fn
->c
.nupvalues
; i
++) /* Mark C function upvalues. */
237 gc_marktv(g
, &fn
->c
.upvalue
[i
]);
243 static void gc_marktrace(global_State
*g
, TraceNo traceno
)
245 GCobj
*o
= obj2gco(traceref(G2J(g
), traceno
));
246 lj_assertG(traceno
!= G2J(g
)->cur
.traceno
, "active trace escaped");
249 setgcrefr(o
->gch
.gclist
, g
->gc
.gray
);
250 setgcref(g
->gc
.gray
, o
);
254 /* Traverse a trace. */
255 static void gc_traverse_trace(global_State
*g
, GCtrace
*T
)
258 if (T
->traceno
== 0) return;
259 for (ref
= T
->nk
; ref
< REF_TRUE
; ref
++) {
260 IRIns
*ir
= &T
->ir
[ref
];
262 gc_markobj(g
, ir_kgc(ir
));
263 if (irt_is64(ir
->t
) && ir
->o
!= IR_KNULL
)
266 if (T
->link
) gc_marktrace(g
, T
->link
);
267 if (T
->nextroot
) gc_marktrace(g
, T
->nextroot
);
268 if (T
->nextside
) gc_marktrace(g
, T
->nextside
);
269 gc_markobj(g
, gcref(T
->startpt
));
#if LJ_HASJIT
/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
#else
#define gc_traverse_curtrace(g)	UNUSED(g)
#endif
278 /* Traverse a prototype. */
279 static void gc_traverse_proto(global_State
*g
, GCproto
*pt
)
282 gc_mark_str(proto_chunkname(pt
));
283 for (i
= -(ptrdiff_t)pt
->sizekgc
; i
< 0; i
++) /* Mark collectable consts. */
284 gc_markobj(g
, proto_kgc(pt
, i
));
286 if (pt
->trace
) gc_marktrace(g
, pt
->trace
);
290 /* Traverse the frame structure of a stack. */
291 static MSize
gc_traverse_frames(global_State
*g
, lua_State
*th
)
293 TValue
*frame
, *top
= th
->top
-1, *bot
= tvref(th
->stack
);
294 /* Note: extra vararg frame not skipped, marks function twice (harmless). */
295 for (frame
= th
->base
-1; frame
> bot
+LJ_FR2
; frame
= frame_prev(frame
)) {
296 GCfunc
*fn
= frame_func(frame
);
297 TValue
*ftop
= frame
;
298 if (isluafunc(fn
)) ftop
+= funcproto(fn
)->framesize
;
299 if (ftop
> top
) top
= ftop
;
300 if (!LJ_FR2
) gc_markobj(g
, fn
); /* Need to mark hidden function (or L). */
302 top
++; /* Correct bias of -1 (frame == base-1). */
303 if (top
> tvref(th
->maxstack
)) top
= tvref(th
->maxstack
);
304 return (MSize
)(top
- bot
); /* Return minimum needed stack size. */
307 /* Traverse a thread object. */
308 static void gc_traverse_thread(global_State
*g
, lua_State
*th
)
310 TValue
*o
, *top
= th
->top
;
311 for (o
= tvref(th
->stack
)+1+LJ_FR2
; o
< top
; o
++)
313 if (g
->gc
.state
== GCSatomic
) {
314 top
= tvref(th
->stack
) + th
->stacksize
;
315 for (; o
< top
; o
++) /* Clear unmarked slots. */
318 gc_markobj(g
, tabref(th
->env
));
319 lj_state_shrinkstack(th
, gc_traverse_frames(g
, th
));
322 /* Propagate one gray object. Traverse it and turn it black. */
323 static size_t propagatemark(global_State
*g
)
325 GCobj
*o
= gcref(g
->gc
.gray
);
326 int gct
= o
->gch
.gct
;
327 lj_assertG(isgray(o
), "propagation of non-gray object");
329 setgcrefr(g
->gc
.gray
, o
->gch
.gclist
); /* Remove from gray list. */
330 if (LJ_LIKELY(gct
== ~LJ_TTAB
)) {
331 GCtab
*t
= gco2tab(o
);
332 if (gc_traverse_tab(g
, t
) > 0)
333 black2gray(o
); /* Keep weak tables gray. */
334 return sizeof(GCtab
) + sizeof(TValue
) * t
->asize
+
335 (t
->hmask
? sizeof(Node
) * (t
->hmask
+ 1) : 0);
336 } else if (LJ_LIKELY(gct
== ~LJ_TFUNC
)) {
337 GCfunc
*fn
= gco2func(o
);
338 gc_traverse_func(g
, fn
);
339 return isluafunc(fn
) ? sizeLfunc((MSize
)fn
->l
.nupvalues
) :
340 sizeCfunc((MSize
)fn
->c
.nupvalues
);
341 } else if (LJ_LIKELY(gct
== ~LJ_TPROTO
)) {
342 GCproto
*pt
= gco2pt(o
);
343 gc_traverse_proto(g
, pt
);
345 } else if (LJ_LIKELY(gct
== ~LJ_TTHREAD
)) {
346 lua_State
*th
= gco2th(o
);
347 setgcrefr(th
->gclist
, g
->gc
.grayagain
);
348 setgcref(g
->gc
.grayagain
, o
);
349 black2gray(o
); /* Threads are never black. */
350 gc_traverse_thread(g
, th
);
351 return sizeof(lua_State
) + sizeof(TValue
) * th
->stacksize
;
354 GCtrace
*T
= gco2trace(o
);
355 gc_traverse_trace(g
, T
);
356 return ((sizeof(GCtrace
)+7)&~7) + (T
->nins
-T
->nk
)*sizeof(IRIns
) +
357 T
->nsnap
*sizeof(SnapShot
) + T
->nsnapmap
*sizeof(SnapEntry
);
359 lj_assertG(0, "bad GC type %d", gct
);
365 /* Propagate all gray objects. */
366 static size_t gc_propagate_gray(global_State
*g
)
369 while (gcref(g
->gc
.gray
) != NULL
)
370 m
+= propagatemark(g
);
374 /* -- Sweep phase --------------------------------------------------------- */
376 /* Type of GC free functions. */
377 typedef void (LJ_FASTCALL
*GCFreeFunc
)(global_State
*g
, GCobj
*o
);
379 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
380 static const GCFreeFunc gc_freefunc
[] = {
381 (GCFreeFunc
)lj_str_free
,
382 (GCFreeFunc
)lj_func_freeuv
,
383 (GCFreeFunc
)lj_state_free
,
384 (GCFreeFunc
)lj_func_freeproto
,
385 (GCFreeFunc
)lj_func_free
,
387 (GCFreeFunc
)lj_trace_free
,
392 (GCFreeFunc
)lj_cdata_free
,
396 (GCFreeFunc
)lj_tab_free
,
397 (GCFreeFunc
)lj_udata_free
400 /* Full sweep of a GC list. */
401 #define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0)
403 /* Partial sweep of a GC list. */
404 static GCRef
*gc_sweep(global_State
*g
, GCRef
*p
, uint32_t lim
)
406 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
407 int ow
= otherwhite(g
);
409 while ((o
= gcref(*p
)) != NULL
&& lim
-- > 0) {
410 if (o
->gch
.gct
== ~LJ_TTHREAD
) /* Need to sweep open upvalues, too. */
411 gc_fullsweep(g
, &gco2th(o
)->openupval
);
412 if (((o
->gch
.marked
^ LJ_GC_WHITES
) & ow
)) { /* Black or current white? */
413 lj_assertG(!isdead(g
, o
) || (o
->gch
.marked
& LJ_GC_FIXED
),
414 "sweep of undead object");
415 makewhite(g
, o
); /* Value is alive, change to the current white. */
417 } else { /* Otherwise value is dead, free it. */
418 lj_assertG(isdead(g
, o
) || ow
== LJ_GC_SFIXED
,
419 "sweep of unlive object");
420 setgcrefr(*p
, o
->gch
.nextgc
);
421 if (o
== gcref(g
->gc
.root
))
422 setgcrefr(g
->gc
.root
, o
->gch
.nextgc
); /* Adjust list anchor. */
423 gc_freefunc
[o
->gch
.gct
- ~LJ_TSTR
](g
, o
);
429 /* Sweep one string interning table chain. Preserves hashalg bit. */
430 static void gc_sweepstr(global_State
*g
, GCRef
*chain
)
432 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
433 int ow
= otherwhite(g
);
434 uintptr_t u
= gcrefu(*chain
);
438 setgcrefp(q
, (u
& ~(uintptr_t)1));
439 while ((o
= gcref(*p
)) != NULL
) {
440 if (((o
->gch
.marked
^ LJ_GC_WHITES
) & ow
)) { /* Black or current white? */
441 lj_assertG(!isdead(g
, o
) || (o
->gch
.marked
& LJ_GC_FIXED
),
442 "sweep of undead string");
443 makewhite(g
, o
); /* String is alive, change to the current white. */
445 } else { /* Otherwise string is dead, free it. */
446 lj_assertG(isdead(g
, o
) || ow
== LJ_GC_SFIXED
,
447 "sweep of unlive string");
448 setgcrefr(*p
, o
->gch
.nextgc
);
449 lj_str_free(g
, gco2str(o
));
452 setgcrefp(*chain
, (gcrefu(q
) | (u
& 1)));
455 /* Check whether we can clear a key or a value slot from a table. */
456 static int gc_mayclear(cTValue
*o
, int val
)
458 if (tvisgcv(o
)) { /* Only collectable objects can be weak references. */
459 if (tvisstr(o
)) { /* But strings cannot be used as weak references. */
460 gc_mark_str(strV(o
)); /* And need to be marked. */
464 return 1; /* Object is about to be collected. */
465 if (tvisudata(o
) && val
&& isfinalized(udataV(o
)))
466 return 1; /* Finalized userdata is dropped only from values. */
468 return 0; /* Cannot clear. */
471 /* Clear collected entries from weak tables. */
472 static void gc_clearweak(global_State
*g
, GCobj
*o
)
476 GCtab
*t
= gco2tab(o
);
477 lj_assertG((t
->marked
& LJ_GC_WEAK
), "clear of non-weak table");
478 if ((t
->marked
& LJ_GC_WEAKVAL
)) {
479 MSize i
, asize
= t
->asize
;
480 for (i
= 0; i
< asize
; i
++) {
481 /* Clear array slot when value is about to be collected. */
482 TValue
*tv
= arrayslot(t
, i
);
483 if (gc_mayclear(tv
, 1))
488 Node
*node
= noderef(t
->node
);
489 MSize i
, hmask
= t
->hmask
;
490 for (i
= 0; i
<= hmask
; i
++) {
492 /* Clear hash slot when key or value is about to be collected. */
493 if (!tvisnil(&n
->val
) && (gc_mayclear(&n
->key
, 0) ||
494 gc_mayclear(&n
->val
, 1)))
498 o
= gcref(t
->gclist
);
502 /* Call a userdata or cdata finalizer. */
503 static void gc_call_finalizer(global_State
*g
, lua_State
*L
,
504 cTValue
*mo
, GCobj
*o
)
506 /* Save and restore lots of state around the __gc callback. */
507 uint8_t oldh
= hook_save(g
);
508 GCSize oldt
= g
->gc
.threshold
;
512 hook_entergc(g
); /* Disable hooks and new traces during __gc. */
513 if (LJ_HASPROFILE
&& (oldh
& HOOK_PROFILE
)) lj_dispatch_update(g
);
514 g
->gc
.threshold
= LJ_MAX_MEM
; /* Prevent GC steps. */
516 copyTV(L
, top
++, mo
);
517 if (LJ_FR2
) setnilV(top
++);
518 setgcV(L
, top
, o
, ~o
->gch
.gct
);
520 errcode
= lj_vm_pcall(L
, top
, 1+0, -1); /* Stack: |mo|o| -> | */
521 hook_restore(g
, oldh
);
522 if (LJ_HASPROFILE
&& (oldh
& HOOK_PROFILE
)) lj_dispatch_update(g
);
523 g
->gc
.threshold
= oldt
; /* Restore GC threshold. */
525 ptrdiff_t errobj
= savestack(L
, L
->top
-1); /* Stack may be resized. */
526 lj_vmevent_send(L
, ERRFIN
,
527 copyTV(L
, L
->top
++, restorestack(L
, errobj
));
533 /* Finalize one userdata or cdata object from the mmudata list. */
534 static void gc_finalize(lua_State
*L
)
536 global_State
*g
= G(L
);
537 GCobj
*o
= gcnext(gcref(g
->gc
.mmudata
));
539 lj_assertG(tvref(g
->jit_base
) == NULL
, "finalizer called on trace");
540 /* Unchain from list of userdata to be finalized. */
541 if (o
== gcref(g
->gc
.mmudata
))
542 setgcrefnull(g
->gc
.mmudata
);
544 setgcrefr(gcref(g
->gc
.mmudata
)->gch
.nextgc
, o
->gch
.nextgc
);
546 if (o
->gch
.gct
== ~LJ_TCDATA
) {
548 /* Add cdata back to the GC list and make it white. */
549 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
550 setgcref(g
->gc
.root
, o
);
552 o
->gch
.marked
&= (uint8_t)~LJ_GC_CDATA_FIN
;
553 /* Resolve finalizer. */
554 setcdataV(L
, &tmp
, gco2cd(o
));
555 tv
= lj_tab_set(L
, tabref(g
->gcroot
[GCROOT_FFI_FIN
]), &tmp
);
558 setnilV(tv
); /* Clear entry in finalizer table. */
559 gc_call_finalizer(g
, L
, &tmp
, o
);
564 /* Add userdata back to the main userdata list and make it white. */
565 setgcrefr(o
->gch
.nextgc
, mainthread(g
)->nextgc
);
566 setgcref(mainthread(g
)->nextgc
, o
);
568 /* Resolve the __gc metamethod. */
569 mo
= lj_meta_fastg(g
, tabref(gco2ud(o
)->metatable
), MM_gc
);
571 gc_call_finalizer(g
, L
, mo
, o
);
574 /* Finalize all userdata objects from mmudata list. */
575 void lj_gc_finalize_udata(lua_State
*L
)
577 while (gcref(G(L
)->gc
.mmudata
) != NULL
)
582 /* Finalize all cdata objects from finalizer table. */
583 void lj_gc_finalize_cdata(lua_State
*L
)
585 global_State
*g
= G(L
);
586 GCtab
*t
= tabref(g
->gcroot
[GCROOT_FFI_FIN
]);
587 Node
*node
= noderef(t
->node
);
589 setgcrefnull(t
->metatable
); /* Mark finalizer table as disabled. */
590 for (i
= (ptrdiff_t)t
->hmask
; i
>= 0; i
--)
591 if (!tvisnil(&node
[i
].val
) && tviscdata(&node
[i
].key
)) {
592 GCobj
*o
= gcV(&node
[i
].key
);
595 o
->gch
.marked
&= (uint8_t)~LJ_GC_CDATA_FIN
;
596 copyTV(L
, &tmp
, &node
[i
].val
);
597 setnilV(&node
[i
].val
);
598 gc_call_finalizer(g
, L
, &tmp
, o
);
603 /* Free all remaining GC objects. */
604 void lj_gc_freeall(global_State
*g
)
607 /* Free everything, except super-fixed objects (the main thread). */
608 g
->gc
.currentwhite
= LJ_GC_WHITES
| LJ_GC_SFIXED
;
609 gc_fullsweep(g
, &g
->gc
.root
);
610 for (i
= g
->str
.mask
; i
!= ~(MSize
)0; i
--) /* Free all string hash chains. */
611 gc_sweepstr(g
, &g
->str
.tab
[i
]);
614 /* -- Collector ----------------------------------------------------------- */
616 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
617 static void atomic(global_State
*g
, lua_State
*L
)
621 gc_mark_uv(g
); /* Need to remark open upvalues (the thread may be dead). */
622 gc_propagate_gray(g
); /* Propagate any left-overs. */
624 setgcrefr(g
->gc
.gray
, g
->gc
.weak
); /* Empty the list of weak tables. */
625 setgcrefnull(g
->gc
.weak
);
626 lj_assertG(!iswhite(obj2gco(mainthread(g
))), "main thread turned white");
627 gc_markobj(g
, L
); /* Mark running thread. */
628 gc_traverse_curtrace(g
); /* Traverse current trace. */
629 gc_mark_gcroot(g
); /* Mark GC roots (again). */
630 gc_propagate_gray(g
); /* Propagate all of the above. */
632 setgcrefr(g
->gc
.gray
, g
->gc
.grayagain
); /* Empty the 2nd chance list. */
633 setgcrefnull(g
->gc
.grayagain
);
634 gc_propagate_gray(g
); /* Propagate it. */
636 udsize
= lj_gc_separateudata(g
, 0); /* Separate userdata to be finalized. */
637 gc_mark_mmudata(g
); /* Mark them. */
638 udsize
+= gc_propagate_gray(g
); /* And propagate the marks. */
640 /* All marking done, clear weak tables. */
641 gc_clearweak(g
, gcref(g
->gc
.weak
));
643 lj_buf_shrink(L
, &g
->tmpbuf
); /* Shrink temp buffer. */
645 /* Prepare for sweep phase. */
646 g
->gc
.currentwhite
= (uint8_t)otherwhite(g
); /* Flip current white. */
647 g
->strempty
.marked
= g
->gc
.currentwhite
;
648 setmref(g
->gc
.sweep
, &g
->gc
.root
);
649 g
->gc
.estimate
= g
->gc
.total
- (GCSize
)udsize
; /* Initial estimate. */
652 /* GC state machine. Returns a cost estimate for each step performed. */
653 static size_t gc_onestep(lua_State
*L
)
655 global_State
*g
= G(L
);
656 switch (g
->gc
.state
) {
658 gc_mark_start(g
); /* Start a new GC cycle by marking all GC roots. */
661 if (gcref(g
->gc
.gray
) != NULL
)
662 return propagatemark(g
); /* Propagate one gray object. */
663 g
->gc
.state
= GCSatomic
; /* End of mark phase. */
666 if (tvref(g
->jit_base
)) /* Don't run atomic phase on trace. */
669 g
->gc
.state
= GCSsweepstring
; /* Start of sweep phase. */
672 case GCSsweepstring
: {
673 GCSize old
= g
->gc
.total
;
674 gc_sweepstr(g
, &g
->str
.tab
[g
->gc
.sweepstr
++]); /* Sweep one chain. */
675 if (g
->gc
.sweepstr
> g
->str
.mask
)
676 g
->gc
.state
= GCSsweep
; /* All string hash chains sweeped. */
677 lj_assertG(old
>= g
->gc
.total
, "sweep increased memory");
678 g
->gc
.estimate
-= old
- g
->gc
.total
;
682 GCSize old
= g
->gc
.total
;
683 setmref(g
->gc
.sweep
, gc_sweep(g
, mref(g
->gc
.sweep
, GCRef
), GCSWEEPMAX
));
684 lj_assertG(old
>= g
->gc
.total
, "sweep increased memory");
685 g
->gc
.estimate
-= old
- g
->gc
.total
;
686 if (gcref(*mref(g
->gc
.sweep
, GCRef
)) == NULL
) {
687 if (g
->str
.num
<= (g
->str
.mask
>> 2) && g
->str
.mask
> LJ_MIN_STRTAB
*2-1)
688 lj_str_resize(L
, g
->str
.mask
>> 1); /* Shrink string table. */
689 if (gcref(g
->gc
.mmudata
)) { /* Need any finalizations? */
690 g
->gc
.state
= GCSfinalize
;
691 } else { /* Otherwise skip this phase to help the JIT. */
692 g
->gc
.state
= GCSpause
; /* End of GC cycle. */
696 return GCSWEEPMAX
*GCSWEEPCOST
;
699 if (gcref(g
->gc
.mmudata
) != NULL
) {
700 GCSize old
= g
->gc
.total
;
701 if (tvref(g
->jit_base
)) /* Don't call finalizers on trace. */
703 gc_finalize(L
); /* Finalize one userdata object. */
704 if (old
>= g
->gc
.total
&& g
->gc
.estimate
> old
- g
->gc
.total
)
705 g
->gc
.estimate
-= old
- g
->gc
.total
;
706 if (g
->gc
.estimate
> GCFINALIZECOST
)
707 g
->gc
.estimate
-= GCFINALIZECOST
;
708 return GCFINALIZECOST
;
710 g
->gc
.state
= GCSpause
; /* End of GC cycle. */
714 lj_assertG(0, "bad GC state");
719 /* Perform a limited amount of incremental GC steps. */
720 int LJ_FASTCALL
lj_gc_step(lua_State
*L
)
722 global_State
*g
= G(L
);
724 int32_t ostate
= g
->vmstate
;
726 lim
= (GCSTEPSIZE
/100) * g
->gc
.stepmul
;
729 if (g
->gc
.total
> g
->gc
.threshold
)
730 g
->gc
.debt
+= g
->gc
.total
- g
->gc
.threshold
;
732 lim
-= (GCSize
)gc_onestep(L
);
733 if (g
->gc
.state
== GCSpause
) {
734 g
->gc
.threshold
= (g
->gc
.estimate
/100) * g
->gc
.pause
;
736 return 1; /* Finished a GC cycle. */
738 } while (sizeof(lim
) == 8 ? ((int64_t)lim
> 0) : ((int32_t)lim
> 0));
739 if (g
->gc
.debt
< GCSTEPSIZE
) {
740 g
->gc
.threshold
= g
->gc
.total
+ GCSTEPSIZE
;
744 g
->gc
.debt
-= GCSTEPSIZE
;
745 g
->gc
.threshold
= g
->gc
.total
;
751 /* Ditto, but fix the stack top first. */
752 void LJ_FASTCALL
lj_gc_step_fixtop(lua_State
*L
)
754 if (curr_funcisL(L
)) L
->top
= curr_topL(L
);
759 /* Perform multiple GC steps. Called from JIT-compiled code. */
760 int LJ_FASTCALL
lj_gc_step_jit(global_State
*g
, MSize steps
)
762 lua_State
*L
= gco2th(gcref(g
->cur_L
));
763 L
->base
= tvref(G(L
)->jit_base
);
764 L
->top
= curr_topL(L
);
765 while (steps
-- > 0 && lj_gc_step(L
) == 0)
767 /* Return 1 to force a trace exit. */
768 return (G(L
)->gc
.state
== GCSatomic
|| G(L
)->gc
.state
== GCSfinalize
);
772 /* Perform a full GC cycle. */
773 void lj_gc_fullgc(lua_State
*L
)
775 global_State
*g
= G(L
);
776 int32_t ostate
= g
->vmstate
;
778 if (g
->gc
.state
<= GCSatomic
) { /* Caught somewhere in the middle. */
779 setmref(g
->gc
.sweep
, &g
->gc
.root
); /* Sweep everything (preserving it). */
780 setgcrefnull(g
->gc
.gray
); /* Reset lists from partial propagation. */
781 setgcrefnull(g
->gc
.grayagain
);
782 setgcrefnull(g
->gc
.weak
);
783 g
->gc
.state
= GCSsweepstring
; /* Fast forward to the sweep phase. */
786 while (g
->gc
.state
== GCSsweepstring
|| g
->gc
.state
== GCSsweep
)
787 gc_onestep(L
); /* Finish sweep. */
788 lj_assertG(g
->gc
.state
== GCSfinalize
|| g
->gc
.state
== GCSpause
,
790 /* Now perform a full GC. */
791 g
->gc
.state
= GCSpause
;
792 do { gc_onestep(L
); } while (g
->gc
.state
!= GCSpause
);
793 g
->gc
.threshold
= (g
->gc
.estimate
/100) * g
->gc
.pause
;
797 /* -- Write barriers ------------------------------------------------------ */
799 /* Move the GC propagation frontier forward. */
800 void lj_gc_barrierf(global_State
*g
, GCobj
*o
, GCobj
*v
)
802 lj_assertG(isblack(o
) && iswhite(v
) && !isdead(g
, v
) && !isdead(g
, o
),
803 "bad object states for forward barrier");
804 lj_assertG(g
->gc
.state
!= GCSfinalize
&& g
->gc
.state
!= GCSpause
,
806 lj_assertG(o
->gch
.gct
!= ~LJ_TTAB
, "barrier object is not a table");
807 /* Preserve invariant during propagation. Otherwise it doesn't matter. */
808 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
809 gc_mark(g
, v
); /* Move frontier forward. */
811 makewhite(g
, o
); /* Make it white to avoid the following barrier. */
814 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
815 void LJ_FASTCALL
lj_gc_barrieruv(global_State
*g
, TValue
*tv
)
817 #define TV2MARKED(x) \
818 (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
819 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
822 TV2MARKED(tv
) = (TV2MARKED(tv
) & (uint8_t)~LJ_GC_COLORS
) | curwhite(g
);
826 /* Close upvalue. Also needs a write barrier. */
827 void lj_gc_closeuv(global_State
*g
, GCupval
*uv
)
829 GCobj
*o
= obj2gco(uv
);
830 /* Copy stack slot to upvalue itself and point to the copy. */
831 copyTV(mainthread(g
), &uv
->tv
, uvval(uv
));
832 setmref(uv
->v
, &uv
->tv
);
834 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
835 setgcref(g
->gc
.root
, o
);
836 if (isgray(o
)) { /* A closed upvalue is never gray, so fix this. */
837 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
) {
838 gray2black(o
); /* Make it black and preserve invariant. */
839 if (tviswhite(&uv
->tv
))
840 lj_gc_barrierf(g
, o
, gcV(&uv
->tv
));
842 makewhite(g
, o
); /* Make it white, i.e. sweep the upvalue. */
843 lj_assertG(g
->gc
.state
!= GCSfinalize
&& g
->gc
.state
!= GCSpause
,
850 /* Mark a trace if it's saved during the propagation phase. */
851 void lj_gc_barriertrace(global_State
*g
, uint32_t traceno
)
853 if (g
->gc
.state
== GCSpropagate
|| g
->gc
.state
== GCSatomic
)
854 gc_marktrace(g
, traceno
);
858 /* -- Allocator ----------------------------------------------------------- */
860 /* Call pluggable memory allocator to allocate or resize a fragment. */
861 void *lj_mem_realloc(lua_State
*L
, void *p
, GCSize osz
, GCSize nsz
)
863 global_State
*g
= G(L
);
864 lj_assertG((osz
== 0) == (p
== NULL
), "realloc API violation");
865 p
= g
->allocf(g
->allocd
, p
, osz
, nsz
);
866 if (p
== NULL
&& nsz
> 0)
868 lj_assertG((nsz
== 0) == (p
== NULL
), "allocf API violation");
869 lj_assertG(checkptrGC(p
),
870 "allocated memory address %p outside required range", p
);
871 g
->gc
.total
= (g
->gc
.total
- osz
) + nsz
;
875 /* Allocate new GC object and link it to the root set. */
876 void * LJ_FASTCALL
lj_mem_newgco(lua_State
*L
, GCSize size
)
878 global_State
*g
= G(L
);
879 GCobj
*o
= (GCobj
*)g
->allocf(g
->allocd
, NULL
, 0, size
);
882 lj_assertG(checkptrGC(o
),
883 "allocated memory address %p outside required range", o
);
885 setgcrefr(o
->gch
.nextgc
, g
->gc
.root
);
886 setgcref(g
->gc
.root
, o
);
891 /* Resize growable vector. */
892 void *lj_mem_grow(lua_State
*L
, void *p
, MSize
*szp
, MSize lim
, MSize esz
)
894 MSize sz
= (*szp
) << 1;
895 if (sz
< LJ_MIN_VECSZ
)
899 p
= lj_mem_realloc(L
, p
, (*szp
)*esz
, sz
*esz
);