root/lj_gc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. gc_mark
  2. gc_mark_gcroot
  3. gc_mark_start
  4. gc_mark_uv
  5. gc_mark_mmudata
  6. lj_gc_separateudata
  7. gc_traverse_tab
  8. gc_traverse_func
  9. gc_marktrace
  10. gc_traverse_trace
  11. gc_traverse_proto
  12. gc_traverse_frames
  13. gc_traverse_thread
  14. propagatemark
  15. gc_propagate_gray
  16. gc_shrink
  17. gc_sweep
  18. gc_mayclear
  19. gc_clearweak
  20. gc_call_finalizer
  21. gc_finalize
  22. lj_gc_finalize_udata
  23. lj_gc_finalize_cdata
  24. lj_gc_freeall
  25. atomic
  26. gc_onestep
  27. lj_gc_step
  28. lj_gc_step_fixtop
  29. lj_gc_step_jit
  30. lj_gc_fullgc
  31. lj_gc_barrierf
  32. lj_gc_barrieruv
  33. lj_gc_closeuv
  34. lj_gc_barriertrace
  35. lj_mem_realloc
  36. lj_mem_newgco
  37. lj_mem_grow

   1 /*
   2 ** Garbage collector.
   3 ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
   4 **
   5 ** Major portions taken verbatim or adapted from the Lua interpreter.
   6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
   7 */
   8 
   9 #define lj_gc_c
  10 #define LUA_CORE
  11 
  12 #include "lj_obj.h"
  13 #include "lj_gc.h"
  14 #include "lj_err.h"
  15 #include "lj_str.h"
  16 #include "lj_tab.h"
  17 #include "lj_func.h"
  18 #include "lj_udata.h"
  19 #include "lj_meta.h"
  20 #include "lj_state.h"
  21 #include "lj_frame.h"
  22 #if LJ_HASFFI
  23 #include "lj_ctype.h"
  24 #include "lj_cdata.h"
  25 #endif
  26 #include "lj_trace.h"
  27 #include "lj_vm.h"
  28 
  29 #define GCSTEPSIZE      1024u
  30 #define GCSWEEPMAX      40
  31 #define GCSWEEPCOST     10
  32 #define GCFINALIZECOST  100
  33 
/* Macros to set GCobj colors and flags. */
/* Clear both white bits: object becomes gray (reachable, not yet traversed). */
#define white2gray(x)           ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
/* Set the black bit: object and all its direct references are marked. */
#define gray2black(x)           ((x)->gch.marked |= LJ_GC_BLACK)
/* Userdata/cdata whose finalizer has already been scheduled or run. */
#define isfinalized(u)          ((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). Only marks if it holds a white GC object. */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). No-op for already gray/black objects. */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings have no references, so gray suffices. */
#define gc_mark_str(s)          ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
  52 
/* Mark a white GCobj. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;  /* Object type tag (~LJ_T*). */
  lua_assert(iswhite(o) && !isdead(g, o));
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    /* Strings and cdata carry no references, so gray is final for them.
    ** All other types are queued on the gray list for later traversal.
    */
    lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
  76 
  77 /* Mark GC roots. */
  78 static void gc_mark_gcroot(global_State *g)
  79 {
  80   ptrdiff_t i;
  81   for (i = 0; i < GCROOT_MAX; i++)
  82     if (gcref(g->gcroot[i]) != NULL)
  83       gc_markobj(g, gcref(g->gcroot[i]));
  84 }
  85 
/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  /* Reset propagation lists left over from the previous cycle. */
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  gc_markobj(g, mainthread(g));  /* Main thread and its environment table. */
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);  /* The registry. */
  gc_mark_gcroot(g);  /* Fixed per-VM roots. */
  g->gc.state = GCSpropagate;  /* Next: incrementally propagate grays. */
}
  98 
/* Mark open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  /* Walk the doubly-linked list of open upvalues anchored at g->uvhead. */
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
    if (isgray(obj2gco(uv)))
      gc_marktv(g, uvval(uv));  /* Remark value of reachable open upvalue. */
  }
}
 109 
/* Mark userdata in mmudata list. */
static void gc_mark_mmudata(global_State *g)
{
  /* The mmudata list is circular; gcnext() first to start past the anchor. */
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);  /* Keep to-be-finalized objects (and refs) alive. */
    } while (u != root);
  }
}
 123 
/* Separate userdata objects to be finalized to mmudata list.
** 'all' forces separation of every unfinalized userdata (shutdown path).
** Returns the total size of the userdata moved to the mmudata list.
*/
size_t lj_gc_separateudata(global_State *g, int all)
{
  size_t m = 0;
  /* Userdata are chained after the main thread; p points at the link slot. */
  GCRef *p = &mainthread(g)->nextgc;
  GCobj *o;
  while ((o = gcref(*p)) != NULL) {
    if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
      p = &o->gch.nextgc;  /* Nothing to do. */
    } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
      markfinalized(o);  /* Done, as there's no __gc metamethod. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise move userdata to be finalized to mmudata list. */
      m += sizeudata(gco2ud(o));
      markfinalized(o);
      *p = o->gch.nextgc;  /* Unlink from main userdata list. */
      if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
        GCobj *root = gcref(g->gc.mmudata);
        setgcrefr(o->gch.nextgc, root->gch.nextgc);
        setgcref(root->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      } else {  /* Create circular list. */
        setgcref(o->gch.nextgc, o);
        setgcref(g->gc.mmudata, o);
      }
    }
  }
  return m;
}
 153 
 154 /* -- Propagation phase --------------------------------------------------- */
 155 
/* Traverse a table. Returns nonzero if the table must stay gray
** (i.e. it is weak and will be revisited in the atomic phase).
*/
static int gc_traverse_tab(global_State *g, GCtab *t)
{
  int weak = 0;  /* Combination of LJ_GC_WEAKKEY / LJ_GC_WEAKVAL bits. */
  cTValue *mode;
  GCtab *mt = tabref(t->metatable);
  if (mt)
    gc_markobj(g, mt);
  mode = lj_meta_fastg(g, mt, MM_mode);
  if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    const char *modestr = strVdata(mode);
    int c;
    while ((c = *modestr++)) {
      if (c == 'k') weak |= LJ_GC_WEAKKEY;
      else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    }
    if (weak) {  /* Weak tables are cleared in the atomic phase. */
#if LJ_HASFFI
      CTState *cts = ctype_ctsG(g);
      if (cts && cts->finalizer == t) {
        /* Special case: the cdata finalizer table is not chained to the
        ** weak list; flag it with everything except LJ_GC_WEAKVAL.
        */
        weak = (int)(~0u & ~LJ_GC_WEAKVAL);
      } else
#endif
      {
        t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
        setgcrefr(t->gclist, g->gc.weak);
        setgcref(g->gc.weak, obj2gco(t));
      }
    }
  }
  if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    return 1;
  if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    MSize i, asize = t->asize;
    for (i = 0; i < asize; i++)
      gc_marktv(g, arrayslot(t, i));
  }
  if (t->hmask > 0) {  /* Mark hash part. */
    Node *node = noderef(t->node);
    MSize i, hmask = t->hmask;
    for (i = 0; i <= hmask; i++) {
      Node *n = &node[i];
      if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
        lua_assert(!tvisnil(&n->key));
        if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
        if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
      }
    }
  }
  return weak;
}
 207 
 208 /* Traverse a function. */
 209 static void gc_traverse_func(global_State *g, GCfunc *fn)
 210 {
 211   gc_markobj(g, tabref(fn->c.env));
 212   if (isluafunc(fn)) {
 213     uint32_t i;
 214     lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
 215     gc_markobj(g, funcproto(fn));
 216     for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
 217       gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
 218   } else {
 219     uint32_t i;
 220     for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
 221       gc_marktv(g, &fn->c.upvalue[i]);
 222   }
 223 }
 224 
 225 #if LJ_HASJIT
/* Mark a trace. Pushes it onto the gray list for later traversal. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  /* The in-progress trace is handled separately (gc_traverse_curtrace). */
  lua_assert(traceno != G2J(g)->cur.traceno);
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}
 237 
/* Traverse a trace. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;  /* Ignore traces with no assigned number. */
  /* Mark all GC constants referenced by the trace's IR. */
  for (ref = T->nk; ref < REF_TRUE; ref++) {
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));
  }
  /* Keep linked/sibling traces and the starting prototype alive. */
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}
 253 
/* The current trace is a GC root while not anchored in the prototype (yet). */
#define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
#else
/* No JIT compiled in: nothing to traverse. */
#define gc_traverse_curtrace(g) UNUSED(g)
#endif
 259 
/* Traverse a prototype. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));  /* Keep the chunk name string alive. */
  /* GC constants are indexed with negative offsets from the prototype. */
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);  /* Mark anchored root trace. */
#endif
}
 271 
/* Traverse the frame structure of a stack.
** Marks every frame's function and computes the minimum stack size
** still needed by the active frames (returned for stack shrinking).
*/
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;  /* Track the highest slot in use. */
    gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}
 288 
/* Traverse a thread object: mark live stack slots, then shrink the stack. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1; o < top; o++)  /* Skip slot 0 (thread anchor). */
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    /* In the atomic phase also wipe the dead part of the stack. */
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}
 303 
/* Propagate one gray object. Traverse it and turn it black.
** Returns an estimate of the traversed object's size (used as GC cost).
*/
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lua_assert(isgray(o));
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
                           (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
                           sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    /* Threads go on the grayagain list: their stacks mutate freely, so
    ** they must be rescanned in the atomic phase.
    */
    setgcrefr(th->gclist, g->gc.grayagain);
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lua_assert(0);  /* Only traces may reach here, and only with the JIT. */
    return 0;
#endif
  }
}
 346 
 347 /* Propagate all gray objects. */
 348 static size_t gc_propagate_gray(global_State *g)
 349 {
 350   size_t m = 0;
 351   while (gcref(g->gc.gray) != NULL)
 352     m += propagatemark(g);
 353   return m;
 354 }
 355 
 356 /* -- Sweep phase --------------------------------------------------------- */
 357 
/* Try to shrink some common data structures. */
static void gc_shrink(global_State *g, lua_State *L)
{
  /* Halve the string table if at most ~1/4 of its slots are in use. */
  if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
    lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
  if (g->tmpbuf.sz > LJ_MIN_SBUF*2)
    lj_str_resizebuf(L, &g->tmpbuf, g->tmpbuf.sz >> 1);  /* Shrink temp buf. */
}
 366 
/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T
** Indexed by gct - ~LJ_TSTR in gc_sweep(); keep in type-tag order.
*/
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,        /* LJ_TSTR */
  (GCFreeFunc)lj_func_freeuv,     /* LJ_TUPVAL */
  (GCFreeFunc)lj_state_free,      /* LJ_TTHREAD */
  (GCFreeFunc)lj_func_freeproto,  /* LJ_TPROTO */
  (GCFreeFunc)lj_func_free,       /* LJ_TFUNC */
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,      /* LJ_TTRACE */
#else
  (GCFreeFunc)0,                  /* Unused without the JIT. */
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,      /* LJ_TCDATA */
#else
  (GCFreeFunc)0,                  /* Unused without FFI. */
#endif
  (GCFreeFunc)lj_tab_free,        /* LJ_TTAB */
  (GCFreeFunc)lj_udata_free       /* LJ_TUDATA */
};
 390 
/* Full sweep of a GC list. */
#define gc_fullsweep(g, p)      gc_sweep(g, (p), LJ_MAX_MEM)

/* Partial sweep of a GC list. Frees dead objects, whitens survivors.
** 'lim' caps the number of objects visited; returns the resume position.
*/
static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
{
  /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  int ow = otherwhite(g);
  GCobj *o;
  while ((o = gcref(*p)) != NULL && lim-- > 0) {
    if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
      gc_fullsweep(g, &gco2th(o)->openupval);
    if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
      lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
      makewhite(g, o);  /* Value is alive, change to the current white. */
      p = &o->gch.nextgc;
    } else {  /* Otherwise value is dead, free it. */
      lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
      setgcrefr(*p, o->gch.nextgc);
      if (o == gcref(g->gc.root))
        setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
      gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);  /* Dispatch by type tag. */
    }
  }
  return p;
}
 417 
 418 /* Check whether we can clear a key or a value slot from a table. */
 419 static int gc_mayclear(cTValue *o, int val)
 420 {
 421   if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
 422     if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
 423       gc_mark_str(strV(o));  /* And need to be marked. */
 424       return 0;
 425     }
 426     if (iswhite(gcV(o)))
 427       return 1;  /* Object is about to be collected. */
 428     if (tvisudata(o) && val && isfinalized(udataV(o)))
 429       return 1;  /* Finalized userdata is dropped only from values. */
 430   }
 431   return 0;  /* Cannot clear. */
 432 }
 433 
/* Clear collected entries from weak tables.
** 'o' heads a list of weak tables chained via their gclist fields.
*/
static void gc_clearweak(GCobj *o)
{
  while (o) {
    GCtab *t = gco2tab(o);
    lua_assert((t->marked & LJ_GC_WEAK));
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
        /* Clear array slot when value is about to be collected. */
        TValue *tv = arrayslot(t, i);
        if (gc_mayclear(tv, 1))
          setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
        Node *n = &node[i];
        /* Clear hash slot when key or value is about to be collected. */
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
                                  gc_mayclear(&n->val, 1)))
          setnilV(&n->val);  /* Only the value is nil'ed, not the key. */
      }
    }
    o = gcref(t->gclist);  /* Advance to the next weak table. */
  }
}
 463 
/* Call a userdata or cdata finalizer.
** 'mo' is the resolved __gc metamethod, 'o' the object to finalize.
** Errors from the finalizer are rethrown after state is restored.
*/
static void gc_call_finalizer(global_State *g, lua_State *L,
                              cTValue *mo, GCobj *o)
{
  /* Save and restore lots of state around the __gc callback. */
  uint8_t oldh = hook_save(g);
  MSize oldt = g->gc.threshold;
  int errcode;
  TValue *top;
  lj_trace_abort(g);
  top = L->top;
  L->top = top+2;  /* Reserve slots for metamethod and argument. */
  hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
  copyTV(L, top, mo);
  setgcV(L, top+1, o, ~o->gch.gct);
  errcode = lj_vm_pcall(L, top+1, 1+0, -1);  /* Stack: |mo|o| -> | */
  hook_restore(g, oldh);
  g->gc.threshold = oldt;  /* Restore GC threshold. */
  if (errcode)
    lj_err_throw(L, errcode);  /* Propagate errors. */
}
 486 
/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));  /* First entry past the anchor. */
  cTValue *mo;
  lua_assert(gcref(g->jit_L) == NULL);  /* Must not be called on trace. */
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);  /* Was the only (circular) entry. */
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;  /* Remember that a cdata finalizer ran. */
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}
 528 
 529 /* Finalize all userdata objects from mmudata list. */
 530 void lj_gc_finalize_udata(lua_State *L)
 531 {
 532   while (gcref(G(L)->gc.mmudata) != NULL)
 533     gc_finalize(L);
 534 }
 535 
 536 #if LJ_HASFFI
/* Finalize all cdata objects from finalizer table (shutdown path). */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    /* Walk the hash part backwards and run each pending finalizer. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
        GCobj *o = gcV(&node[i].key);
        TValue tmp;
        makewhite(g, o);
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
        copyTV(L, &tmp, &node[i].val);
        setnilV(&node[i].val);  /* Clear entry before calling the finalizer. */
        gc_call_finalizer(g, L, &tmp, o);
      }
  }
}
 559 #endif
 560 
/* Free all remaining GC objects (VM shutdown). */
void lj_gc_freeall(global_State *g)
{
  MSize i, strmask;
  /* Free everything, except super-fixed objects (the main thread). */
  g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;  /* Everything looks dead. */
  gc_fullsweep(g, &g->gc.root);
  strmask = g->strmask;
  for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    gc_fullsweep(g, &g->strhash[i]);
}
 572 
 573 /* -- Collector ----------------------------------------------------------- */
 574 
/* Atomic part of the GC cycle, transitioning from mark to sweep phase.
** Runs to completion (not incremental); the ordering of the remark
** passes below is essential for correctness.
*/
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lua_assert(!iswhite(obj2gco(mainthread(g))));
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(gcref(g->gc.weak));

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);
  g->gc.estimate = g->gc.total - (MSize)udsize;  /* Initial estimate. */
}
 608 
/* GC state machine. Returns a cost estimate for each step performed. */
static size_t gc_onestep(lua_State *L)
{
  global_State *g = G(L);
  switch (g->gc.state) {
  case GCSpause:
    gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    return 0;
  case GCSpropagate:
    if (gcref(g->gc.gray) != NULL)
      return propagatemark(g);  /* Propagate one gray object. */
    g->gc.state = GCSatomic;  /* End of mark phase. */
    return 0;
  case GCSatomic:
    if (gcref(g->jit_L))  /* Don't run atomic phase on trace. */
      return LJ_MAX_MEM;  /* Huge cost forces the step loop to stop. */
    atomic(g, L);
    g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    g->gc.sweepstr = 0;
    return 0;
  case GCSsweepstring: {
    MSize old = g->gc.total;
    gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
    if (g->gc.sweepstr > g->strmask)
      g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;  /* Credit the freed memory. */
    return GCSWEEPCOST;
    }
  case GCSsweep: {
    MSize old = g->gc.total;
    setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    lua_assert(old >= g->gc.total);
    g->gc.estimate -= old - g->gc.total;
    if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {  /* Reached end of list? */
      gc_shrink(g, L);
      if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
        g->gc.state = GCSfinalize;
#if LJ_HASFFI
        g->gc.nocdatafin = 1;
#endif
      } else {  /* Otherwise skip this phase to help the JIT. */
        g->gc.state = GCSpause;  /* End of GC cycle. */
        g->gc.debt = 0;
      }
    }
    return GCSWEEPMAX*GCSWEEPCOST;
    }
  case GCSfinalize:
    if (gcref(g->gc.mmudata) != NULL) {
      if (gcref(g->jit_L))  /* Don't call finalizers on trace. */
        return LJ_MAX_MEM;
      gc_finalize(L);  /* Finalize one userdata object. */
      if (g->gc.estimate > GCFINALIZECOST)
        g->gc.estimate -= GCFINALIZECOST;
      return GCFINALIZECOST;
    }
#if LJ_HASFFI
    if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
#endif
    g->gc.state = GCSpause;  /* End of GC cycle. */
    g->gc.debt = 0;
    return 0;
  default:
    lua_assert(0);  /* Unreachable: all states are handled above. */
    return 0;
  }
}
 677 
/* Perform a limited amount of incremental GC steps.
** Returns 1 if a full cycle finished, -1 if the debt is low (caller may
** relax), 0 otherwise.
*/
int LJ_FASTCALL lj_gc_step(lua_State *L)
{
  global_State *g = G(L);
  MSize lim;
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  /* Work budget for this invocation, scaled by the step multiplier. */
  lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  if (lim == 0)
    lim = LJ_MAX_MEM;  /* A zero multiplier means: run to completion. */
  if (g->gc.total > g->gc.threshold)
    g->gc.debt += g->gc.total - g->gc.threshold;
  do {
    lim -= (MSize)gc_onestep(L);
    if (g->gc.state == GCSpause) {
      g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
      g->vmstate = ostate;
      return 1;  /* Finished a GC cycle. */
    }
  } while ((int32_t)lim > 0);
  if (g->gc.debt < GCSTEPSIZE) {
    g->gc.threshold = g->gc.total + GCSTEPSIZE;
    g->vmstate = ostate;
    return -1;
  } else {
    g->gc.debt -= GCSTEPSIZE;
    g->gc.threshold = g->gc.total;  /* Keep stepping on the next check. */
    g->vmstate = ostate;
    return 0;
  }
}
 709 
/* Ditto, but fix the stack top first. */
void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
{
  /* Restore a consistent L->top for Lua frames before stepping the GC. */
  if (curr_funcisL(L)) L->top = curr_topL(L);
  lj_gc_step(L);
}
 716 
 717 #if LJ_HASJIT
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->jit_L));  /* Thread currently on trace. */
  L->base = mref(G(L)->jit_base, TValue);  /* Sync VM state from trace state. */
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
 729 #endif
 730 
/* Perform a full GC cycle. */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
  /* Now perform a full GC. */
  g->gc.state = GCSpause;  /* Note: skips pending finalizers until below. */
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;
}
 754 
 755 /* -- Write barriers ------------------------------------------------------ */
 756 
/* Move the GC propagation frontier forward.
** Called when a black object 'o' is made to reference a white object 'v'.
*/
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
  lua_assert(o->gch.gct != ~LJ_TTAB);  /* Tables use a backward barrier. */
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}
 769 
/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
/* Recover the upvalue's marked byte from the address of its value slot. */
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Forward barrier: mark the stored value. */
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}
 781 
/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Chain the now-closed upvalue onto the main GC list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    }
  }
}
 803 
 804 #if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase. */
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  /* Outside mark phases no barrier is needed; sweep will handle it. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
 811 #endif
 812 
 813 /* -- Allocator ----------------------------------------------------------- */
 814 
/* Call pluggable memory allocator to allocate or resize a fragment.
** osz/nsz are the old/new sizes; nsz == 0 frees. Throws on OOM.
*/
void *lj_mem_realloc(lua_State *L, void *p, MSize osz, MSize nsz)
{
  global_State *g = G(L);
  lua_assert((osz == 0) == (p == NULL));
  p = g->allocf(g->allocd, p, osz, nsz);
  if (p == NULL && nsz > 0)
    lj_err_mem(L);
  lua_assert((nsz == 0) == (p == NULL));
  lua_assert(checkptr32(p));
  g->gc.total = (g->gc.total - osz) + nsz;  /* Keep GC accounting in sync. */
  return p;
}
 828 
/* Allocate new GC object and link it to the root set. Throws on OOM. */
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, MSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);
  lua_assert(checkptr32(o));
  g->gc.total += size;  /* Keep GC accounting in sync. */
  setgcrefr(o->gch.nextgc, g->gc.root);  /* Prepend to the GC root list. */
  setgcref(g->gc.root, o);
  newwhite(g, o);  /* New objects start out in the current white. */
  return o;
}
 843 
 844 /* Resize growable vector. */
 845 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
 846 {
 847   MSize sz = (*szp) << 1;
 848   if (sz < LJ_MIN_VECSZ)
 849     sz = LJ_MIN_VECSZ;
 850   if (sz > lim)
 851     sz = lim;
 852   p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
 853   *szp = sz;
 854   return p;
 855 }
 856 

/* [<][>][^][v][top][bottom][index][help] */