root/lj_gc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. gc_mark
  2. gc_mark_gcroot
  3. gc_mark_start
  4. gc_mark_uv
  5. gc_mark_mmudata
  6. lj_gc_separateudata
  7. gc_traverse_tab
  8. gc_traverse_func
  9. gc_marktrace
  10. gc_traverse_trace
  11. gc_traverse_proto
  12. gc_traverse_frames
  13. gc_traverse_thread
  14. propagatemark
  15. gc_propagate_gray
  16. gc_sweep
  17. gc_mayclear
  18. gc_clearweak
  19. gc_call_finalizer
  20. gc_finalize
  21. lj_gc_finalize_udata
  22. lj_gc_finalize_cdata
  23. lj_gc_freeall
  24. atomic
  25. gc_onestep
  26. lj_gc_step
  27. lj_gc_step_fixtop
  28. lj_gc_step_jit
  29. lj_gc_fullgc
  30. lj_gc_barrierf
  31. lj_gc_barrieruv
  32. lj_gc_closeuv
  33. lj_gc_barriertrace
  34. lj_mem_realloc
  35. lj_mem_newgco
  36. lj_mem_grow

   1 /*
   2 ** Garbage collector.
   3 ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
   4 **
   5 ** Major portions taken verbatim or adapted from the Lua interpreter.
   6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
   7 */
   8 
   9 #define lj_gc_c
  10 #define LUA_CORE
  11 
  12 #include "lj_obj.h"
  13 #include "lj_gc.h"
  14 #include "lj_err.h"
  15 #include "lj_buf.h"
  16 #include "lj_str.h"
  17 #include "lj_tab.h"
  18 #include "lj_func.h"
  19 #include "lj_udata.h"
  20 #include "lj_meta.h"
  21 #include "lj_state.h"
  22 #include "lj_frame.h"
  23 #if LJ_HASFFI
  24 #include "lj_ctype.h"
  25 #include "lj_cdata.h"
  26 #endif
  27 #include "lj_trace.h"
  28 #include "lj_vm.h"
  29 
  30 #define GCSTEPSIZE      1024u
  31 #define GCSWEEPMAX      40
  32 #define GCSWEEPCOST     10
  33 #define GCFINALIZECOST  100
  34 
  35 /* Macros to set GCobj colors and flags. */
  36 #define white2gray(x)           ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
  37 #define gray2black(x)           ((x)->gch.marked |= LJ_GC_BLACK)
  38 #define isfinalized(u)          ((u)->marked & LJ_GC_FINALIZED)
  39 
  40 /* -- Mark phase ---------------------------------------------------------- */
  41 
  42 /* Mark a TValue (if needed). */
  43 #define gc_marktv(g, tv) \
  44   { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
  45     if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
  46 
  47 /* Mark a GCobj (if needed). */
  48 #define gc_markobj(g, o) \
  49   { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
  50 
  51 /* Mark a string object. */
  52 #define gc_mark_str(s)          ((s)->marked &= (uint8_t)~LJ_GC_WHITES)
  53 
  54 /* Mark a white GCobj. */
  55 static void gc_mark(global_State *g, GCobj *o)
  56 {
  57   int gct = o->gch.gct;
  58   lua_assert(iswhite(o) && !isdead(g, o));
  59   white2gray(o);
  60   if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
  61     GCtab *mt = tabref(gco2ud(o)->metatable);
  62     gray2black(o);  /* Userdata are never gray. */
  63     if (mt) gc_markobj(g, mt);
  64     gc_markobj(g, tabref(gco2ud(o)->env));
  65   } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
  66     GCupval *uv = gco2uv(o);
  67     gc_marktv(g, uvval(uv));
  68     if (uv->closed)
  69       gray2black(o);  /* Closed upvalues are never gray. */
  70   } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
  71     lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
  72                gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE);
  73     setgcrefr(o->gch.gclist, g->gc.gray);
  74     setgcref(g->gc.gray, o);
  75   }
  76 }
  77 
  78 /* Mark GC roots. */
  79 static void gc_mark_gcroot(global_State *g)
  80 {
  81   ptrdiff_t i;
  82   for (i = 0; i < GCROOT_MAX; i++)
  83     if (gcref(g->gcroot[i]) != NULL)
  84       gc_markobj(g, gcref(g->gcroot[i]));
  85 }
  86 
  87 /* Start a GC cycle and mark the root set. */
  88 static void gc_mark_start(global_State *g)
  89 {
  90   setgcrefnull(g->gc.gray);
  91   setgcrefnull(g->gc.grayagain);
  92   setgcrefnull(g->gc.weak);
  93   gc_markobj(g, mainthread(g));
  94   gc_markobj(g, tabref(mainthread(g)->env));
  95   gc_marktv(g, &g->registrytv);
  96   gc_mark_gcroot(g);
  97   g->gc.state = GCSpropagate;
  98 }
  99 
 100 /* Mark open upvalues. */
 101 static void gc_mark_uv(global_State *g)
 102 {
 103   GCupval *uv;
 104   for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
 105     lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
 106     if (isgray(obj2gco(uv)))
 107       gc_marktv(g, uvval(uv));
 108   }
 109 }
 110 
 111 /* Mark userdata in mmudata list. */
 112 static void gc_mark_mmudata(global_State *g)
 113 {
 114   GCobj *root = gcref(g->gc.mmudata);
 115   GCobj *u = root;
 116   if (u) {
 117     do {
 118       u = gcnext(u);
 119       makewhite(g, u);  /* Could be from previous GC. */
 120       gc_mark(g, u);
 121     } while (u != root);
 122   }
 123 }
 124 
 125 /* Separate userdata objects to be finalized to mmudata list. */
 126 size_t lj_gc_separateudata(global_State *g, int all)
 127 {
 128   size_t m = 0;
 129   GCRef *p = &mainthread(g)->nextgc;
 130   GCobj *o;
 131   while ((o = gcref(*p)) != NULL) {
 132     if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
 133       p = &o->gch.nextgc;  /* Nothing to do. */
 134     } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
 135       markfinalized(o);  /* Done, as there's no __gc metamethod. */
 136       p = &o->gch.nextgc;
 137     } else {  /* Otherwise move userdata to be finalized to mmudata list. */
 138       m += sizeudata(gco2ud(o));
 139       markfinalized(o);
 140       *p = o->gch.nextgc;
 141       if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
 142         GCobj *root = gcref(g->gc.mmudata);
 143         setgcrefr(o->gch.nextgc, root->gch.nextgc);
 144         setgcref(root->gch.nextgc, o);
 145         setgcref(g->gc.mmudata, o);
 146       } else {  /* Create circular list. */
 147         setgcref(o->gch.nextgc, o);
 148         setgcref(g->gc.mmudata, o);
 149       }
 150     }
 151   }
 152   return m;
 153 }
 154 
 155 /* -- Propagation phase --------------------------------------------------- */
 156 
 157 /* Traverse a table. */
 158 static int gc_traverse_tab(global_State *g, GCtab *t)
 159 {
 160   int weak = 0;
 161   cTValue *mode;
 162   GCtab *mt = tabref(t->metatable);
 163   if (mt)
 164     gc_markobj(g, mt);
 165   mode = lj_meta_fastg(g, mt, MM_mode);
 166   if (mode && tvisstr(mode)) {  /* Valid __mode field? */
 167     const char *modestr = strVdata(mode);
 168     int c;
 169     while ((c = *modestr++)) {
 170       if (c == 'k') weak |= LJ_GC_WEAKKEY;
 171       else if (c == 'v') weak |= LJ_GC_WEAKVAL;
 172     }
 173     if (weak) {  /* Weak tables are cleared in the atomic phase. */
 174 #if LJ_HASFFI
 175       CTState *cts = ctype_ctsG(g);
 176       if (cts && cts->finalizer == t) {
 177         weak = (int)(~0u & ~LJ_GC_WEAKVAL);
 178       } else
 179 #endif
 180       {
 181         t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
 182         setgcrefr(t->gclist, g->gc.weak);
 183         setgcref(g->gc.weak, obj2gco(t));
 184       }
 185     }
 186   }
 187   if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
 188     return 1;
 189   if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
 190     MSize i, asize = t->asize;
 191     for (i = 0; i < asize; i++)
 192       gc_marktv(g, arrayslot(t, i));
 193   }
 194   if (t->hmask > 0) {  /* Mark hash part. */
 195     Node *node = noderef(t->node);
 196     MSize i, hmask = t->hmask;
 197     for (i = 0; i <= hmask; i++) {
 198       Node *n = &node[i];
 199       if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
 200         lua_assert(!tvisnil(&n->key));
 201         if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
 202         if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
 203       }
 204     }
 205   }
 206   return weak;
 207 }
 208 
 209 /* Traverse a function. */
 210 static void gc_traverse_func(global_State *g, GCfunc *fn)
 211 {
 212   gc_markobj(g, tabref(fn->c.env));
 213   if (isluafunc(fn)) {
 214     uint32_t i;
 215     lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
 216     gc_markobj(g, funcproto(fn));
 217     for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
 218       gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
 219   } else {
 220     uint32_t i;
 221     for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
 222       gc_marktv(g, &fn->c.upvalue[i]);
 223   }
 224 }
 225 
 226 #if LJ_HASJIT
 227 /* Mark a trace. */
 228 static void gc_marktrace(global_State *g, TraceNo traceno)
 229 {
 230   GCobj *o = obj2gco(traceref(G2J(g), traceno));
 231   lua_assert(traceno != G2J(g)->cur.traceno);
 232   if (iswhite(o)) {
 233     white2gray(o);
 234     setgcrefr(o->gch.gclist, g->gc.gray);
 235     setgcref(g->gc.gray, o);
 236   }
 237 }
 238 
 239 /* Traverse a trace. */
 240 static void gc_traverse_trace(global_State *g, GCtrace *T)
 241 {
 242   IRRef ref;
 243   if (T->traceno == 0) return;
 244   for (ref = T->nk; ref < REF_TRUE; ref++) {
 245     IRIns *ir = &T->ir[ref];
 246     if (ir->o == IR_KGC)
 247       gc_markobj(g, ir_kgc(ir));
 248     if (irt_is64(ir->t) && ir->o != IR_KNULL)
 249       ref++;
 250   }
 251   if (T->link) gc_marktrace(g, T->link);
 252   if (T->nextroot) gc_marktrace(g, T->nextroot);
 253   if (T->nextside) gc_marktrace(g, T->nextside);
 254   gc_markobj(g, gcref(T->startpt));
 255 }
 256 
 257 /* The current trace is a GC root while not anchored in the prototype (yet). */
 258 #define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur)
 259 #else
 260 #define gc_traverse_curtrace(g) UNUSED(g)
 261 #endif
 262 
 263 /* Traverse a prototype. */
 264 static void gc_traverse_proto(global_State *g, GCproto *pt)
 265 {
 266   ptrdiff_t i;
 267   gc_mark_str(proto_chunkname(pt));
 268   for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
 269     gc_markobj(g, proto_kgc(pt, i));
 270 #if LJ_HASJIT
 271   if (pt->trace) gc_marktrace(g, pt->trace);
 272 #endif
 273 }
 274 
 275 /* Traverse the frame structure of a stack. */
 276 static MSize gc_traverse_frames(global_State *g, lua_State *th)
 277 {
 278   TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
 279   /* Note: extra vararg frame not skipped, marks function twice (harmless). */
 280   for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
 281     GCfunc *fn = frame_func(frame);
 282     TValue *ftop = frame;
 283     if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
 284     if (ftop > top) top = ftop;
 285     if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
 286   }
 287   top++;  /* Correct bias of -1 (frame == base-1). */
 288   if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
 289   return (MSize)(top - bot);  /* Return minimum needed stack size. */
 290 }
 291 
 292 /* Traverse a thread object. */
 293 static void gc_traverse_thread(global_State *g, lua_State *th)
 294 {
 295   TValue *o, *top = th->top;
 296   for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
 297     gc_marktv(g, o);
 298   if (g->gc.state == GCSatomic) {
 299     top = tvref(th->stack) + th->stacksize;
 300     for (; o < top; o++)  /* Clear unmarked slots. */
 301       setnilV(o);
 302   }
 303   gc_markobj(g, tabref(th->env));
 304   lj_state_shrinkstack(th, gc_traverse_frames(g, th));
 305 }
 306 
 307 /* Propagate one gray object. Traverse it and turn it black. */
 308 static size_t propagatemark(global_State *g)
 309 {
 310   GCobj *o = gcref(g->gc.gray);
 311   int gct = o->gch.gct;
 312   lua_assert(isgray(o));
 313   gray2black(o);
 314   setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
 315   if (LJ_LIKELY(gct == ~LJ_TTAB)) {
 316     GCtab *t = gco2tab(o);
 317     if (gc_traverse_tab(g, t) > 0)
 318       black2gray(o);  /* Keep weak tables gray. */
 319     return sizeof(GCtab) + sizeof(TValue) * t->asize +
 320                            (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0);
 321   } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
 322     GCfunc *fn = gco2func(o);
 323     gc_traverse_func(g, fn);
 324     return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
 325                            sizeCfunc((MSize)fn->c.nupvalues);
 326   } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
 327     GCproto *pt = gco2pt(o);
 328     gc_traverse_proto(g, pt);
 329     return pt->sizept;
 330   } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
 331     lua_State *th = gco2th(o);
 332     setgcrefr(th->gclist, g->gc.grayagain);
 333     setgcref(g->gc.grayagain, o);
 334     black2gray(o);  /* Threads are never black. */
 335     gc_traverse_thread(g, th);
 336     return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
 337   } else {
 338 #if LJ_HASJIT
 339     GCtrace *T = gco2trace(o);
 340     gc_traverse_trace(g, T);
 341     return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
 342            T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
 343 #else
 344     lua_assert(0);
 345     return 0;
 346 #endif
 347   }
 348 }
 349 
 350 /* Propagate all gray objects. */
 351 static size_t gc_propagate_gray(global_State *g)
 352 {
 353   size_t m = 0;
 354   while (gcref(g->gc.gray) != NULL)
 355     m += propagatemark(g);
 356   return m;
 357 }
 358 
 359 /* -- Sweep phase --------------------------------------------------------- */
 360 
 361 /* Type of GC free functions. */
 362 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
 363 
 364 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
 365 static const GCFreeFunc gc_freefunc[] = {
 366   (GCFreeFunc)lj_str_free,
 367   (GCFreeFunc)lj_func_freeuv,
 368   (GCFreeFunc)lj_state_free,
 369   (GCFreeFunc)lj_func_freeproto,
 370   (GCFreeFunc)lj_func_free,
 371 #if LJ_HASJIT
 372   (GCFreeFunc)lj_trace_free,
 373 #else
 374   (GCFreeFunc)0,
 375 #endif
 376 #if LJ_HASFFI
 377   (GCFreeFunc)lj_cdata_free,
 378 #else
 379   (GCFreeFunc)0,
 380 #endif
 381   (GCFreeFunc)lj_tab_free,
 382   (GCFreeFunc)lj_udata_free
 383 };
 384 
 385 /* Full sweep of a GC list. */
 386 #define gc_fullsweep(g, p)      gc_sweep(g, (p), ~(uint32_t)0)
 387 
 388 /* Partial sweep of a GC list. */
 389 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
 390 {
 391   /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
 392   int ow = otherwhite(g);
 393   GCobj *o;
 394   while ((o = gcref(*p)) != NULL && lim-- > 0) {
 395     if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
 396       gc_fullsweep(g, &gco2th(o)->openupval);
 397     if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
 398       lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
 399       makewhite(g, o);  /* Value is alive, change to the current white. */
 400       p = &o->gch.nextgc;
 401     } else {  /* Otherwise value is dead, free it. */
 402       lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
 403       setgcrefr(*p, o->gch.nextgc);
 404       if (o == gcref(g->gc.root))
 405         setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
 406       gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
 407     }
 408   }
 409   return p;
 410 }
 411 
 412 /* Check whether we can clear a key or a value slot from a table. */
 413 static int gc_mayclear(cTValue *o, int val)
 414 {
 415   if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
 416     if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
 417       gc_mark_str(strV(o));  /* And need to be marked. */
 418       return 0;
 419     }
 420     if (iswhite(gcV(o)))
 421       return 1;  /* Object is about to be collected. */
 422     if (tvisudata(o) && val && isfinalized(udataV(o)))
 423       return 1;  /* Finalized userdata is dropped only from values. */
 424   }
 425   return 0;  /* Cannot clear. */
 426 }
 427 
 428 /* Clear collected entries from weak tables. */
 429 static void gc_clearweak(GCobj *o)
 430 {
 431   while (o) {
 432     GCtab *t = gco2tab(o);
 433     lua_assert((t->marked & LJ_GC_WEAK));
 434     if ((t->marked & LJ_GC_WEAKVAL)) {
 435       MSize i, asize = t->asize;
 436       for (i = 0; i < asize; i++) {
 437         /* Clear array slot when value is about to be collected. */
 438         TValue *tv = arrayslot(t, i);
 439         if (gc_mayclear(tv, 1))
 440           setnilV(tv);
 441       }
 442     }
 443     if (t->hmask > 0) {
 444       Node *node = noderef(t->node);
 445       MSize i, hmask = t->hmask;
 446       for (i = 0; i <= hmask; i++) {
 447         Node *n = &node[i];
 448         /* Clear hash slot when key or value is about to be collected. */
 449         if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
 450                                   gc_mayclear(&n->val, 1)))
 451           setnilV(&n->val);
 452       }
 453     }
 454     o = gcref(t->gclist);
 455   }
 456 }
 457 
 458 /* Call a userdata or cdata finalizer. */
 459 static void gc_call_finalizer(global_State *g, lua_State *L,
 460                               cTValue *mo, GCobj *o)
 461 {
 462   /* Save and restore lots of state around the __gc callback. */
 463   uint8_t oldh = hook_save(g);
 464   GCSize oldt = g->gc.threshold;
 465   int errcode;
 466   TValue *top;
 467   lj_trace_abort(g);
 468   hook_entergc(g);  /* Disable hooks and new traces during __gc. */
 469   g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
 470   top = L->top;
 471   copyTV(L, top++, mo);
 472   if (LJ_FR2) setnilV(top++);
 473   setgcV(L, top, o, ~o->gch.gct);
 474   L->top = top+1;
 475   errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
 476   hook_restore(g, oldh);
 477   g->gc.threshold = oldt;  /* Restore GC threshold. */
 478   if (errcode)
 479     lj_err_throw(L, errcode);  /* Propagate errors. */
 480 }
 481 
 482 /* Finalize one userdata or cdata object from the mmudata list. */
 483 static void gc_finalize(lua_State *L)
 484 {
 485   global_State *g = G(L);
 486   GCobj *o = gcnext(gcref(g->gc.mmudata));
 487   cTValue *mo;
 488   lua_assert(tvref(g->jit_base) == NULL);  /* Must not be called on trace. */
 489   /* Unchain from list of userdata to be finalized. */
 490   if (o == gcref(g->gc.mmudata))
 491     setgcrefnull(g->gc.mmudata);
 492   else
 493     setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
 494 #if LJ_HASFFI
 495   if (o->gch.gct == ~LJ_TCDATA) {
 496     TValue tmp, *tv;
 497     /* Add cdata back to the GC list and make it white. */
 498     setgcrefr(o->gch.nextgc, g->gc.root);
 499     setgcref(g->gc.root, o);
 500     makewhite(g, o);
 501     o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
 502     /* Resolve finalizer. */
 503     setcdataV(L, &tmp, gco2cd(o));
 504     tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
 505     if (!tvisnil(tv)) {
 506       g->gc.nocdatafin = 0;
 507       copyTV(L, &tmp, tv);
 508       setnilV(tv);  /* Clear entry in finalizer table. */
 509       gc_call_finalizer(g, L, &tmp, o);
 510     }
 511     return;
 512   }
 513 #endif
 514   /* Add userdata back to the main userdata list and make it white. */
 515   setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
 516   setgcref(mainthread(g)->nextgc, o);
 517   makewhite(g, o);
 518   /* Resolve the __gc metamethod. */
 519   mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
 520   if (mo)
 521     gc_call_finalizer(g, L, mo, o);
 522 }
 523 
 524 /* Finalize all userdata objects from mmudata list. */
 525 void lj_gc_finalize_udata(lua_State *L)
 526 {
 527   while (gcref(G(L)->gc.mmudata) != NULL)
 528     gc_finalize(L);
 529 }
 530 
 531 #if LJ_HASFFI
 532 /* Finalize all cdata objects from finalizer table. */
 533 void lj_gc_finalize_cdata(lua_State *L)
 534 {
 535   global_State *g = G(L);
 536   CTState *cts = ctype_ctsG(g);
 537   if (cts) {
 538     GCtab *t = cts->finalizer;
 539     Node *node = noderef(t->node);
 540     ptrdiff_t i;
 541     setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
 542     for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
 543       if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
 544         GCobj *o = gcV(&node[i].key);
 545         TValue tmp;
 546         makewhite(g, o);
 547         o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
 548         copyTV(L, &tmp, &node[i].val);
 549         setnilV(&node[i].val);
 550         gc_call_finalizer(g, L, &tmp, o);
 551       }
 552   }
 553 }
 554 #endif
 555 
 556 /* Free all remaining GC objects. */
 557 void lj_gc_freeall(global_State *g)
 558 {
 559   MSize i, strmask;
 560   /* Free everything, except super-fixed objects (the main thread). */
 561   g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
 562   gc_fullsweep(g, &g->gc.root);
 563   strmask = g->strmask;
 564   for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
 565     gc_fullsweep(g, &g->strhash[i]);
 566 }
 567 
 568 /* -- Collector ----------------------------------------------------------- */
 569 
 570 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
 571 static void atomic(global_State *g, lua_State *L)
 572 {
 573   size_t udsize;
 574 
 575   gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
 576   gc_propagate_gray(g);  /* Propagate any left-overs. */
 577 
 578   setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
 579   setgcrefnull(g->gc.weak);
 580   lua_assert(!iswhite(obj2gco(mainthread(g))));
 581   gc_markobj(g, L);  /* Mark running thread. */
 582   gc_traverse_curtrace(g);  /* Traverse current trace. */
 583   gc_mark_gcroot(g);  /* Mark GC roots (again). */
 584   gc_propagate_gray(g);  /* Propagate all of the above. */
 585 
 586   setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
 587   setgcrefnull(g->gc.grayagain);
 588   gc_propagate_gray(g);  /* Propagate it. */
 589 
 590   udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
 591   gc_mark_mmudata(g);  /* Mark them. */
 592   udsize += gc_propagate_gray(g);  /* And propagate the marks. */
 593 
 594   /* All marking done, clear weak tables. */
 595   gc_clearweak(gcref(g->gc.weak));
 596 
 597   lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
 598 
 599   /* Prepare for sweep phase. */
 600   g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
 601   g->strempty.marked = g->gc.currentwhite;
 602   setmref(g->gc.sweep, &g->gc.root);
 603   g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
 604 }
 605 
 606 /* GC state machine. Returns a cost estimate for each step performed. */
 607 static size_t gc_onestep(lua_State *L)
 608 {
 609   global_State *g = G(L);
 610   switch (g->gc.state) {
 611   case GCSpause:
 612     gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
 613     return 0;
 614   case GCSpropagate:
 615     if (gcref(g->gc.gray) != NULL)
 616       return propagatemark(g);  /* Propagate one gray object. */
 617     g->gc.state = GCSatomic;  /* End of mark phase. */
 618     return 0;
 619   case GCSatomic:
 620     if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
 621       return LJ_MAX_MEM;
 622     atomic(g, L);
 623     g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
 624     g->gc.sweepstr = 0;
 625     return 0;
 626   case GCSsweepstring: {
 627     GCSize old = g->gc.total;
 628     gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
 629     if (g->gc.sweepstr > g->strmask)
 630       g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
 631     lua_assert(old >= g->gc.total);
 632     g->gc.estimate -= old - g->gc.total;
 633     return GCSWEEPCOST;
 634     }
 635   case GCSsweep: {
 636     GCSize old = g->gc.total;
 637     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
 638     lua_assert(old >= g->gc.total);
 639     g->gc.estimate -= old - g->gc.total;
 640     if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
 641       if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
 642         lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
 643       if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
 644         g->gc.state = GCSfinalize;
 645 #if LJ_HASFFI
 646         g->gc.nocdatafin = 1;
 647 #endif
 648       } else {  /* Otherwise skip this phase to help the JIT. */
 649         g->gc.state = GCSpause;  /* End of GC cycle. */
 650         g->gc.debt = 0;
 651       }
 652     }
 653     return GCSWEEPMAX*GCSWEEPCOST;
 654     }
 655   case GCSfinalize:
 656     if (gcref(g->gc.mmudata) != NULL) {
 657       if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
 658         return LJ_MAX_MEM;
 659       gc_finalize(L);  /* Finalize one userdata object. */
 660       if (g->gc.estimate > GCFINALIZECOST)
 661         g->gc.estimate -= GCFINALIZECOST;
 662       return GCFINALIZECOST;
 663     }
 664 #if LJ_HASFFI
 665     if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
 666 #endif
 667     g->gc.state = GCSpause;  /* End of GC cycle. */
 668     g->gc.debt = 0;
 669     return 0;
 670   default:
 671     lua_assert(0);
 672     return 0;
 673   }
 674 }
 675 
 676 /* Perform a limited amount of incremental GC steps. */
 677 int LJ_FASTCALL lj_gc_step(lua_State *L)
 678 {
 679   global_State *g = G(L);
 680   GCSize lim;
 681   int32_t ostate = g->vmstate;
 682   setvmstate(g, GC);
 683   lim = (GCSTEPSIZE/100) * g->gc.stepmul;
 684   if (lim == 0)
 685     lim = LJ_MAX_MEM;
 686   if (g->gc.total > g->gc.threshold)
 687     g->gc.debt += g->gc.total - g->gc.threshold;
 688   do {
 689     lim -= (GCSize)gc_onestep(L);
 690     if (g->gc.state == GCSpause) {
 691       g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
 692       g->vmstate = ostate;
 693       return 1;  /* Finished a GC cycle. */
 694     }
 695   } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
 696   if (g->gc.debt < GCSTEPSIZE) {
 697     g->gc.threshold = g->gc.total + GCSTEPSIZE;
 698     g->vmstate = ostate;
 699     return -1;
 700   } else {
 701     g->gc.debt -= GCSTEPSIZE;
 702     g->gc.threshold = g->gc.total;
 703     g->vmstate = ostate;
 704     return 0;
 705   }
 706 }
 707 
 708 /* Ditto, but fix the stack top first. */
 709 void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
 710 {
 711   if (curr_funcisL(L)) L->top = curr_topL(L);
 712   lj_gc_step(L);
 713 }
 714 
 715 #if LJ_HASJIT
 716 /* Perform multiple GC steps. Called from JIT-compiled code. */
 717 int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
 718 {
 719   lua_State *L = gco2th(gcref(g->cur_L));
 720   L->base = tvref(G(L)->jit_base);
 721   L->top = curr_topL(L);
 722   while (steps-- > 0 && lj_gc_step(L) == 0)
 723     ;
 724   /* Return 1 to force a trace exit. */
 725   return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
 726 }
 727 #endif
 728 
 729 /* Perform a full GC cycle. */
 730 void lj_gc_fullgc(lua_State *L)
 731 {
 732   global_State *g = G(L);
 733   int32_t ostate = g->vmstate;
 734   setvmstate(g, GC);
 735   if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
 736     setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
 737     setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
 738     setgcrefnull(g->gc.grayagain);
 739     setgcrefnull(g->gc.weak);
 740     g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
 741     g->gc.sweepstr = 0;
 742   }
 743   while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
 744     gc_onestep(L);  /* Finish sweep. */
 745   lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
 746   /* Now perform a full GC. */
 747   g->gc.state = GCSpause;
 748   do { gc_onestep(L); } while (g->gc.state != GCSpause);
 749   g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
 750   g->vmstate = ostate;
 751 }
 752 
 753 /* -- Write barriers ------------------------------------------------------ */
 754 
 755 /* Move the GC propagation frontier forward. */
 756 void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
 757 {
 758   lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
 759   lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
 760   lua_assert(o->gch.gct != ~LJ_TTAB);
 761   /* Preserve invariant during propagation. Otherwise it doesn't matter. */
 762   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
 763     gc_mark(g, v);  /* Move frontier forward. */
 764   else
 765     makewhite(g, o);  /* Make it white to avoid the following barrier. */
 766 }
 767 
 768 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
 769 void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
 770 {
 771 #define TV2MARKED(x) \
 772   (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
 773   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
 774     gc_mark(g, gcV(tv));
 775   else
 776     TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
 777 #undef TV2MARKED
 778 }
 779 
 780 /* Close upvalue. Also needs a write barrier. */
 781 void lj_gc_closeuv(global_State *g, GCupval *uv)
 782 {
 783   GCobj *o = obj2gco(uv);
 784   /* Copy stack slot to upvalue itself and point to the copy. */
 785   copyTV(mainthread(g), &uv->tv, uvval(uv));
 786   setmref(uv->v, &uv->tv);
 787   uv->closed = 1;
 788   setgcrefr(o->gch.nextgc, g->gc.root);
 789   setgcref(g->gc.root, o);
 790   if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
 791     if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
 792       gray2black(o);  /* Make it black and preserve invariant. */
 793       if (tviswhite(&uv->tv))
 794         lj_gc_barrierf(g, o, gcV(&uv->tv));
 795     } else {
 796       makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
 797       lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
 798     }
 799   }
 800 }
 801 
 802 #if LJ_HASJIT
 803 /* Mark a trace if it's saved during the propagation phase. */
 804 void lj_gc_barriertrace(global_State *g, uint32_t traceno)
 805 {
 806   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
 807     gc_marktrace(g, traceno);
 808 }
 809 #endif
 810 
 811 /* -- Allocator ----------------------------------------------------------- */
 812 
 813 /* Call pluggable memory allocator to allocate or resize a fragment. */
 814 void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
 815 {
 816   global_State *g = G(L);
 817   lua_assert((osz == 0) == (p == NULL));
 818   p = g->allocf(g->allocd, p, osz, nsz);
 819   if (p == NULL && nsz > 0)
 820     lj_err_mem(L);
 821   lua_assert((nsz == 0) == (p == NULL));
 822   lua_assert(checkptrGC(p));
 823   g->gc.total = (g->gc.total - osz) + nsz;
 824   return p;
 825 }
 826 
 827 /* Allocate new GC object and link it to the root set. */
 828 void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
 829 {
 830   global_State *g = G(L);
 831   GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
 832   if (o == NULL)
 833     lj_err_mem(L);
 834   lua_assert(checkptrGC(o));
 835   g->gc.total += size;
 836   setgcrefr(o->gch.nextgc, g->gc.root);
 837   setgcref(g->gc.root, o);
 838   newwhite(g, o);
 839   return o;
 840 }
 841 
 842 /* Resize growable vector. */
 843 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
 844 {
 845   MSize sz = (*szp) << 1;
 846   if (sz < LJ_MIN_VECSZ)
 847     sz = LJ_MIN_VECSZ;
 848   if (sz > lim)
 849     sz = lim;
 850   p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
 851   *szp = sz;
 852   return p;
 853 }
 854 

/* [<][>][^][v][top][bottom][index][help] */