root/lj_jit.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. JIT_PARAMDEF
  2. snap_pc
  3. snap_nextofs
  4. LJ_PRNG_BITS

   1 /*
   2 ** Common definitions for the JIT compiler.
   3 ** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
   4 */
   5 
   6 #ifndef _LJ_JIT_H
   7 #define _LJ_JIT_H
   8 
   9 #include "lj_obj.h"
  10 #include "lj_ir.h"
  11 
  12 /* JIT engine flags. */
  13 #define JIT_F_ON                0x00000001
  14 
  15 /* CPU-specific JIT engine flags. */
  16 #if LJ_TARGET_X86ORX64
  17 #define JIT_F_SSE2              0x00000010
  18 #define JIT_F_SSE3              0x00000020
  19 #define JIT_F_SSE4_1            0x00000040
  20 #define JIT_F_PREFER_IMUL       0x00000080
  21 #define JIT_F_LEA_AGU           0x00000100
  22 #define JIT_F_BMI2              0x00000200
  23 
  24 /* Names for the CPU-specific flags. Must match the order above. */
  25 #define JIT_F_CPU_FIRST         JIT_F_SSE2
  26 #define JIT_F_CPUSTRING         "\4SSE2\4SSE3\6SSE4.1\3AMD\4ATOM\4BMI2"
  27 #elif LJ_TARGET_ARM
  28 #define JIT_F_ARMV6_            0x00000010
  29 #define JIT_F_ARMV6T2_          0x00000020
  30 #define JIT_F_ARMV7             0x00000040
  31 #define JIT_F_VFPV2             0x00000080
  32 #define JIT_F_VFPV3             0x00000100
  33 
  34 #define JIT_F_ARMV6             (JIT_F_ARMV6_|JIT_F_ARMV6T2_|JIT_F_ARMV7)
  35 #define JIT_F_ARMV6T2           (JIT_F_ARMV6T2_|JIT_F_ARMV7)
  36 #define JIT_F_VFP               (JIT_F_VFPV2|JIT_F_VFPV3)
  37 
  38 /* Names for the CPU-specific flags. Must match the order above. */
  39 #define JIT_F_CPU_FIRST         JIT_F_ARMV6_
  40 #define JIT_F_CPUSTRING         "\5ARMv6\7ARMv6T2\5ARMv7\5VFPv2\5VFPv3"
  41 #elif LJ_TARGET_PPC
  42 #define JIT_F_SQRT              0x00000010
  43 #define JIT_F_ROUND             0x00000020
  44 
  45 /* Names for the CPU-specific flags. Must match the order above. */
  46 #define JIT_F_CPU_FIRST         JIT_F_SQRT
  47 #define JIT_F_CPUSTRING         "\4SQRT\5ROUND"
  48 #elif LJ_TARGET_MIPS
  49 #define JIT_F_MIPSXXR2          0x00000010
  50 
  51 /* Names for the CPU-specific flags. Must match the order above. */
  52 #define JIT_F_CPU_FIRST         JIT_F_MIPSXXR2
  53 #if LJ_TARGET_MIPS32
  54 #define JIT_F_CPUSTRING         "\010MIPS32R2"
  55 #else
  56 #define JIT_F_CPUSTRING         "\010MIPS64R2"
  57 #endif
  58 #else
  59 #define JIT_F_CPU_FIRST         0
  60 #define JIT_F_CPUSTRING         ""
  61 #endif
  62 
  63 /* Optimization flags. */
  64 #define JIT_F_OPT_MASK          0x0fff0000
  65 
  66 #define JIT_F_OPT_FOLD          0x00010000
  67 #define JIT_F_OPT_CSE           0x00020000
  68 #define JIT_F_OPT_DCE           0x00040000
  69 #define JIT_F_OPT_FWD           0x00080000
  70 #define JIT_F_OPT_DSE           0x00100000
  71 #define JIT_F_OPT_NARROW        0x00200000
  72 #define JIT_F_OPT_LOOP          0x00400000
  73 #define JIT_F_OPT_ABC           0x00800000
  74 #define JIT_F_OPT_SINK          0x01000000
  75 #define JIT_F_OPT_FUSE          0x02000000
  76 
  77 /* Optimizations names for -O. Must match the order above. */
  78 #define JIT_F_OPT_FIRST         JIT_F_OPT_FOLD
  79 #define JIT_F_OPTSTRING \
  80   "\4fold\3cse\3dce\3fwd\3dse\6narrow\4loop\3abc\4sink\4fuse"
  81 
  82 /* Optimization levels set a fixed combination of flags. */
  83 #define JIT_F_OPT_0     0
  84 #define JIT_F_OPT_1     (JIT_F_OPT_FOLD|JIT_F_OPT_CSE|JIT_F_OPT_DCE)
  85 #define JIT_F_OPT_2     (JIT_F_OPT_1|JIT_F_OPT_NARROW|JIT_F_OPT_LOOP)
  86 #define JIT_F_OPT_3     (JIT_F_OPT_2|\
  87   JIT_F_OPT_FWD|JIT_F_OPT_DSE|JIT_F_OPT_ABC|JIT_F_OPT_SINK|JIT_F_OPT_FUSE)
  88 #define JIT_F_OPT_DEFAULT       JIT_F_OPT_3
  89 
  90 #if LJ_TARGET_WINDOWS || LJ_64
  91 /* See: http://blogs.msdn.com/oldnewthing/archive/2003/10/08/55239.aspx */
  92 #define JIT_P_sizemcode_DEFAULT         64
  93 #else
  94 /* Could go as low as 4K, but the mmap() overhead would be rather high. */
  95 #define JIT_P_sizemcode_DEFAULT         32
  96 #endif
  97 
/* Optimization parameters and their defaults.
** X-macro list: each entry is _(length, name, default). The first argument
** is the length of the parameter name encoded as an octal char escape,
** so JIT_P_STRING becomes a sequence of length-prefixed names.
*/
#define JIT_PARAMDEF(_) \
  _(\010, maxtrace,     1000)   /* Max. # of traces in cache. */ \
  _(\011, maxrecord,    4000)   /* Max. # of recorded IR instructions. */ \
  _(\012, maxirconst,   500)    /* Max. # of IR constants of a trace. */ \
  _(\007, maxside,      100)    /* Max. # of side traces of a root trace. */ \
  _(\007, maxsnap,      500)    /* Max. # of snapshots for a trace. */ \
  _(\011, minstitch,    0)      /* Min. # of IR ins for a stitched trace. */ \
  \
  _(\007, hotloop,      56)     /* # of iter. to detect a hot loop/call. */ \
  _(\007, hotexit,      10)     /* # of taken exits to start a side trace. */ \
  _(\007, tryside,      4)      /* # of attempts to compile a side trace. */ \
  \
  _(\012, instunroll,   4)      /* Max. unroll for unstable loops. */ \
  _(\012, loopunroll,   15)     /* Max. unroll for loop ops in side traces. */ \
  _(\012, callunroll,   3)      /* Max. unroll for recursive calls. */ \
  _(\011, recunroll,    2)      /* Min. unroll for true recursion. */ \
  \
  /* Size of each machine code area (in KBytes). */ \
  _(\011, sizemcode,    JIT_P_sizemcode_DEFAULT) \
  /* Max. total size of all machine code areas (in KBytes). */ \
  _(\010, maxmcode,     512) \
  /* End of list. */
 121 
 122 enum {
 123 #define JIT_PARAMENUM(len, name, value) JIT_P_##name,
 124 JIT_PARAMDEF(JIT_PARAMENUM)
 125 #undef JIT_PARAMENUM
 126   JIT_P__MAX
 127 };
 128 
 129 #define JIT_PARAMSTR(len, name, value)  #len #name
 130 #define JIT_P_STRING    JIT_PARAMDEF(JIT_PARAMSTR)
 131 
/* Trace compiler state. */
typedef enum {
  LJ_TRACE_IDLE,        /* Trace compiler idle. */
  LJ_TRACE_ACTIVE = 0x10,  /* NOTE(review): 0x10 gap presumably lets callers
                           ** test "compiler active" as state >= ACTIVE or via
                           ** the 0x10 bit — confirm against lj_trace.c. */
  LJ_TRACE_RECORD,      /* Bytecode recording active. */
  LJ_TRACE_START,       /* New trace started. */
  LJ_TRACE_END,         /* End of trace. */
  LJ_TRACE_ASM,         /* Assemble trace. */
  LJ_TRACE_ERR          /* Trace aborted with error. */
} TraceState;
 142 
 143 /* Post-processing action. */
 144 typedef enum {
 145   LJ_POST_NONE,         /* No action. */
 146   LJ_POST_FIXCOMP,      /* Fixup comparison and emit pending guard. */
 147   LJ_POST_FIXGUARD,     /* Fixup and emit pending guard. */
 148   LJ_POST_FIXGUARDSNAP, /* Fixup and emit pending guard and snapshot. */
 149   LJ_POST_FIXBOOL,      /* Fixup boolean result. */
 150   LJ_POST_FIXCONST,     /* Fixup constant results. */
 151   LJ_POST_FFRETRY       /* Suppress recording of retried fast functions. */
 152 } PostProc;
 153 
 154 /* Machine code type. */
 155 #if LJ_TARGET_X86ORX64
 156 typedef uint8_t MCode;
 157 #else
 158 typedef uint32_t MCode;
 159 #endif
 160 
 161 /* Linked list of MCode areas. */
 162 typedef struct MCLink {
 163   MCode *next;          /* Next area. */
 164   size_t size;          /* Size of current area. */
 165 } MCLink;
 166 
/* Stack snapshot header.
** A snapshot records how to reconstruct the interpreter stack at a trace
** exit; the per-slot entries themselves live in the trace's snapshot map
** (see SnapEntry and GCtrace.snapmap).
*/
typedef struct SnapShot {
  uint16_t mapofs;      /* Offset into snapshot map. */
  IRRef1 ref;           /* First IR ref for this snapshot. */
  uint8_t nslots;       /* Number of valid slots. */
  uint8_t topslot;      /* Maximum frame extent. */
  uint8_t nent;         /* Number of compressed entries. */
  uint8_t count;        /* Count of taken exits for this snapshot. */
} SnapShot;

#define SNAPCOUNT_DONE  255     /* Already compiled and linked a side trace. */
 178 
 179 /* Compressed snapshot entry. */
 180 typedef uint32_t SnapEntry;
 181 
 182 #define SNAP_FRAME              0x010000        /* Frame slot. */
 183 #define SNAP_CONT               0x020000        /* Continuation slot. */
 184 #define SNAP_NORESTORE          0x040000        /* No need to restore slot. */
 185 #define SNAP_SOFTFPNUM          0x080000        /* Soft-float number. */
 186 LJ_STATIC_ASSERT(SNAP_FRAME == TREF_FRAME);
 187 LJ_STATIC_ASSERT(SNAP_CONT == TREF_CONT);
 188 
 189 #define SNAP(slot, flags, ref)  (((SnapEntry)(slot) << 24) + (flags) + (ref))
 190 #define SNAP_TR(slot, tr) \
 191   (((SnapEntry)(slot) << 24) + ((tr) & (TREF_CONT|TREF_FRAME|TREF_REFMASK)))
 192 #if !LJ_FR2
 193 #define SNAP_MKPC(pc)           ((SnapEntry)u32ptr(pc))
 194 #endif
 195 #define SNAP_MKFTSZ(ftsz)       ((SnapEntry)(ftsz))
 196 #define snap_ref(sn)            ((sn) & 0xffff)
 197 #define snap_slot(sn)           ((BCReg)((sn) >> 24))
 198 #define snap_isframe(sn)        ((sn) & SNAP_FRAME)
 199 #define snap_setref(sn, ref)    (((sn) & (0xffff0000&~SNAP_NORESTORE)) | (ref))
 200 
/* Extract the bytecode PC stored in a snapshot map entry. */
static LJ_AINLINE const BCIns *snap_pc(SnapEntry *sn)
{
#if LJ_FR2
  /* Two-slot frame mode: the PC and frame info share one 64-bit value
  ** spanning two 32-bit map entries. memcpy avoids misaligned/aliasing
  ** access when reading them as a single uint64_t.
  */
  uint64_t pcbase;
  memcpy(&pcbase, sn, sizeof(uint64_t));
  /* PC occupies the upper 56 bits; the low byte is frame data (cf.
  ** SNAP_MKFTSZ) — dropped by the shift.
  */
  return (const BCIns *)(pcbase >> 8);
#else
  /* Entry holds the raw 32-bit PC (cf. SNAP_MKPC). */
  return (const BCIns *)(uintptr_t)*sn;
#endif
}
 211 
 212 /* Snapshot and exit numbers. */
 213 typedef uint32_t SnapNo;
 214 typedef uint32_t ExitNo;
 215 
 216 /* Trace number. */
 217 typedef uint32_t TraceNo;       /* Used to pass around trace numbers. */
 218 typedef uint16_t TraceNo1;      /* Stored trace number. */
 219 
 220 /* Type of link. ORDER LJ_TRLINK */
 221 typedef enum {
 222   LJ_TRLINK_NONE,               /* Incomplete trace. No link, yet. */
 223   LJ_TRLINK_ROOT,               /* Link to other root trace. */
 224   LJ_TRLINK_LOOP,               /* Loop to same trace. */
 225   LJ_TRLINK_TAILREC,            /* Tail-recursion. */
 226   LJ_TRLINK_UPREC,              /* Up-recursion. */
 227   LJ_TRLINK_DOWNREC,            /* Down-recursion. */
 228   LJ_TRLINK_INTERP,             /* Fallback to interpreter. */
 229   LJ_TRLINK_RETURN,             /* Return to interpreter. */
 230   LJ_TRLINK_STITCH              /* Trace stitching. */
 231 } TraceLink;
 232 
 233 /* Trace object. */
 234 typedef struct GCtrace {
 235   GCHeader;
 236   uint8_t topslot;      /* Top stack slot already checked to be allocated. */
 237   uint8_t linktype;     /* Type of link. */
 238   IRRef nins;           /* Next IR instruction. Biased with REF_BIAS. */
 239 #if LJ_GC64
 240   uint32_t unused_gc64;
 241 #endif
 242   GCRef gclist;
 243   IRIns *ir;            /* IR instructions/constants. Biased with REF_BIAS. */
 244   IRRef nk;             /* Lowest IR constant. Biased with REF_BIAS. */
 245   uint16_t nsnap;       /* Number of snapshots. */
 246   uint16_t nsnapmap;    /* Number of snapshot map elements. */
 247   SnapShot *snap;       /* Snapshot array. */
 248   SnapEntry *snapmap;   /* Snapshot map. */
 249   GCRef startpt;        /* Starting prototype. */
 250   MRef startpc;         /* Bytecode PC of starting instruction. */
 251   BCIns startins;       /* Original bytecode of starting instruction. */
 252   MSize szmcode;        /* Size of machine code. */
 253   MCode *mcode;         /* Start of machine code. */
 254   MSize mcloop;         /* Offset of loop start in machine code. */
 255   uint16_t nchild;      /* Number of child traces (root trace only). */
 256   uint16_t spadjust;    /* Stack pointer adjustment (offset in bytes). */
 257   TraceNo1 traceno;     /* Trace number. */
 258   TraceNo1 link;        /* Linked trace (or self for loops). */
 259   TraceNo1 root;        /* Root trace of side trace (or 0 for root traces). */
 260   TraceNo1 nextroot;    /* Next root trace for same prototype. */
 261   TraceNo1 nextside;    /* Next side trace of same root trace. */
 262   uint8_t sinktags;     /* Trace has SINK tags. */
 263   uint8_t unused1;
 264 #ifdef LUAJIT_USE_GDBJIT
 265   void *gdbjit_entry;   /* GDB JIT entry. */
 266 #endif
 267 } GCtrace;
 268 
 269 #define gco2trace(o)    check_exp((o)->gch.gct == ~LJ_TTRACE, (GCtrace *)(o))
 270 #define traceref(J, n) \
 271   check_exp((n)>0 && (MSize)(n)<J->sizetrace, (GCtrace *)gcref(J->trace[(n)]))
 272 
 273 LJ_STATIC_ASSERT(offsetof(GChead, gclist) == offsetof(GCtrace, gclist));
 274 
 275 static LJ_AINLINE MSize snap_nextofs(GCtrace *T, SnapShot *snap)
 276 {
 277   if (snap+1 == &T->snap[T->nsnap])
 278     return T->nsnapmap;
 279   else
 280     return (snap+1)->mapofs;
 281 }
 282 
/* Round-robin penalty cache for bytecodes leading to aborted traces. */
typedef struct HotPenalty {
  MRef pc;              /* Starting bytecode PC. */
  uint16_t val;         /* Penalty value, i.e. hotcount start. */
  uint16_t reason;      /* Abort reason (really TraceErr). */
} HotPenalty;

#define PENALTY_SLOTS   64      /* Number of penalty cache slots. Must be a power of 2. */
#define PENALTY_MIN     (36*2)  /* Minimum penalty value. */
#define PENALTY_MAX     60000   /* Maximum penalty value. */
#define PENALTY_RNDBITS 4       /* # of random bits to add to penalty value. */
 294 
 295 /* Round-robin backpropagation cache for narrowing conversions. */
 296 typedef struct BPropEntry {
 297   IRRef1 key;           /* Key: original reference. */
 298   IRRef1 val;           /* Value: reference after conversion. */
 299   IRRef mode;           /* Mode for this entry (currently IRCONV_*). */
 300 } BPropEntry;
 301 
 302 /* Number of slots for the backpropagation cache. Must be a power of 2. */
 303 #define BPROP_SLOTS     16
 304 
 305 /* Scalar evolution analysis cache. */
 306 typedef struct ScEvEntry {
 307   MRef pc;              /* Bytecode PC of FORI. */
 308   IRRef1 idx;           /* Index reference. */
 309   IRRef1 start;         /* Constant start reference. */
 310   IRRef1 stop;          /* Constant stop reference. */
 311   IRRef1 step;          /* Constant step reference. */
 312   IRType1 t;            /* Scalar type. */
 313   uint8_t dir;          /* Direction. 1: +, 0: -. */
 314 } ScEvEntry;
 315 
 316 /* Reverse bytecode map (IRRef -> PC). Only for selected instructions. */
 317 typedef struct RBCHashEntry {
 318   MRef pc;              /* Bytecode PC. */
 319   GCRef pt;             /* Prototype. */
 320   IRRef ref;            /* IR reference. */
 321 } RBCHashEntry;
 322 
 323 /* Number of slots in the reverse bytecode hash table. Must be a power of 2. */
 324 #define RBCHASH_SLOTS   8
 325 
 326 /* 128 bit SIMD constants. */
 327 enum {
 328   LJ_KSIMD_ABS,
 329   LJ_KSIMD_NEG,
 330   LJ_KSIMD__MAX
 331 };
 332 
 333 enum {
 334 #if LJ_TARGET_X86ORX64
 335   LJ_K64_TOBIT,         /* 2^52 + 2^51 */
 336   LJ_K64_2P64,          /* 2^64 */
 337   LJ_K64_M2P64,         /* -2^64 */
 338 #if LJ_32
 339   LJ_K64_M2P64_31,      /* -2^64 or -2^31 */
 340 #else
 341   LJ_K64_M2P64_31 = LJ_K64_M2P64,
 342 #endif
 343 #endif
 344 #if LJ_TARGET_MIPS
 345   LJ_K64_2P31,          /* 2^31 */
 346 #if LJ_64
 347   LJ_K64_2P63,          /* 2^63 */
 348   LJ_K64_M2P64,         /* -2^64 */
 349 #endif
 350 #endif
 351   LJ_K64__MAX,
 352 };
 353 
 354 enum {
 355 #if LJ_TARGET_X86ORX64
 356   LJ_K32_M2P64_31,      /* -2^64 or -2^31 */
 357 #endif
 358 #if LJ_TARGET_PPC
 359   LJ_K32_2P52_2P31,     /* 2^52 + 2^31 */
 360   LJ_K32_2P52,          /* 2^52 */
 361 #endif
 362 #if LJ_TARGET_PPC || LJ_TARGET_MIPS
 363   LJ_K32_2P31,          /* 2^31 */
 364 #endif
 365 #if LJ_TARGET_MIPS64
 366   LJ_K32_2P63,          /* 2^63 */
 367   LJ_K32_M2P64,         /* -2^64 */
 368 #endif
 369   LJ_K32__MAX
 370 };
 371 
 372 /* Get 16 byte aligned pointer to SIMD constant. */
 373 #define LJ_KSIMD(J, n) \
 374   ((TValue *)(((intptr_t)&J->ksimd[2*(n)] + 15) & ~(intptr_t)15))
 375 
 376 /* Set/reset flag to activate the SPLIT pass for the current trace. */
 377 #if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
 378 #define lj_needsplit(J)         (J->needsplit = 1)
 379 #define lj_resetsplit(J)        (J->needsplit = 0)
 380 #else
 381 #define lj_needsplit(J)         UNUSED(J)
 382 #define lj_resetsplit(J)        UNUSED(J)
 383 #endif
 384 
 385 /* Fold state is used to fold instructions on-the-fly. */
 386 typedef struct FoldState {
 387   IRIns ins;            /* Currently emitted instruction. */
 388   IRIns left[2];        /* Instruction referenced by left operand. */
 389   IRIns right[2];       /* Instruction referenced by right operand. */
 390 } FoldState;
 391 
/* JIT compiler state. One instance per VM; holds everything the trace
** recorder, optimizer and assembler need between bytecodes.
*/
typedef struct jit_State {
  GCtrace cur;          /* Current trace. */
  GCtrace *curfinal;    /* Final address of current trace (set during asm). */

  lua_State *L;         /* Current Lua state. */
  const BCIns *pc;      /* Current PC. */
  GCfunc *fn;           /* Current function. */
  GCproto *pt;          /* Current prototype. */
  TRef *base;           /* Current frame base, points into J->slots. */

  uint32_t flags;       /* JIT engine flags. */
  BCReg maxslot;        /* Relative to baseslot. */
  BCReg baseslot;       /* Current frame base, offset into J->slots. */

  uint8_t mergesnap;    /* Allowed to merge with next snapshot. */
  uint8_t needsnap;     /* Need snapshot before recording next bytecode. */
  IRType1 guardemit;    /* Accumulated IRT_GUARD for emitted instructions. */
  uint8_t bcskip;       /* Number of bytecode instructions to skip. */

  FoldState fold;       /* Fold state. */

  const BCIns *bc_min;  /* Start of allowed bytecode range for root trace. */
  MSize bc_extent;      /* Extent of the range. */

  TraceState state;     /* Trace compiler state. */

  int32_t instunroll;   /* Unroll counter for unstable loops. */
  int32_t loopunroll;   /* Unroll counter for loop ops in side traces. */
  int32_t tailcalled;   /* Number of successive tailcalls. */
  int32_t framedepth;   /* Current frame depth. */
  int32_t retdepth;     /* Return frame depth (count of RETF). */

  TValue ksimd[LJ_KSIMD__MAX*2+1];  /* SIMD constants; storage is unaligned,
                                    ** use LJ_KSIMD() for 16 byte alignment. */
  TValue k64[LJ_K64__MAX];  /* Common 8 byte constants used by backends. */
  uint32_t k32[LJ_K32__MAX];  /* Ditto for 4 byte constants. */

  IRIns *irbuf;         /* Temp. IR instruction buffer. Biased with REF_BIAS. */
  IRRef irtoplim;       /* Upper limit of instruction buffer (biased). */
  IRRef irbotlim;       /* Lower limit of instruction buffer (biased). */
  IRRef loopref;        /* Last loop reference or ref of final LOOP (or 0). */

  MSize sizesnap;       /* Size of temp. snapshot buffer. */
  SnapShot *snapbuf;    /* Temp. snapshot buffer. */
  SnapEntry *snapmapbuf;  /* Temp. snapshot map buffer. */
  MSize sizesnapmap;    /* Size of temp. snapshot map buffer. */

  PostProc postproc;    /* Required post-processing after execution. */
#if LJ_SOFTFP32 || (LJ_32 && LJ_HASFFI)
  uint8_t needsplit;    /* Need SPLIT pass. */
#endif
  uint8_t retryrec;     /* Retry recording. */

  GCRef *trace;         /* Array of traces. */
  TraceNo freetrace;    /* Start of scan for next free trace. */
  MSize sizetrace;      /* Size of trace array. */
  IRRef1 ktrace;        /* Reference to KGC with GCtrace. */

  IRRef1 chain[IR__MAX];  /* IR instruction skip-list chain anchors. */
  TRef slot[LJ_MAX_JSLOTS+LJ_STACK_EXTRA];  /* Stack slot map. */

  int32_t param[JIT_P__MAX];  /* JIT engine parameters. */

  MCode *exitstubgroup[LJ_MAX_EXITSTUBGR];  /* Exit stub group addresses. */

  HotPenalty penalty[PENALTY_SLOTS];  /* Penalty slots. */
  uint32_t penaltyslot; /* Round-robin index into penalty slots. */
  uint32_t prngstate;   /* PRNG state. */

#ifdef LUAJIT_ENABLE_TABLE_BUMP
  RBCHashEntry rbchash[RBCHASH_SLOTS];  /* Reverse bytecode map. */
#endif

  BPropEntry bpropcache[BPROP_SLOTS];  /* Backpropagation cache slots. */
  uint32_t bpropslot;   /* Round-robin index into bpropcache slots. */

  ScEvEntry scev;       /* Scalar evolution analysis cache slots. */

  const BCIns *startpc; /* Bytecode PC of starting instruction. */
  TraceNo parent;       /* Parent of current side trace (0 for root traces). */
  ExitNo exitno;        /* Exit number in parent of current side trace. */

  BCIns *patchpc;       /* PC for pending re-patch. */
  BCIns patchins;       /* Instruction for pending re-patch. */

  int mcprot;           /* Protection of current mcode area. */
  MCode *mcarea;        /* Base of current mcode area. */
  MCode *mctop;         /* Top of current mcode area. */
  MCode *mcbot;         /* Bottom of current mcode area. */
  size_t szmcarea;      /* Size of current mcode area. */
  size_t szallmcarea;   /* Total size of all allocated mcode areas. */

  TValue errinfo;       /* Additional info element for trace errors. */

#if LJ_HASPROFILE
  GCproto *prev_pt;     /* Previous prototype. */
  BCLine prev_line;     /* Previous line. */
  int prof_mode;        /* Profiling mode: 0, 'f', 'l'. */
#endif
}
#if LJ_TARGET_ARM
LJ_ALIGN(16)            /* For DISPATCH-relative addresses in assembler part. */
#endif
jit_State;
 496 
 497 /* Trivial PRNG e.g. used for penalty randomization. */
 498 static LJ_AINLINE uint32_t LJ_PRNG_BITS(jit_State *J, int bits)
 499 {
 500   /* Yes, this LCG is very weak, but that doesn't matter for our use case. */
 501   J->prngstate = J->prngstate * 1103515245 + 12345;
 502   return J->prngstate >> (32-bits);
 503 }
 504 
 505 #endif

/* [<][>][^][v][top][bottom][index][help] */