root/lj_opt_narrow.c

DEFINITIONS

This source file includes the following definitions:
  1. narrow_bpc_get
  2. narrow_bpc_set
  3. narrow_stripov_backprop
  4. narrow_conv_backprop
  5. narrow_conv_emit
  6. lj_opt_narrow_convert
  7. narrow_stripov
  8. lj_opt_narrow_index
  9. lj_opt_narrow_toint
  10. lj_opt_narrow_tobit
  11. lj_opt_narrow_cindex
  12. numisint
  13. conv_str_tonum
  14. lj_opt_narrow_arith
  15. lj_opt_narrow_unm
  16. lj_opt_narrow_mod
  17. lj_opt_narrow_pow
  18. narrow_forl
  19. lj_opt_narrow_forl

/*
** NARROW: Narrowing of numbers to integers (double to int32_t).
** STRIPOV: Stripping of overflow checks.
** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_opt_narrow_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_bc.h"
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_iropt.h"
#include "lj_trace.h"
#include "lj_vm.h"
#include "lj_strscan.h"

/* Rationale for narrowing optimizations:
**
** Lua has only a single number type, which is an FP double by default.
** Narrowing doubles to integers does not pay off for the interpreter on a
** current-generation x86/x64 machine. Most FP operations need the same
** amount of execution resources as their integer counterparts, except
** with slightly longer latencies. Longer latencies are a non-issue for
** the interpreter, since they are usually hidden by other overhead.
**
** The total CPU execution bandwidth is the sum of the bandwidth of the FP
** and the integer units, because they execute in parallel. The FP units
** have an equal or higher bandwidth than the integer units. Not using
** them means losing execution bandwidth. Moving work away from them to
** the already quite busy integer units is a losing proposition.
**
** The situation for JIT-compiled code is a bit different: the higher code
** density makes the extra latencies much more visible. Tight loops expose
** the latencies for updating the induction variables. Array indexing
** requires narrowing conversions with high latencies and additional
** guards (to check that the index is really an integer). And many common
** optimizations only work on integers.
**
** One solution would be speculative, eager narrowing of all number loads.
** This causes many problems, like losing -0 or the need to resolve type
** mismatches between traces. It also effectively forces the integer type
** to have overflow-checking semantics. This impedes many basic
** optimizations and requires adding overflow checks to all integer
** arithmetic operations (whereas FP arithmetic can do without).
**
** Always replacing an FP op with an integer op plus an overflow check is
** counter-productive on a current-generation super-scalar CPU. Although
** the overflow check branches are highly predictable, they will clog the
** execution port for the branch unit and tie up reorder buffers. This
** turns a pure data-flow dependency into a different data-flow
** dependency (with slightly lower latency) *plus* a control dependency.
** In general, you don't want to do this since latencies due to data-flow
** dependencies can be well hidden by out-of-order execution.
**
** A better solution is to keep all numbers as FP values and only narrow
** when it's beneficial to do so. LuaJIT uses predictive narrowing for
** induction variables and demand-driven narrowing for index expressions,
** integer arguments and bit operations. Additionally it can eliminate or
** hoist most of the resulting overflow checks. Regular arithmetic
** computations are never narrowed to integers.
**
** The integer type in the IR has convenient wrap-around semantics and
** ignores overflow. Extra operations have been added for
** overflow-checking arithmetic (ADDOV/SUBOV) instead of an extra type.
** Apart from reducing overall complexity of the compiler, this also
** nicely solves the problem where you want to apply algebraic
** simplifications to ADD, but not to ADDOV. And the x86/x64 assembler can
** use lea instead of an add for integer ADD, but not for ADDOV (lea does
** not affect the flags, but it helps to avoid register moves).
**
**
** All of the above has to be reconsidered for architectures with slow FP
** operations or without a hardware FPU. The dual-number mode of LuaJIT
** addresses this issue. Arithmetic operations are performed on integers
** as far as possible and overflow checks are added as needed.
**
** This implies that narrowing for integer arguments and bit operations
** should also strip overflow checks, e.g. replace ADDOV with ADD. The
** original overflow guards are weak and can be eliminated by DCE, if
** there's no other use.
**
** A slight twist is that it's usually beneficial to use overflow-checked
** integer arithmetic if all inputs are already integers. This is the only
** change that affects the single-number mode, too.
*/
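
/* For illustration: with 'i' and 'j' both already recorded as integers
** (e.g. narrowed induction variables) and a runtime result that fits
** into an int32_t, lj_opt_narrow_arith() below emits ADDOV(i, j) for
** 'i+j' instead of converting both operands back to doubles. The
** overflow guard merely exits the trace in the rare case the result
** wraps around.
*/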

/* Some local macros to save typing. Undef'd at the end. */
#define IR(ref)                 (&J->cur.ir[(ref)])
#define fins                    (&J->fold.ins)

/* Pass IR on to next optimization in chain (FOLD). */
#define emitir(ot, a, b)        (lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))

#define emitir_raw(ot, a, b)    (lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))

/* -- Elimination of narrowing type conversions --------------------------- */

/* Narrowing of index expressions and bit operations is demand-driven. The
** trace recorder emits a narrowing type conversion (CONV.int.num or TOBIT)
** in all of these cases (e.g. array indexing or string indexing). FOLD
** already takes care of eliminating simple redundant conversions like
** CONV.int.num(CONV.num.int(x)) ==> x.
**
** But the surrounding code is FP-heavy and arithmetic operations are
** performed on FP numbers (for the single-number mode). Consider a common
** example such as 'x=t[i+1]', with 'i' already an integer (due to induction
** variable narrowing). The index expression would be recorded as
**   CONV.int.num(ADD(CONV.num.int(i), 1))
** which is clearly suboptimal.
**
** One can do better by recursively backpropagating the narrowing type
** conversion across FP arithmetic operations. This turns FP ops into
** their corresponding integer counterparts. Depending on the semantics of
** the conversion they also need to check for overflow. Currently only ADD
** and SUB are supported.
**
** The above example can be rewritten as
**   ADDOV(CONV.int.num(CONV.num.int(i)), 1)
** and then into ADDOV(i, 1) after folding of the conversions. The original
** FP ops remain in the IR and are eliminated by DCE since all references to
** them are gone.
**
** [In dual-number mode the trace recorder already emits ADDOV etc., but
** this can be further reduced. See below.]
**
** Special care has to be taken to avoid narrowing across an operation
** which is potentially operating on non-integral operands. One obvious
** case is when an expression contains a non-integral constant, but ends
** up as an integer index at runtime (like t[x+1.5] with x=0.5).
**
** Operations with two non-constant operands illustrate a similar problem
** (like t[a+b] with a=1.5 and b=2.5). Backpropagation has to stop there,
** unless it can be proven that either operand is integral (e.g. by CSEing
** a previous conversion). As a not-so-obvious corollary this logic also
** applies for a whole expression tree (e.g. t[(a+1)+(b+1)]).
**
** Correctness of the transformation is guaranteed by never adding more
** conversions to the tree than the single one we would need to emit if
** not backpropagating. TOBIT employs a more optimistic rule, because
** the conversion has special semantics, designed to make the life of the
** compiler writer easier. ;-)
**
** Using on-the-fly backpropagation of an expression tree doesn't work
** because it's unknown whether the transform is correct until the end.
** This either requires IR rollback and cache invalidation for every
** subtree or a two-pass algorithm. The former didn't work out too well,
** so the code now combines a recursive collector with a stack-based
** emitter.
**
** [A recursive backpropagation algorithm with backtracking, employing
** skip-list lookup and round-robin caching, emitting stack operations
** on-the-fly for a stack-based interpreter -- and all of that in a meager
** kilobyte? Yep, compilers are a great treasure chest. Throw away your
** textbooks and read the codebase of a compiler today!]
**
** There's another optimization opportunity for array indexing: it's
** always accompanied by an array bounds-check. The outermost overflow
** check may be delegated to the ABC operation. This works because ABC is
** an unsigned comparison and wrap-around due to overflow creates negative
** numbers.
**
** But this optimization is only valid for constants that cannot overflow
** an int32_t into the range of valid array indexes [0..2^27+1). A check
** for +-2^30 is safe since -2^31 - 2^30 wraps to 2^30 and 2^31-1 + 2^30
** wraps to -2^30-1.
**
** It's also good enough in practice, since e.g. t[i+1] or t[i-10] are
** quite common. So the above example finally ends up as ADD(i, 1)!
**
** Later on, the assembler is able to fuse the whole array reference and
** the ADD into the memory operands of loads and other instructions. This
** is why LuaJIT is able to generate very pretty (and fast) machine code
** for array indexing. And that, my dear, concludes another story about
** one of the hidden secrets of LuaJIT ...
*/
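
/* A rough sketch of the ABC delegation described above: 't[i+1]' ends
** up as ADD(i, +1) guarded only by the unsigned ABC bounds check. If i
** were 0x7fffffff, the ADD would wrap to 0x80000000, which the unsigned
** comparison treats as a value far above any valid array size, so the
** bounds check fails and the trace exits -- no extra overflow guard is
** needed.
*/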

/* Maximum backpropagation depth and maximum stack size. */
#define NARROW_MAX_BACKPROP     100
#define NARROW_MAX_STACK        256

/* The stack machine has a 32-bit instruction format: [IROpT | IRRef1]
** The lower 16 bits hold a reference (or 0). The upper 16 bits hold
** the IR opcode + type or one of the following special opcodes:
*/
enum {
  NARROW_REF,           /* Push ref. */
  NARROW_CONV,          /* Push conversion of ref. */
  NARROW_SEXT,          /* Push sign-extension of ref. */
  NARROW_INT            /* Push KINT ref. The next code holds an int32_t. */
};

typedef uint32_t NarrowIns;

#define NARROWINS(op, ref)      (((op) << 16) + (ref))
#define narrow_op(ins)          ((IROpT)((ins) >> 16))
#define narrow_ref(ins)         ((IRRef1)(ins))

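/* Sketch of the collected stack-machine code for the running example:
** narrowing CONV.int.num(ADD(CONV.num.int(i), 1)) yields roughly
**   NARROW_REF(i), NARROW_INT, +1, [IR_ADD|IRT_INT](ref of the FP ADD)
** narrow_conv_emit() below executes this left to right: push i, push a
** KINT +1, then pop both and emit the integer ADD (with or without an
** overflow guard), caching the result under the FP ADD's ref.
*/
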
/* Context used for narrowing of type conversions. */
typedef struct NarrowConv {
  jit_State *J;         /* JIT compiler state. */
  NarrowIns *sp;        /* Current stack pointer. */
  NarrowIns *maxsp;     /* Maximum stack pointer minus redzone. */
  IRRef mode;           /* Conversion mode (IRCONV_*). */
  IRType t;             /* Destination type: IRT_INT or IRT_I64. */
  NarrowIns stack[NARROW_MAX_STACK];  /* Stack holding stack-machine code. */
} NarrowConv;

/* Lookup a reference in the backpropagation cache. */
static BPropEntry *narrow_bpc_get(jit_State *J, IRRef1 key, IRRef mode)
{
  ptrdiff_t i;
  for (i = 0; i < BPROP_SLOTS; i++) {
    BPropEntry *bp = &J->bpropcache[i];
    /* Stronger checks are ok, too. */
    if (bp->key == key && bp->mode >= mode &&
        ((bp->mode ^ mode) & IRCONV_MODEMASK) == 0)
      return bp;
  }
  return NULL;
}

/* Add an entry to the backpropagation cache. */
static void narrow_bpc_set(jit_State *J, IRRef1 key, IRRef1 val, IRRef mode)
{
  uint32_t slot = J->bpropslot;
  BPropEntry *bp = &J->bpropcache[slot];
  J->bpropslot = (slot + 1) & (BPROP_SLOTS-1);
  bp->key = key;
  bp->val = val;
  bp->mode = mode;
}

/* Backpropagate overflow stripping. */
static void narrow_stripov_backprop(NarrowConv *nc, IRRef ref, int depth)
{
  jit_State *J = nc->J;
  IRIns *ir = IR(ref);
  if (ir->o == IR_ADDOV || ir->o == IR_SUBOV ||
      (ir->o == IR_MULOV && (nc->mode & IRCONV_CONVMASK) == IRCONV_ANY)) {
    BPropEntry *bp = narrow_bpc_get(nc->J, ref, IRCONV_TOBIT);
    if (bp) {
      ref = bp->val;
    } else if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
      NarrowIns *savesp = nc->sp;
      narrow_stripov_backprop(nc, ir->op1, depth);
      if (nc->sp < nc->maxsp) {
        narrow_stripov_backprop(nc, ir->op2, depth);
        if (nc->sp < nc->maxsp) {
          *nc->sp++ = NARROWINS(IRT(ir->o - IR_ADDOV + IR_ADD, IRT_INT), ref);
          return;
        }
      }
      nc->sp = savesp;  /* Path too deep, need to backtrack. */
    }
  }
  *nc->sp++ = NARROWINS(NARROW_REF, ref);
}
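
/* E.g. stripping ADDOV(ADDOV(i, +1), +2) for a bit operation collects
** NARROW_REF(i), NARROW_REF(+1), ADD, NARROW_REF(+2), ADD -- both
** overflow checks are replaced by plain wrap-around ADDs, and the
** original guarded ops die by DCE if nothing else uses them.
*/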

/* Backpropagate narrowing conversion. Return number of needed conversions. */
static int narrow_conv_backprop(NarrowConv *nc, IRRef ref, int depth)
{
  jit_State *J = nc->J;
  IRIns *ir = IR(ref);
  IRRef cref;

  if (nc->sp >= nc->maxsp) return 10;  /* Path too deep. */

  /* Check the easy cases first. */
  if (ir->o == IR_CONV && (ir->op2 & IRCONV_SRCMASK) == IRT_INT) {
    if ((nc->mode & IRCONV_CONVMASK) <= IRCONV_ANY)
      narrow_stripov_backprop(nc, ir->op1, depth+1);
    else
      *nc->sp++ = NARROWINS(NARROW_REF, ir->op1);  /* Undo conversion. */
    if (nc->t == IRT_I64)
      *nc->sp++ = NARROWINS(NARROW_SEXT, 0);  /* Sign-extend integer. */
    return 0;
  } else if (ir->o == IR_KNUM) {  /* Narrow FP constant. */
    lua_Number n = ir_knum(ir)->n;
    if ((nc->mode & IRCONV_CONVMASK) == IRCONV_TOBIT) {
      /* Allows a wider range of constants. */
      int64_t k64 = (int64_t)n;
      if (n == (lua_Number)k64) {  /* Only if const doesn't lose precision. */
        *nc->sp++ = NARROWINS(NARROW_INT, 0);
        *nc->sp++ = (NarrowIns)k64;  /* But always truncate to 32 bits. */
        return 0;
      }
    } else {
      int32_t k = lj_num2int(n);
      /* Only if constant is a small integer. */
      if (checki16(k) && n == (lua_Number)k) {
        *nc->sp++ = NARROWINS(NARROW_INT, 0);
        *nc->sp++ = (NarrowIns)k;
        return 0;
      }
    }
    return 10;  /* Never narrow other FP constants (this is rare). */
  }

  /* Try to CSE the conversion. Stronger checks are ok, too. */
  cref = J->chain[fins->o];
  while (cref > ref) {
    IRIns *cr = IR(cref);
    if (cr->op1 == ref &&
        (fins->o == IR_TOBIT ||
         ((cr->op2 & IRCONV_MODEMASK) == (nc->mode & IRCONV_MODEMASK) &&
          irt_isguard(cr->t) >= irt_isguard(fins->t)))) {
      *nc->sp++ = NARROWINS(NARROW_REF, cref);
      return 0;  /* Already there, no additional conversion needed. */
    }
    cref = cr->prev;
  }

  /* Backpropagate across ADD/SUB. */
  if (ir->o == IR_ADD || ir->o == IR_SUB) {
    /* Try cache lookup first. */
    IRRef mode = nc->mode;
    BPropEntry *bp;
    /* Inner conversions need a stronger check. */
    if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX && depth > 0)
      mode += IRCONV_CHECK-IRCONV_INDEX;
    bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
    if (bp) {
      *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
      return 0;
    } else if (nc->t == IRT_I64) {
      /* Try sign-extending from an existing (checked) conversion to int. */
      mode = (IRT_INT<<5)|IRT_NUM|IRCONV_INDEX;
      bp = narrow_bpc_get(nc->J, (IRRef1)ref, mode);
      if (bp) {
        *nc->sp++ = NARROWINS(NARROW_REF, bp->val);
        *nc->sp++ = NARROWINS(NARROW_SEXT, 0);
        return 0;
      }
    }
    if (++depth < NARROW_MAX_BACKPROP && nc->sp < nc->maxsp) {
      NarrowIns *savesp = nc->sp;
      int count = narrow_conv_backprop(nc, ir->op1, depth);
      count += narrow_conv_backprop(nc, ir->op2, depth);
      if (count <= 1) {  /* Limit total number of conversions. */
        *nc->sp++ = NARROWINS(IRT(ir->o, nc->t), ref);
        return count;
      }
      nc->sp = savesp;  /* Too many conversions, need to backtrack. */
    }
  }

  /* Otherwise add a conversion. */
  *nc->sp++ = NARROWINS(NARROW_CONV, ref);
  return 1;
}
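
/* The 'count <= 1' rule is what makes the transform safe (see the
** correctness argument above): e.g. for 't[a+b]' with two non-constant,
** non-integral FP operands each side would need its own conversion
** (count == 2), so the code backtracks and a single CONV.int.num of the
** whole ADD is emitted instead -- no worse than not backpropagating.
*/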

/* Emit the conversions collected during backpropagation. */
static IRRef narrow_conv_emit(jit_State *J, NarrowConv *nc)
{
  /* The fins fields must be saved now -- emitir() overwrites them. */
  IROpT guardot = irt_isguard(fins->t) ? IRTG(IR_ADDOV-IR_ADD, 0) : 0;
  IROpT convot = fins->ot;
  IRRef1 convop2 = fins->op2;
  NarrowIns *next = nc->stack;  /* List of instructions from backpropagation. */
  NarrowIns *last = nc->sp;
  NarrowIns *sp = nc->stack;  /* Recycle the stack to store operands. */
  while (next < last) {  /* Simple stack machine to process the ins. list. */
    NarrowIns ref = *next++;
    IROpT op = narrow_op(ref);
    if (op == NARROW_REF) {
      *sp++ = ref;
    } else if (op == NARROW_CONV) {
      *sp++ = emitir_raw(convot, ref, convop2);  /* Raw emit avoids a loop. */
    } else if (op == NARROW_SEXT) {
      lua_assert(sp >= nc->stack+1);
      sp[-1] = emitir(IRT(IR_CONV, IRT_I64), sp[-1],
                      (IRT_I64<<5)|IRT_INT|IRCONV_SEXT);
    } else if (op == NARROW_INT) {
      lua_assert(next < last);
      *sp++ = nc->t == IRT_I64 ?
              lj_ir_kint64(J, (int64_t)(int32_t)*next++) :
              lj_ir_kint(J, *next++);
    } else {  /* Regular IROpT. Pops two operands and pushes one result. */
      IRRef mode = nc->mode;
      lua_assert(sp >= nc->stack+2);
      sp--;
      /* Omit some overflow checks for array indexing. See comments above. */
      if ((mode & IRCONV_CONVMASK) == IRCONV_INDEX) {
        if (next == last && irref_isk(narrow_ref(sp[0])) &&
          (uint32_t)IR(narrow_ref(sp[0]))->i + 0x40000000u < 0x80000000u)
          guardot = 0;
        else  /* Otherwise cache a stronger check. */
          mode += IRCONV_CHECK-IRCONV_INDEX;
      }
      sp[-1] = emitir(op+guardot, sp[-1], sp[0]);
      /* Add to cache. */
      if (narrow_ref(ref))
        narrow_bpc_set(J, narrow_ref(ref), narrow_ref(sp[-1]), mode);
    }
  }
  lua_assert(sp == nc->stack+1);
  return nc->stack[0];
}

/* Narrow a type conversion of an arithmetic operation. */
TRef LJ_FASTCALL lj_opt_narrow_convert(jit_State *J)
{
  if ((J->flags & JIT_F_OPT_NARROW)) {
    NarrowConv nc;
    nc.J = J;
    nc.sp = nc.stack;
    nc.maxsp = &nc.stack[NARROW_MAX_STACK-4];
    nc.t = irt_type(fins->t);
    if (fins->o == IR_TOBIT) {
      nc.mode = IRCONV_TOBIT;  /* Used only in the backpropagation cache. */
    } else {
      nc.mode = fins->op2;
    }
    if (narrow_conv_backprop(&nc, fins->op1, 0) <= 1)
      return narrow_conv_emit(J, &nc);
  }
  return NEXTFOLD;
}

/* -- Narrowing of implicit conversions ----------------------------------- */

/* Recursively strip overflow checks. */
static TRef narrow_stripov(jit_State *J, TRef tr, int lastop, IRRef mode)
{
  IRRef ref = tref_ref(tr);
  IRIns *ir = IR(ref);
  int op = ir->o;
  if (op >= IR_ADDOV && op <= lastop) {
    BPropEntry *bp = narrow_bpc_get(J, ref, mode);
    if (bp) {
      return TREF(bp->val, irt_t(IR(bp->val)->t));
    } else {
      IRRef op1 = ir->op1, op2 = ir->op2;  /* The IR may be reallocated. */
      op1 = narrow_stripov(J, op1, lastop, mode);
      op2 = narrow_stripov(J, op2, lastop, mode);
      tr = emitir(IRT(op - IR_ADDOV + IR_ADD,
                      ((mode & IRCONV_DSTMASK) >> IRCONV_DSH)), op1, op2);
      narrow_bpc_set(J, ref, tref_ref(tr), mode);
    }
  } else if (LJ_64 && (mode & IRCONV_SEXT) && !irt_is64(ir->t)) {
    tr = emitir(IRT(IR_CONV, IRT_INTP), tr, mode);
  }
  return tr;
}

/* Narrow array index. */
TRef LJ_FASTCALL lj_opt_narrow_index(jit_State *J, TRef tr)
{
  IRIns *ir;
  lua_assert(tref_isnumber(tr));
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTGI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_INDEX);
  /* Omit some overflow checks for array indexing. See comments above. */
  ir = IR(tref_ref(tr));
  if ((ir->o == IR_ADDOV || ir->o == IR_SUBOV) && irref_isk(ir->op2) &&
      (uint32_t)IR(ir->op2)->i + 0x40000000u < 0x80000000u)
    return emitir(IRTI(ir->o - IR_ADDOV + IR_ADD), ir->op1, ir->op2);
  return tr;
}
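
/* E.g. an ADDOV(i, +10) operand left over from dual-number arithmetic
** becomes ADD(i, +10) here, since +10 is within +-2^30: any wrap-around
** yields a negative index which the unsigned ABC check still catches.
*/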

/* Narrow conversion to integer operand (overflow undefined). */
TRef LJ_FASTCALL lj_opt_narrow_toint(jit_State *J, TRef tr)
{
  if (tref_isstr(tr))
    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTI(IR_CONV), tr, IRCONV_INT_NUM|IRCONV_ANY);
  if (!tref_isinteger(tr))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  /*
  ** Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV.
  ** Use IRCONV_TOBIT for the cache entries, since the semantics are the same.
  */
  return narrow_stripov(J, tr, IR_MULOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
}

/* Narrow conversion to bitop operand (overflow wrapped). */
TRef LJ_FASTCALL lj_opt_narrow_tobit(jit_State *J, TRef tr)
{
  if (tref_isstr(tr))
    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
  if (tref_isnum(tr))  /* Conversion may be narrowed, too. See above. */
    return emitir(IRTI(IR_TOBIT), tr, lj_ir_knum_tobit(J));
  if (!tref_isinteger(tr))
    lj_trace_err(J, LJ_TRERR_BADTYPE);
  /*
  ** Wrapped overflow semantics allow stripping of ADDOV and SUBOV.
  ** MULOV cannot be stripped due to precision widening.
  */
  return narrow_stripov(J, tr, IR_SUBOV, (IRT_INT<<5)|IRT_INT|IRCONV_TOBIT);
}
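
/* (To spell out "precision widening": the sum or difference of two
** int32 values always fits into the 53-bit mantissa of a double, so
** TOBIT of the exact FP result equals the wrap-around integer result.
** A 32x32 bit product needs up to 62 bits, so the FP product may be
** rounded and its low 32 bits can then differ from a wrapped MUL.)
*/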

#if LJ_HASFFI
/* Narrow C array index (overflow undefined). */
TRef LJ_FASTCALL lj_opt_narrow_cindex(jit_State *J, TRef tr)
{
  lua_assert(tref_isnumber(tr));
  if (tref_isnum(tr))
    return emitir(IRT(IR_CONV, IRT_INTP), tr, (IRT_INTP<<5)|IRT_NUM|IRCONV_ANY);
  /* Undefined overflow semantics allow stripping of ADDOV, SUBOV and MULOV. */
  return narrow_stripov(J, tr, IR_MULOV,
                        LJ_64 ? ((IRT_INTP<<5)|IRT_INT|IRCONV_SEXT) :
                                ((IRT_INTP<<5)|IRT_INT|IRCONV_TOBIT));
}
#endif

/* -- Narrowing of arithmetic operators ----------------------------------- */

/* Check whether a number fits into an int32_t (-0 is ok, too). */
static int numisint(lua_Number n)
{
  return (n == (lua_Number)lj_num2int(n));
}

/* Convert string to number. Error out for non-numeric string values. */
static TRef conv_str_tonum(jit_State *J, TRef tr, TValue *o)
{
  if (tref_isstr(tr)) {
    tr = emitir(IRTG(IR_STRTO, IRT_NUM), tr, 0);
    /* Would need an inverted STRTO for this rare and useless case. */
    if (!lj_strscan_num(strV(o), o))  /* Convert in-place. Value used below. */
      lj_trace_err(J, LJ_TRERR_BADTYPE);  /* Punt if non-numeric. */
  }
  return tr;
}

/* Narrowing of arithmetic operations. */
TRef lj_opt_narrow_arith(jit_State *J, TRef rb, TRef rc,
                         TValue *vb, TValue *vc, IROp op)
{
  rb = conv_str_tonum(J, rb, vb);
  rc = conv_str_tonum(J, rc, vc);
  /* Must not narrow MUL in non-DUALNUM variant, because it loses -0. */
  if ((op >= IR_ADD && op <= (LJ_DUALNUM ? IR_MUL : IR_SUB)) &&
      tref_isinteger(rb) && tref_isinteger(rc) &&
      numisint(lj_vm_foldarith(numberVnum(vb), numberVnum(vc),
                               (int)op - (int)IR_ADD)))
    return emitir(IRTGI((int)op - (int)IR_ADD + (int)IR_ADDOV), rb, rc);
  if (!tref_isnum(rb)) rb = emitir(IRTN(IR_CONV), rb, IRCONV_NUM_INT);
  if (!tref_isnum(rc)) rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
  return emitir(IRTN(op), rb, rc);
}
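
/* Note the runtime values are folded first: e.g. for 'i+j' with
** i == j == 2000000000 the folded sum 4e9 fails numisint(), so an FP
** ADD is recorded right away instead of an ADDOV that would be certain
** to overflow and exit.
*/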

/* Narrowing of unary minus operator. */
TRef lj_opt_narrow_unm(jit_State *J, TRef rc, TValue *vc)
{
  rc = conv_str_tonum(J, rc, vc);
  if (tref_isinteger(rc)) {
    if ((uint32_t)numberVint(vc) != 0x80000000u)
      return emitir(IRTGI(IR_SUBOV), lj_ir_kint(J, 0), rc);
    rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
  }
  return emitir(IRTN(IR_NEG), rc, lj_ir_ksimd(J, LJ_KSIMD_NEG));
}
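
/* The check on the runtime value is needed because 0 - (-2^31) does not
** fit into an int32_t: for vc == -2147483648 a narrowed SUBOV would
** always overflow, so the operand is converted and an FP negation is
** emitted instead.
*/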

/* Narrowing of modulo operator. */
TRef lj_opt_narrow_mod(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
{
  TRef tmp;
  rb = conv_str_tonum(J, rb, vb);
  rc = conv_str_tonum(J, rc, vc);
  if ((LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) &&
      tref_isinteger(rb) && tref_isinteger(rc) &&
      (tvisint(vc) ? intV(vc) != 0 : !tviszero(vc))) {
    emitir(IRTGI(IR_NE), rc, lj_ir_kint(J, 0));
    return emitir(IRTI(IR_MOD), rb, rc);
  }
  /* b % c ==> b - floor(b/c)*c */
  rb = lj_ir_tonum(J, rb);
  rc = lj_ir_tonum(J, rc);
  tmp = emitir(IRTN(IR_DIV), rb, rc);
  tmp = emitir(IRTN(IR_FPMATH), tmp, IRFPM_FLOOR);
  tmp = emitir(IRTN(IR_MUL), tmp, rc);
  return emitir(IRTN(IR_SUB), rb, tmp);
}

/* Narrowing of power operator or math.pow. */
TRef lj_opt_narrow_pow(jit_State *J, TRef rb, TRef rc, TValue *vb, TValue *vc)
{
  rb = conv_str_tonum(J, rb, vb);
  rb = lj_ir_tonum(J, rb);  /* Left arg is always treated as an FP number. */
  rc = conv_str_tonum(J, rc, vc);
  /* Narrowing must be unconditional to preserve (-x)^i semantics. */
  if (tvisint(vc) || numisint(numV(vc))) {
    int checkrange = 0;
    /* Split pow is faster for bigger exponents. But do this only for (+k)^i. */
    if (tref_isk(rb) && (int32_t)ir_knum(IR(tref_ref(rb)))->u32.hi >= 0) {
      int32_t k = numberVint(vc);
      if (!(k >= -65536 && k <= 65536)) goto split_pow;
      checkrange = 1;
    }
    if (!tref_isinteger(rc)) {
      /* Guarded conversion to integer! */
      rc = emitir(IRTGI(IR_CONV), rc, IRCONV_INT_NUM|IRCONV_CHECK);
    }
    if (checkrange && !tref_isk(rc)) {  /* Range guard: -65536 <= i <= 65536 */
      TRef tmp = emitir(IRTI(IR_ADD), rc, lj_ir_kint(J, 65536));
      emitir(IRTGI(IR_ULE), tmp, lj_ir_kint(J, 2*65536));
    }
    return emitir(IRTN(IR_POW), rb, rc);
  }
split_pow:
  /* FOLD covers most cases, but some are easier to do here. */
  if (tref_isk(rb) && tvispone(ir_knum(IR(tref_ref(rb)))))
    return rb;  /* 1 ^ x ==> 1 */
  rc = lj_ir_tonum(J, rc);
  if (tref_isk(rc) && ir_knum(IR(tref_ref(rc)))->n == 0.5)
    return emitir(IRTN(IR_FPMATH), rb, IRFPM_SQRT);  /* x ^ 0.5 ==> sqrt(x) */
  /* Split up b^c into exp2(c*log2(b)). Assembler may rejoin later. */
  rb = emitir(IRTN(IR_FPMATH), rb, IRFPM_LOG2);
  rc = emitir(IRTN(IR_MUL), rb, rc);
  return emitir(IRTN(IR_FPMATH), rc, IRFPM_EXP2);
}
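
/* Example for the unconditional narrowing rule above: (-2)^2 must give
** +4, but the split form exp2(2*log2(-2)) yields NaN, since log2 of a
** negative number is undefined. So a possibly negative base with an
** integral exponent always takes the IR_POW path; only a non-negative
** constant base may be split for a large exponent.
*/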

/* -- Predictive narrowing of induction variables ------------------------- */

/* Narrow a single runtime value. */
static int narrow_forl(jit_State *J, cTValue *o)
{
  if (tvisint(o)) return 1;
  if (LJ_DUALNUM || (J->flags & JIT_F_OPT_NARROW)) return numisint(numV(o));
  return 0;
}

/* Narrow the FORL index type by looking at the runtime values. */
IRType lj_opt_narrow_forl(jit_State *J, cTValue *tv)
{
  lua_assert(tvisnumber(&tv[FORL_IDX]) &&
             tvisnumber(&tv[FORL_STOP]) &&
             tvisnumber(&tv[FORL_STEP]));
  /* Narrow only if the runtime values of start/stop/step are all integers. */
  if (narrow_forl(J, &tv[FORL_IDX]) &&
      narrow_forl(J, &tv[FORL_STOP]) &&
      narrow_forl(J, &tv[FORL_STEP])) {
    /* And if the loop index can't possibly overflow. */
    lua_Number step = numberVnum(&tv[FORL_STEP]);
    lua_Number sum = numberVnum(&tv[FORL_STOP]) + step;
    if (0 <= step ? (sum <= 2147483647.0) : (sum >= -2147483648.0))
      return IRT_INT;
  }
  return IRT_NUM;
}
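
/* Two illustrative cases: 'for i=1,100' has all-integer values and
** stop+step == 101 is in range, so the index narrows to IRT_INT. But
** 'for i=1,0x7fffffff' stays IRT_NUM, since stop+step == 2^31 exceeds
** 2147483647.0 and the final increment could overflow an int32_t.
*/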

#undef IR
#undef fins
#undef emitir
#undef emitir_raw

#endif
