[Pcre-svn] [505] code/trunk/src: JIT compiler update.

Top Page
Delete this message
Author: Subversion repository
Date:  
To: pcre-svn
Subject: [Pcre-svn] [505] code/trunk/src: JIT compiler update.
Revision: 505
          http://www.exim.org/viewvc/pcre2?view=rev&revision=505
Author:   zherczeg
Date:     2016-03-04 08:51:53 +0000 (Fri, 04 Mar 2016)
Log Message:
-----------
JIT compiler update.


Modified Paths:
--------------
    code/trunk/src/pcre2_jit_compile.c
    code/trunk/src/sljit/sljitConfigInternal.h
    code/trunk/src/sljit/sljitLir.c
    code/trunk/src/sljit/sljitLir.h
    code/trunk/src/sljit/sljitNativeARM_32.c
    code/trunk/src/sljit/sljitNativeARM_64.c
    code/trunk/src/sljit/sljitNativeARM_T2_32.c
    code/trunk/src/sljit/sljitNativeMIPS_common.c
    code/trunk/src/sljit/sljitNativePPC_common.c
    code/trunk/src/sljit/sljitNativeSPARC_common.c
    code/trunk/src/sljit/sljitNativeTILEGX_64.c
    code/trunk/src/sljit/sljitNativeX86_common.c


Modified: code/trunk/src/pcre2_jit_compile.c
===================================================================
--- code/trunk/src/pcre2_jit_compile.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/pcre2_jit_compile.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -6129,7 +6129,7 @@


case OP_DOLL:
OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0);
- OP2(SLJIT_IAND | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTEOL);
+ OP2(SLJIT_AND32 | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTEOL);
add_jump(compiler, backtracks, JUMP(SLJIT_NOT_ZERO));

if (!common->endonly)
@@ -6144,7 +6144,7 @@
case OP_DOLLM:
jump[1] = CMP(SLJIT_LESS, STR_PTR, 0, STR_END, 0);
OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0);
- OP2(SLJIT_IAND | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTEOL);
+ OP2(SLJIT_AND32 | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTEOL);
add_jump(compiler, backtracks, JUMP(SLJIT_NOT_ZERO));
check_partial(common, FALSE);
jump[0] = JUMP(SLJIT_JUMP);
@@ -6182,7 +6182,7 @@
OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin));
add_jump(compiler, backtracks, CMP(SLJIT_GREATER, STR_PTR, 0, TMP1, 0));
- OP2(SLJIT_IAND | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTBOL);
+ OP2(SLJIT_AND32 | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTBOL);
add_jump(compiler, backtracks, JUMP(SLJIT_NOT_ZERO));
return cc;

@@ -6190,7 +6190,7 @@
OP1(SLJIT_MOV, TMP2, 0, ARGUMENTS, 0);
OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, begin));
jump[1] = CMP(SLJIT_GREATER, STR_PTR, 0, TMP1, 0);
- OP2(SLJIT_IAND | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTBOL);
+ OP2(SLJIT_AND32 | SLJIT_SET_E, SLJIT_UNUSED, 0, SLJIT_MEM1(TMP2), SLJIT_OFFSETOF(jit_arguments, options), SLJIT_IMM, PCRE2_NOTBOL);
add_jump(compiler, backtracks, JUMP(SLJIT_NOT_ZERO));
jump[0] = JUMP(SLJIT_JUMP);
JUMPHERE(jump[1]);

Modified: code/trunk/src/sljit/sljitConfigInternal.h
===================================================================
--- code/trunk/src/sljit/sljitConfigInternal.h    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitConfigInternal.h    2016-03-04 08:51:53 UTC (rev 505)
@@ -34,11 +34,11 @@
      sljit_s8, sljit_u8   : signed and unsigned 8 bit integer type
      sljit_s16, sljit_u16 : signed and unsigned 16 bit integer type
      sljit_s32, sljit_u32 : signed and unsigned 32 bit integer type
-     sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer
-     sljit_p : unsgined pointer value (usually the same as sljit_uw, but
-               some 64 bit ABIs may use 32 bit pointers)
-     sljit_s : single precision floating point value
-     sljit_d : double precision floating point value
+     sljit_sw, sljit_uw   : signed and unsigned machine word, enough to store a pointer
+     sljit_p              : unsigned pointer value (usually the same as sljit_uw, but
+                            some 64 bit ABIs may use 32 bit pointers)
+     sljit_f32            : 32 bit single precision floating point value
+     sljit_f64            : 64 bit double precision floating point value


    Macros for feature detection (boolean):
      SLJIT_32BIT_ARCHITECTURE : 32 bit architecture
@@ -56,10 +56,10 @@
      SLJIT_NUMBER_OF_SCRATCH_FLOAT_REGISTERS : number of available floating point scratch registers
      SLJIT_NUMBER_OF_SAVED_FLOAT_REGISTERS : number of available floating point saved registers
      SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index
-     SLJIT_DOUBLE_SHIFT : the shift required to apply when accessing
-                          a double precision floating point array by index
-     SLJIT_SINGLE_SHIFT : the shift required to apply when accessing
-                          a single precision floating point array by index
+     SLJIT_F32_SHIFT : the shift required to apply when accessing
+                       a single precision floating point array by index
+     SLJIT_F64_SHIFT : the shift required to apply when accessing
+                       a double precision floating point array by index
      SLJIT_LOCALS_OFFSET : local space starting offset (SLJIT_SP + SLJIT_LOCALS_OFFSET)
      SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address


@@ -325,7 +325,7 @@
#endif /* !SLJIT_CACHE_FLUSH */

 /******************************************************/
-/* Byte/half/int/word/single/double type definitions. */
+/*    Integer and floating point type definitions.    */
 /******************************************************/


/* 8 bit byte type. */
@@ -372,15 +372,15 @@
typedef sljit_uw sljit_p;

/* Floating point types. */
-typedef float sljit_s;
-typedef double sljit_d;
+typedef float sljit_f32;
+typedef double sljit_f64;

/* Shift for pointer sized data. */
#define SLJIT_POINTER_SHIFT SLJIT_WORD_SHIFT

/* Shift for double precision sized data. */
-#define SLJIT_DOUBLE_SHIFT 3
-#define SLJIT_SINGLE_SHIFT 2
+#define SLJIT_F32_SHIFT 2
+#define SLJIT_F64_SHIFT 3

#ifndef SLJIT_W


Modified: code/trunk/src/sljit/sljitLir.c
===================================================================
--- code/trunk/src/sljit/sljitLir.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitLir.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -341,9 +341,9 @@
         && (sizeof(sljit_sw) == 4 || sizeof(sljit_sw) == 8)
         && (sizeof(sljit_uw) == 4 || sizeof(sljit_uw) == 8),
         invalid_integer_types);
-    SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_SINGLE_OP,
+    SLJIT_COMPILE_ASSERT(SLJIT_I32_OP == SLJIT_F32_OP,
         int_op_and_single_op_must_be_the_same);
-    SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_SINGLE_OP,
+    SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_F32_OP,
         rewritable_jump_and_single_op_must_not_be_the_same);


     /* Only the non-zero members must be set. */
@@ -654,8 +654,8 @@
         break; \
     case SLJIT_BREAKPOINT: \
     case SLJIT_NOP: \
-    case SLJIT_LUMUL: \
-    case SLJIT_LSMUL: \
+    case SLJIT_LMUL_UW: \
+    case SLJIT_LMUL_SW: \
     case SLJIT_MOV: \
     case SLJIT_MOV_U32: \
     case SLJIT_MOV_P: \
@@ -666,7 +666,7 @@
         CHECK_ARGUMENT(!(op & (SLJIT_I32_OP | SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
         break; \
     default: \
-        /* Only SLJIT_I32_OP or SLJIT_SINGLE_OP is allowed. */ \
+        /* Only SLJIT_I32_OP or SLJIT_F32_OP is allowed. */ \
         CHECK_ARGUMENT(!(op & (SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
         break; \
     }
@@ -674,12 +674,12 @@
 #define FUNCTION_CHECK_FOP() \
     CHECK_ARGUMENT(!GET_FLAGS(op) || !(op & SLJIT_KEEP_FLAGS)); \
     switch (GET_OPCODE(op)) { \
-    case SLJIT_DCMP: \
+    case SLJIT_CMP_F64: \
         CHECK_ARGUMENT(!(op & (SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
         CHECK_ARGUMENT((op & (SLJIT_SET_E | SLJIT_SET_S))); \
         break; \
     default: \
-        /* Only SLJIT_I32_OP or SLJIT_SINGLE_OP is allowed. */ \
+        /* Only SLJIT_I32_OP or SLJIT_F32_OP is allowed. */ \
         CHECK_ARGUMENT(!(op & (SLJIT_SET_E | SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \
         break; \
     }
@@ -845,15 +845,15 @@
     }


 static const char* op0_names[] = {
-    (char*)"breakpoint", (char*)"nop", (char*)"lumul", (char*)"lsmul",
-    (char*)"udivmod", (char*)"sdivmod", (char*)"udivi", (char*)"sdivi"
+    (char*)"breakpoint", (char*)"nop", (char*)"lmul.uw", (char*)"lmul.sw",
+    (char*)"divmod.u", (char*)"divmod.s", (char*)"div.u", (char*)"div.s"
 };


 static const char* op1_names[] = {
-    (char*)"mov", (char*)"mov_ub", (char*)"mov_sb", (char*)"mov_uh",
-    (char*)"mov_sh", (char*)"mov_ui", (char*)"mov_si", (char*)"mov_p",
-    (char*)"movu", (char*)"movu_ub", (char*)"movu_sb", (char*)"movu_uh",
-    (char*)"movu_sh", (char*)"movu_ui", (char*)"movu_si", (char*)"movu_p",
+    (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
+    (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
+    (char*)"", (char*)".u8", (char*)".s8", (char*)".u16",
+    (char*)".s16", (char*)".u32", (char*)".s32", (char*)".p",
     (char*)"not", (char*)"neg", (char*)"clz",
 };


@@ -873,9 +873,9 @@
     (char*)"add", (char*)"sub", (char*)"mul", (char*)"div"
 };


-#define JUMP_PREFIX(type) \
-    ((type & 0xff) <= SLJIT_MUL_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "i_" : "") \
-    : ((type & 0xff) <= SLJIT_D_ORDERED ? ((type & SLJIT_SINGLE_OP) ? "s_" : "d_") : ""))
+#define JUMP_POSTFIX(type) \
+    ((type & 0xff) <= SLJIT_MUL_NOT_OVERFLOW ? ((type & SLJIT_I32_OP) ? "32" : "") \
+    : ((type & 0xff) <= SLJIT_ORDERED_F64 ? ((type & SLJIT_F32_OP) ? ".f32" : ".f64") : ""))


 static char* jump_names[] = {
     (char*)"equal", (char*)"not_equal",
@@ -993,7 +993,7 @@
         if (op == SLJIT_UNUSED)
             fprintf(compiler->verbose, "  return\n");
         else {
-            fprintf(compiler->verbose, "  return.%s ", op1_names[op - SLJIT_OP1_BASE]);
+            fprintf(compiler->verbose, "  return%s ", op1_names[op - SLJIT_OP1_BASE]);
             sljit_verbose_param(compiler, src, srcw);
             fprintf(compiler->verbose, "\n");
         }
@@ -1035,13 +1035,19 @@
 static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
 {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-    CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LSMUL)
-        || ((op & ~SLJIT_I32_OP) >= SLJIT_UDIVMOD && (op & ~SLJIT_I32_OP) <= SLJIT_SDIVI));
-    CHECK_ARGUMENT(op < SLJIT_LUMUL || compiler->scratches >= 2);
+    CHECK_ARGUMENT((op >= SLJIT_BREAKPOINT && op <= SLJIT_LMUL_SW)
+        || ((op & ~SLJIT_I32_OP) >= SLJIT_DIVMOD_UW && (op & ~SLJIT_I32_OP) <= SLJIT_DIV_SW));
+    CHECK_ARGUMENT(op < SLJIT_LMUL_UW || compiler->scratches >= 2);
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose))
-        fprintf(compiler->verbose, "  %s%s\n", !(op & SLJIT_I32_OP) ? "" : "i", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]);
+    {
+        fprintf(compiler->verbose, "  %s", op0_names[GET_OPCODE(op) - SLJIT_OP0_BASE]);
+        if (GET_OPCODE(op) >= SLJIT_DIVMOD_UW) {
+            fprintf(compiler->verbose, (op & SLJIT_I32_OP) ? "32" : "w");
+        }
+        fprintf(compiler->verbose, "\n");
+    }
 #endif
     CHECK_RETURN_OK;
 }
@@ -1064,9 +1070,18 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  %s%s%s%s%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE],
-            !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
-            !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+        if (GET_OPCODE(op) <= SLJIT_MOVU_P)
+        {
+            fprintf(compiler->verbose, "  mov%s%s%s ", (GET_OPCODE(op) >= SLJIT_MOVU) ? "u" : "",
+                !(op & SLJIT_I32_OP) ? "" : "32", (op != SLJIT_MOV32 && op != SLJIT_MOVU32) ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : "");
+        }
+        else
+        {
+            fprintf(compiler->verbose, "  %s%s%s%s%s%s%s%s ", op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
+                !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
+                !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+        }
+
         sljit_verbose_param(compiler, dst, dstw);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_param(compiler, src, srcw);
@@ -1095,7 +1110,7 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  %s%s%s%s%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE],
+        fprintf(compiler->verbose, "  %s%s%s%s%s%s%s%s ", op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE], !(op & SLJIT_I32_OP) ? "" : "32",
             !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_U) ? "" : ".u", !(op & SLJIT_SET_S) ? "" : ".s",
             !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
         sljit_verbose_param(compiler, dst, dstw);
@@ -1170,7 +1185,7 @@


 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_DMOV && GET_OPCODE(op) <= SLJIT_DABS);
+    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_MOV_F64 && GET_OPCODE(op) <= SLJIT_ABS_F64);
     FUNCTION_CHECK_FOP();
     FUNCTION_FCHECK(src, srcw);
     FUNCTION_FCHECK(dst, dstw);
@@ -1177,12 +1192,12 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
-            fprintf(compiler->verbose, "  %s%s ", fop1_names[SLJIT_CONVD_FROMS - SLJIT_FOP1_BASE],
-                (op & SLJIT_SINGLE_OP) ? "s.fromd" : "d.froms");
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+            fprintf(compiler->verbose, "  %s%s ", fop1_names[SLJIT_CONV_F64_FROM_F32 - SLJIT_FOP1_BASE],
+                (op & SLJIT_F32_OP) ? ".f32.from.f64" : ".f64.from.f32");
         else
-            fprintf(compiler->verbose, "  %s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d",
-                fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE]);
+            fprintf(compiler->verbose, "  %s%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
+                (op & SLJIT_F32_OP) ? ".f32" : ".f64");


         sljit_verbose_fparam(compiler, dst, dstw);
         fprintf(compiler->verbose, ", ");
@@ -1204,7 +1219,7 @@


 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_DCMP);
+    CHECK_ARGUMENT(GET_OPCODE(op) == SLJIT_CMP_F64);
     FUNCTION_CHECK_FOP();
     FUNCTION_FCHECK(src1, src1w);
     FUNCTION_FCHECK(src2, src2w);
@@ -1211,7 +1226,7 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  %s%s%s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d", fop1_names[SLJIT_DCMP - SLJIT_FOP1_BASE],
+        fprintf(compiler->verbose, "  %s%s%s%s ", fop1_names[SLJIT_CMP_F64 - SLJIT_FOP1_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64",
             (op & SLJIT_SET_E) ? ".e" : "", (op & SLJIT_SET_S) ? ".s" : "");
         sljit_verbose_fparam(compiler, src1, src1w);
         fprintf(compiler->verbose, ", ");
@@ -1222,7 +1237,7 @@
     CHECK_RETURN_OK;
 }


-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1233,7 +1248,7 @@


 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONVW_FROMD && GET_OPCODE(op) <= SLJIT_CONVI_FROMD);
+    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CONV_S32_FROM_F64);
     FUNCTION_CHECK_FOP();
     FUNCTION_FCHECK(src, srcw);
     FUNCTION_CHECK_DST(dst, dstw);
@@ -1241,8 +1256,8 @@
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
         fprintf(compiler->verbose, "  %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
-            (GET_OPCODE(op) == SLJIT_CONVI_FROMD) ? "i" : "w",
-            (op & SLJIT_SINGLE_OP) ? "s" : "d");
+            (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? ".s32" : ".sw",
+            (op & SLJIT_F32_OP) ? ".f32" : ".f64");
         sljit_verbose_param(compiler, dst, dstw);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_fparam(compiler, src, srcw);
@@ -1252,7 +1267,7 @@
     CHECK_RETURN_OK;
 }


-static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE CHECK_RETURN_TYPE check_sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1263,7 +1278,7 @@


 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONVD_FROMW && GET_OPCODE(op) <= SLJIT_CONVD_FROMI);
+    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_CONV_F64_FROM_SW && GET_OPCODE(op) <= SLJIT_CONV_F64_FROM_S32);
     FUNCTION_CHECK_FOP();
     FUNCTION_CHECK_SRC(src, srcw);
     FUNCTION_FCHECK(dst, dstw);
@@ -1271,8 +1286,8 @@
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
         fprintf(compiler->verbose, "  %s%s.from%s ", fop1_names[GET_OPCODE(op) - SLJIT_FOP1_BASE],
-            (op & SLJIT_SINGLE_OP) ? "s" : "d",
-            (GET_OPCODE(op) == SLJIT_CONVD_FROMI) ? "i" : "w");
+            (op & SLJIT_F32_OP) ? ".f32" : ".f64",
+            (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? ".s32" : ".sw");
         sljit_verbose_fparam(compiler, dst, dstw);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_param(compiler, src, srcw);
@@ -1289,7 +1304,7 @@
 {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_DADD && GET_OPCODE(op) <= SLJIT_DDIV);
+    CHECK_ARGUMENT(GET_OPCODE(op) >= SLJIT_ADD_F64 && GET_OPCODE(op) <= SLJIT_DIV_F64);
     FUNCTION_CHECK_FOP();
     FUNCTION_FCHECK(src1, src1w);
     FUNCTION_FCHECK(src2, src2w);
@@ -1297,7 +1312,7 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  %s%s ", (op & SLJIT_SINGLE_OP) ? "s" : "d", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE]);
+        fprintf(compiler->verbose, "  %s%s ", fop2_names[GET_OPCODE(op) - SLJIT_FOP2_BASE], (op & SLJIT_F32_OP) ? ".f32" : ".f64");
         sljit_verbose_fparam(compiler, dst, dstw);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_fparam(compiler, src1, src1w);
@@ -1335,8 +1350,8 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose))
-        fprintf(compiler->verbose, "  jump%s.%s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
-            JUMP_PREFIX(type), jump_names[type & 0xff]);
+        fprintf(compiler->verbose, "  jump%s %s%s\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+            jump_names[type & 0xff], JUMP_POSTFIX(type));
 #endif
     CHECK_RETURN_OK;
 }
@@ -1353,8 +1368,8 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  cmp%s.%s%s ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
-            (type & SLJIT_I32_OP) ? "i_" : "", jump_names[type & 0xff]);
+        fprintf(compiler->verbose, "  cmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+            jump_names[type & 0xff], (type & SLJIT_I32_OP) ? "32" : "");
         sljit_verbose_param(compiler, src1, src1w);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_param(compiler, src2, src2w);
@@ -1370,15 +1385,15 @@
 {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_is_fpu_available());
-    CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_SINGLE_OP)));
-    CHECK_ARGUMENT((type & 0xff) >= SLJIT_D_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+    CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_F32_OP)));
+    CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL_F64 && (type & 0xff) <= SLJIT_ORDERED_F64);
     FUNCTION_FCHECK(src1, src1w);
     FUNCTION_FCHECK(src2, src2w);
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  fcmp%s.%s%s ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
-            (type & SLJIT_SINGLE_OP) ? "s_" : "d_", jump_names[type & 0xff]);
+        fprintf(compiler->verbose, "  fcmp%s %s%s, ", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r",
+            jump_names[type & 0xff], (type & SLJIT_F32_OP) ? ".f32" : ".f64");
         sljit_verbose_fparam(compiler, src1, src1w);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_fparam(compiler, src2, src2w);
@@ -1417,7 +1432,7 @@
 {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
-    CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+    CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
     CHECK_ARGUMENT(op == SLJIT_MOV || GET_OPCODE(op) == SLJIT_MOV_U32 || GET_OPCODE(op) == SLJIT_MOV_S32
         || (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR));
     CHECK_ARGUMENT((op & (SLJIT_SET_U | SLJIT_SET_S | SLJIT_SET_O | SLJIT_SET_C)) == 0);
@@ -1431,15 +1446,16 @@
 #endif
 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
-        fprintf(compiler->verbose, "  flags.%s%s%s%s ", !(op & SLJIT_I32_OP) ? "" : "i",
-            GET_OPCODE(op) >= SLJIT_OP2_BASE ? op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE] : op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE],
-            !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k");
+        fprintf(compiler->verbose, "  flags %s%s%s%s, ",
+            !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k",
+            GET_OPCODE(op) < SLJIT_OP2_BASE ? "mov" : op2_names[GET_OPCODE(op) - SLJIT_OP2_BASE],
+            GET_OPCODE(op) < SLJIT_OP2_BASE ? op1_names[GET_OPCODE(op) - SLJIT_OP1_BASE] : ((op & SLJIT_I32_OP) ? "32" : ""));
         sljit_verbose_param(compiler, dst, dstw);
         if (src != SLJIT_UNUSED) {
             fprintf(compiler->verbose, ", ");
             sljit_verbose_param(compiler, src, srcw);
         }
-        fprintf(compiler->verbose, ", %s%s\n", JUMP_PREFIX(type), jump_names[type & 0xff]);
+        fprintf(compiler->verbose, ", %s%s\n", jump_names[type & 0xff], JUMP_POSTFIX(type));
     }
 #endif
     CHECK_RETURN_OK;
@@ -1482,25 +1498,25 @@
 #endif /* SLJIT_ARGUMENT_CHECKS || SLJIT_VERBOSE */


 #define SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw) \
-    SLJIT_COMPILE_ASSERT(!(SLJIT_CONVW_FROMD & 0x1) && !(SLJIT_CONVD_FROMW & 0x1), \
+    SLJIT_COMPILE_ASSERT(!(SLJIT_CONV_SW_FROM_F64 & 0x1) && !(SLJIT_CONV_F64_FROM_SW & 0x1), \
         invalid_float_opcodes); \
-    if (GET_OPCODE(op) >= SLJIT_CONVW_FROMD && GET_OPCODE(op) <= SLJIT_DCMP) { \
-        if (GET_OPCODE(op) == SLJIT_DCMP) { \
+    if (GET_OPCODE(op) >= SLJIT_CONV_SW_FROM_F64 && GET_OPCODE(op) <= SLJIT_CMP_F64) { \
+        if (GET_OPCODE(op) == SLJIT_CMP_F64) { \
             CHECK(check_sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw)); \
             ADJUST_LOCAL_OFFSET(dst, dstw); \
             ADJUST_LOCAL_OFFSET(src, srcw); \
             return sljit_emit_fop1_cmp(compiler, op, dst, dstw, src, srcw); \
         } \
-        if ((GET_OPCODE(op) | 0x1) == SLJIT_CONVI_FROMD) { \
-            CHECK(check_sljit_emit_fop1_convw_fromd(compiler, op, dst, dstw, src, srcw)); \
+        if ((GET_OPCODE(op) | 0x1) == SLJIT_CONV_S32_FROM_F64) { \
+            CHECK(check_sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw)); \
             ADJUST_LOCAL_OFFSET(dst, dstw); \
             ADJUST_LOCAL_OFFSET(src, srcw); \
-            return sljit_emit_fop1_convw_fromd(compiler, op, dst, dstw, src, srcw); \
+            return sljit_emit_fop1_conv_sw_from_f64(compiler, op, dst, dstw, src, srcw); \
         } \
-        CHECK(check_sljit_emit_fop1_convd_fromw(compiler, op, dst, dstw, src, srcw)); \
+        CHECK(check_sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw)); \
         ADJUST_LOCAL_OFFSET(dst, dstw); \
         ADJUST_LOCAL_OFFSET(src, srcw); \
-        return sljit_emit_fop1_convd_fromw(compiler, op, dst, dstw, src, srcw); \
+        return sljit_emit_fop1_conv_f64_from_sw(compiler, op, dst, dstw, src, srcw); \
     } \
     CHECK(check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw)); \
     ADJUST_LOCAL_OFFSET(dst, dstw); \
@@ -1668,15 +1684,15 @@
     CHECK_PTR(check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w));


     condition = type & 0xff;
-    flags = (condition <= SLJIT_D_NOT_EQUAL) ? SLJIT_SET_E : SLJIT_SET_S;
-    if (type & SLJIT_SINGLE_OP)
-        flags |= SLJIT_SINGLE_OP;
+    flags = (condition <= SLJIT_NOT_EQUAL_F64) ? SLJIT_SET_E : SLJIT_SET_S;
+    if (type & SLJIT_F32_OP)
+        flags |= SLJIT_F32_OP;


 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
         || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     compiler->skip_checks = 1;
 #endif
-    sljit_emit_fop1(compiler, SLJIT_DCMP | flags, src1, src1w, src2, src2w);
+    sljit_emit_fop1(compiler, SLJIT_CMP_F64 | flags, src1, src1w, src2, src2w);


 #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
         || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)


Modified: code/trunk/src/sljit/sljitLir.h
===================================================================
--- code/trunk/src/sljit/sljitLir.h    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitLir.h    2016-03-04 08:51:53 UTC (rev 505)
@@ -226,7 +226,7 @@
 /*  Floating point registers                                             */
 /* --------------------------------------------------------------------- */


-/* Each floating point register can store a double or single precision
+/* Each floating point register can store a 32 or a 64 bit precision
   value. The FR and FS register sets overlap in the same way as the R
   and S register sets. See above. */


@@ -624,31 +624,29 @@
 #define SLJIT_MEM2(r1, r2)    (SLJIT_MEM | (r1) | ((r2) << 8))
 #define SLJIT_IMM        0x40


-/* Set 32 bit operation mode (I) on 64 bit CPUs. The flag is totally ignored on
-   32 bit CPUs. If this flag is set for an arithmetic operation, it uses only the
-   lower 32 bit of the input register(s), and set the CPU status flags according
-   to the 32 bit result. The higher 32 bits are undefined for both the input and
-   output. However, the CPU might not ignore those higher 32 bits, like MIPS, which
-   expects it to be the sign extension of the lower 32 bit. All 32 bit operations
-   are undefined, if this condition is not fulfilled. Therefore, when SLJIT_I32_OP
-   is specified, all register arguments must be the result of other operations with
-   the same SLJIT_I32_OP flag. In other words, although a register can hold either
-   a 64 or 32 bit value, these values cannot be mixed. The only exceptions are
-   SLJIT_IMOV and SLJIT_IMOVU (SLJIT_MOV_S32/SLJIT_MOVU_S32 with SLJIT_I32_OP flag)
-   which can convert any source argument to SLJIT_I32_OP compatible result. This
-   conversion might be unnecessary on some CPUs like x86-64, since the upper 32
-   bit is always ignored. In this case SLJIT is clever enough to not generate any
-   instructions if the source and destination operands are the same registers.
-   Affects sljit_emit_op0, sljit_emit_op1 and sljit_emit_op2. */
+/* Set 32 bit operation mode (I) on 64 bit CPUs. This flag is ignored on 32
+   bit CPUs. When this flag is set for an arithmetic operation, only the
+   lower 32 bit of the input register(s) are used, and the CPU status flags
+   are set according to the 32 bit result. Although the higher 32 bit of
+   the input and the result registers are not defined by SLJIT, it might be
+   defined by the CPU architecture (e.g. MIPS). To satisfy these requirements
+   all source registers must be computed by operations where this flag is
+   also set. In other words 32 and 64 bit arithmetic operations cannot be
+   mixed. The only exception is SLJIT_IMOV and SLJIT_IMOVU whose source
+   register can hold any 32 or 64 bit value. This source register is
+   converted to a 32 bit compatible format. SLJIT does not generate any
+   instructions on certain CPUs (e.g. on x86 and ARM) if the source and
+   destination operands are the same registers. Affects sljit_emit_op0,
+   sljit_emit_op1 and sljit_emit_op2. */
 #define SLJIT_I32_OP        0x100


-/* Single precision mode (SP). This flag is similar to SLJIT_I32_OP, just
+/* F32 precision mode (SP). This flag is similar to SLJIT_I32_OP, just
    it applies to floating point registers (it is even the same bit). When
-   this flag is passed, the CPU performs single precision floating point
-   operations. Similar to SLJIT_I32_OP, all register arguments must be the
-   result of other floating point operations with this flag. Affects
+   this flag is passed, the CPU performs 32 bit floating point operations.
+   Similar to SLJIT_I32_OP, all register arguments must be computed by
+   floating point operations where this flag is also set. Affects
    sljit_emit_fop1, sljit_emit_fop2 and sljit_emit_fcmp. */
-#define SLJIT_SINGLE_OP        0x100
+#define SLJIT_F32_OP        0x100


 /* Common CPU status flags for all architectures (x86, ARM, PPC)
     - carry flag
@@ -697,17 +695,17 @@
 /* Flags: - (may destroy flags)
    Unsigned multiplication of SLJIT_R0 and SLJIT_R1.
    Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LUMUL            (SLJIT_OP0_BASE + 2)
+#define SLJIT_LMUL_UW            (SLJIT_OP0_BASE + 2)
 /* Flags: - (may destroy flags)
    Signed multiplication of SLJIT_R0 and SLJIT_R1.
    Result is placed into SLJIT_R1:SLJIT_R0 (high:low) word */
-#define SLJIT_LSMUL            (SLJIT_OP0_BASE + 3)
+#define SLJIT_LMUL_SW            (SLJIT_OP0_BASE + 3)
 /* Flags: I - (may destroy flags)
    Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
    Note: if SLJIT_R1 is 0, the behaviour is undefined. */
-#define SLJIT_UDIVMOD            (SLJIT_OP0_BASE + 4)
-#define SLJIT_IUDIVMOD            (SLJIT_UDIVMOD | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_UW            (SLJIT_OP0_BASE + 4)
+#define SLJIT_DIVMOD_U32        (SLJIT_DIVMOD_UW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0 and the remainder into SLJIT_R1.
@@ -714,24 +712,22 @@
    Note: if SLJIT_R1 is 0, the behaviour is undefined.
    Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
          the behaviour is undefined. */
-#define SLJIT_SDIVMOD            (SLJIT_OP0_BASE + 5)
-#define SLJIT_ISDIVMOD            (SLJIT_SDIVMOD | SLJIT_I32_OP)
+#define SLJIT_DIVMOD_SW            (SLJIT_OP0_BASE + 5)
+#define SLJIT_DIVMOD_S32        (SLJIT_DIVMOD_SW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Unsigned divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
-   Note: if SLJIT_R1 is 0, the behaviour is undefined.
-   Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_UDIVI            (SLJIT_OP0_BASE + 6)
-#define SLJIT_IUDIVI            (SLJIT_UDIVI | SLJIT_I32_OP)
+   Note: if SLJIT_R1 is 0, the behaviour is undefined. */
+#define SLJIT_DIV_UW            (SLJIT_OP0_BASE + 6)
+#define SLJIT_DIV_U32            (SLJIT_DIV_UW | SLJIT_I32_OP)
 /* Flags: I - (may destroy flags)
    Signed divide of the value in SLJIT_R0 by the value in SLJIT_R1.
    The result is placed into SLJIT_R0. SLJIT_R1 preserves its value.
    Note: if SLJIT_R1 is 0, the behaviour is undefined.
    Note: if SLJIT_R1 is -1 and SLJIT_R0 is integer min (0x800..00),
-         the behaviour is undefined.
-   Note: SLJIT_SDIV is single precision divide. */
-#define SLJIT_SDIVI            (SLJIT_OP0_BASE + 7)
-#define SLJIT_ISDIVI            (SLJIT_SDIVI | SLJIT_I32_OP)
+         the behaviour is undefined. */
+#define SLJIT_DIV_SW            (SLJIT_OP0_BASE + 7)
+#define SLJIT_DIV_S32            (SLJIT_DIV_SW | SLJIT_I32_OP)


SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op);

@@ -753,24 +749,24 @@
 #define SLJIT_MOV            (SLJIT_OP1_BASE + 0)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOV_U8            (SLJIT_OP1_BASE + 1)
-#define SLJIT_IMOV_U8            (SLJIT_MOV_U8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U8            (SLJIT_MOV_U8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOV_S8            (SLJIT_OP1_BASE + 2)
-#define SLJIT_IMOV_S8            (SLJIT_MOV_S8 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S8            (SLJIT_MOV_S8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOV_U16            (SLJIT_OP1_BASE + 3)
-#define SLJIT_IMOV_U16            (SLJIT_MOV_U16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_U16            (SLJIT_MOV_U16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOV_S16            (SLJIT_OP1_BASE + 4)
-#define SLJIT_IMOV_S16            (SLJIT_MOV_S16 | SLJIT_I32_OP)
+#define SLJIT_MOV32_S16            (SLJIT_MOV_S16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_I32_OP for further details. */
+   Note: no SLJIT_MOV32_U32 form, since it is the same as SLJIT_MOV32 */
 #define SLJIT_MOV_U32            (SLJIT_OP1_BASE + 5)
-/* No SLJIT_I32_OP form, since it is the same as SLJIT_IMOV. */
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_I32_OP for further details. */
+   Note: no SLJIT_MOV32_S32 form, since it is the same as SLJIT_MOV32 */
 #define SLJIT_MOV_S32            (SLJIT_OP1_BASE + 6)
-#define SLJIT_IMOV            (SLJIT_MOV_S32 | SLJIT_I32_OP)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOV32            (SLJIT_MOV_S32 | SLJIT_I32_OP)
 /* Flags: - (never set any flags) */
 #define SLJIT_MOV_P            (SLJIT_OP1_BASE + 7)
 /* Flags: - (never set any flags) */
@@ -777,38 +773,38 @@
 #define SLJIT_MOVU            (SLJIT_OP1_BASE + 8)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOVU_U8            (SLJIT_OP1_BASE + 9)
-#define SLJIT_IMOVU_U8            (SLJIT_MOVU_U8 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_U8            (SLJIT_MOVU_U8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOVU_S8            (SLJIT_OP1_BASE + 10)
-#define SLJIT_IMOVU_S8            (SLJIT_MOVU_S8 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_S8            (SLJIT_MOVU_S8 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOVU_U16            (SLJIT_OP1_BASE + 11)
-#define SLJIT_IMOVU_U16            (SLJIT_MOVU_U16 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_U16            (SLJIT_MOVU_U16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags) */
 #define SLJIT_MOVU_S16            (SLJIT_OP1_BASE + 12)
-#define SLJIT_IMOVU_S16            (SLJIT_MOVU_S16 | SLJIT_I32_OP)
+#define SLJIT_MOVU32_S16        (SLJIT_MOVU_S16 | SLJIT_I32_OP)
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_I32_OP for further details. */
+   Note: no SLJIT_MOVU32_U32 form, since it is the same as SLJIT_MOVU32 */
 #define SLJIT_MOVU_U32            (SLJIT_OP1_BASE + 13)
-/* No SLJIT_I32_OP form, since it is the same as SLJIT_IMOVU. */
 /* Flags: I - (never set any flags)
-   Note: see SLJIT_I32_OP for further details. */
+   Note: no SLJIT_MOVU32_S32 form, since it is the same as SLJIT_MOVU32 */
 #define SLJIT_MOVU_S32            (SLJIT_OP1_BASE + 14)
-#define SLJIT_IMOVU            (SLJIT_MOVU_S32 | SLJIT_I32_OP)
+/* Flags: I - (never set any flags) */
+#define SLJIT_MOVU32            (SLJIT_MOVU_S32 | SLJIT_I32_OP)
 /* Flags: - (never set any flags) */
 #define SLJIT_MOVU_P            (SLJIT_OP1_BASE + 15)
 /* Flags: I | E | K */
 #define SLJIT_NOT            (SLJIT_OP1_BASE + 16)
-#define SLJIT_INOT            (SLJIT_NOT | SLJIT_I32_OP)
+#define SLJIT_NOT32            (SLJIT_NOT | SLJIT_I32_OP)
 /* Flags: I | E | O | K */
 #define SLJIT_NEG            (SLJIT_OP1_BASE + 17)
-#define SLJIT_INEG            (SLJIT_NEG | SLJIT_I32_OP)
+#define SLJIT_NEG32            (SLJIT_NEG | SLJIT_I32_OP)
 /* Count leading zeroes
    Flags: I | E | K
    Important note! Sparc 32 does not support K flag, since
    the required popc instruction is introduced only in sparc 64. */
 #define SLJIT_CLZ            (SLJIT_OP1_BASE + 18)
-#define SLJIT_ICLZ            (SLJIT_CLZ | SLJIT_I32_OP)
+#define SLJIT_CLZ32            (SLJIT_CLZ | SLJIT_I32_OP)


 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
@@ -819,29 +815,29 @@


 /* Flags: I | E | O | C | K */
 #define SLJIT_ADD            (SLJIT_OP2_BASE + 0)
-#define SLJIT_IADD            (SLJIT_ADD | SLJIT_I32_OP)
+#define SLJIT_ADD32            (SLJIT_ADD | SLJIT_I32_OP)
 /* Flags: I | C | K */
 #define SLJIT_ADDC            (SLJIT_OP2_BASE + 1)
-#define SLJIT_IADDC            (SLJIT_ADDC | SLJIT_I32_OP)
+#define SLJIT_ADDC32            (SLJIT_ADDC | SLJIT_I32_OP)
 /* Flags: I | E | U | S | O | C | K */
 #define SLJIT_SUB            (SLJIT_OP2_BASE + 2)
-#define SLJIT_ISUB            (SLJIT_SUB | SLJIT_I32_OP)
+#define SLJIT_SUB32            (SLJIT_SUB | SLJIT_I32_OP)
 /* Flags: I | C | K */
 #define SLJIT_SUBC            (SLJIT_OP2_BASE + 3)
-#define SLJIT_ISUBC            (SLJIT_SUBC | SLJIT_I32_OP)
+#define SLJIT_SUBC32            (SLJIT_SUBC | SLJIT_I32_OP)
-/* Note: integer mul
-   Flags: I | O (see SLJIT_C_MUL_*) | K */
+/* Note: integer mul
+   Flags: I | O (see SLJIT_MUL_OVERFLOW) | K */
 #define SLJIT_MUL            (SLJIT_OP2_BASE + 4)
-#define SLJIT_IMUL            (SLJIT_MUL | SLJIT_I32_OP)
+#define SLJIT_MUL32            (SLJIT_MUL | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_AND            (SLJIT_OP2_BASE + 5)
-#define SLJIT_IAND            (SLJIT_AND | SLJIT_I32_OP)
+#define SLJIT_AND32            (SLJIT_AND | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_OR            (SLJIT_OP2_BASE + 6)
-#define SLJIT_IOR            (SLJIT_OR | SLJIT_I32_OP)
+#define SLJIT_OR32            (SLJIT_OR | SLJIT_I32_OP)
 /* Flags: I | E | K */
 #define SLJIT_XOR            (SLJIT_OP2_BASE + 7)
-#define SLJIT_IXOR            (SLJIT_XOR | SLJIT_I32_OP)
+#define SLJIT_XOR32            (SLJIT_XOR | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
@@ -848,7 +844,7 @@
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_SHL            (SLJIT_OP2_BASE + 8)
-#define SLJIT_ISHL            (SLJIT_SHL | SLJIT_I32_OP)
+#define SLJIT_SHL32            (SLJIT_SHL | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
@@ -855,7 +851,7 @@
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_LSHR            (SLJIT_OP2_BASE + 9)
-#define SLJIT_ILSHR            (SLJIT_LSHR | SLJIT_I32_OP)
+#define SLJIT_LSHR32            (SLJIT_LSHR | SLJIT_I32_OP)
 /* Flags: I | E | K
    Let bit_length be the length of the shift operation: 32 or 64.
    If src2 is immediate, src2w is masked by (bit_length - 1).
@@ -862,7 +858,7 @@
    Otherwise, if the content of src2 is outside the range from 0
    to bit_length - 1, the result is undefined. */
 #define SLJIT_ASHR            (SLJIT_OP2_BASE + 10)
-#define SLJIT_IASHR            (SLJIT_ASHR | SLJIT_I32_OP)
+#define SLJIT_ASHR32            (SLJIT_ASHR | SLJIT_I32_OP)


 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
@@ -877,38 +873,38 @@
 #define SLJIT_FOP1_BASE            128


 /* Flags: SP - (never set any flags) */
-#define SLJIT_DMOV            (SLJIT_FOP1_BASE + 0)
-#define SLJIT_SMOV            (SLJIT_DMOV | SLJIT_SINGLE_OP)
+#define SLJIT_MOV_F64            (SLJIT_FOP1_BASE + 0)
+#define SLJIT_MOV_F32            (SLJIT_MOV_F64 | SLJIT_F32_OP)
-/* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE]
-   SRC/DST TYPE can be: D - double, S - single, W - signed word, I - signed int
-   Rounding mode when the destination is W or I: round towards zero. */
+/* Convert opcodes: CONV[DST_TYPE].FROM[SRC_TYPE]
+   SRC/DST TYPE can be: F64 - double, F32 - single, SW - signed word, S32 - signed 32 bit int
+   Rounding mode when the destination is SW or S32: round towards zero. */
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMS        (SLJIT_FOP1_BASE + 1)
-#define SLJIT_CONVS_FROMD        (SLJIT_CONVD_FROMS | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_F32        (SLJIT_FOP1_BASE + 1)
+#define SLJIT_CONV_F32_FROM_F64        (SLJIT_CONV_F64_FROM_F32 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVW_FROMD        (SLJIT_FOP1_BASE + 2)
-#define SLJIT_CONVW_FROMS        (SLJIT_CONVW_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_SW_FROM_F64        (SLJIT_FOP1_BASE + 2)
+#define SLJIT_CONV_SW_FROM_F32        (SLJIT_CONV_SW_FROM_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVI_FROMD        (SLJIT_FOP1_BASE + 3)
-#define SLJIT_CONVI_FROMS        (SLJIT_CONVI_FROMD | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_S32_FROM_F64        (SLJIT_FOP1_BASE + 3)
+#define SLJIT_CONV_S32_FROM_F32        (SLJIT_CONV_S32_FROM_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMW        (SLJIT_FOP1_BASE + 4)
-#define SLJIT_CONVS_FROMW        (SLJIT_CONVD_FROMW | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_SW        (SLJIT_FOP1_BASE + 4)
+#define SLJIT_CONV_F32_FROM_SW        (SLJIT_CONV_F64_FROM_SW | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_CONVD_FROMI        (SLJIT_FOP1_BASE + 5)
-#define SLJIT_CONVS_FROMI        (SLJIT_CONVD_FROMI | SLJIT_SINGLE_OP)
+#define SLJIT_CONV_F64_FROM_S32        (SLJIT_FOP1_BASE + 5)
+#define SLJIT_CONV_F32_FROM_S32        (SLJIT_CONV_F64_FROM_S32 | SLJIT_F32_OP)
-/* Note: dst is the left and src is the right operand for SLJIT_CMPD.
-   Note: NaN check is always performed. If SLJIT_C_FLOAT_UNORDERED flag
-         is set, the comparison result is unpredictable.
-   Flags: SP | E | S (see SLJIT_C_FLOAT_*) */
+/* Note: dst is the left and src is the right operand for SLJIT_CMP_F64.
+   Note: NaN check is always performed. If SLJIT_UNORDERED_F64 flag
+         is set, the comparison result is unpredictable.
+   Flags: SP | E | S (see SLJIT_UNORDERED_F64 and SLJIT_ORDERED_F64) */
-#define SLJIT_DCMP            (SLJIT_FOP1_BASE + 6)
-#define SLJIT_SCMP            (SLJIT_DCMP | SLJIT_SINGLE_OP)
+#define SLJIT_CMP_F64            (SLJIT_FOP1_BASE + 6)
+#define SLJIT_CMP_F32            (SLJIT_CMP_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DNEG            (SLJIT_FOP1_BASE + 7)
-#define SLJIT_SNEG            (SLJIT_DNEG | SLJIT_SINGLE_OP)
+#define SLJIT_NEG_F64            (SLJIT_FOP1_BASE + 7)
+#define SLJIT_NEG_F32            (SLJIT_NEG_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DABS            (SLJIT_FOP1_BASE + 8)
-#define SLJIT_SABS            (SLJIT_DABS | SLJIT_SINGLE_OP)
+#define SLJIT_ABS_F64            (SLJIT_FOP1_BASE + 8)
+#define SLJIT_ABS_F32            (SLJIT_ABS_F64 | SLJIT_F32_OP)


 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
@@ -918,17 +914,17 @@
 #define SLJIT_FOP2_BASE            160


 /* Flags: SP - (never set any flags) */
-#define SLJIT_DADD            (SLJIT_FOP2_BASE + 0)
-#define SLJIT_SADD            (SLJIT_DADD | SLJIT_SINGLE_OP)
+#define SLJIT_ADD_F64            (SLJIT_FOP2_BASE + 0)
+#define SLJIT_ADD_F32            (SLJIT_ADD_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DSUB            (SLJIT_FOP2_BASE + 1)
-#define SLJIT_SSUB            (SLJIT_DSUB | SLJIT_SINGLE_OP)
+#define SLJIT_SUB_F64            (SLJIT_FOP2_BASE + 1)
+#define SLJIT_SUB_F32            (SLJIT_SUB_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DMUL            (SLJIT_FOP2_BASE + 2)
-#define SLJIT_SMUL            (SLJIT_DMUL | SLJIT_SINGLE_OP)
+#define SLJIT_MUL_F64            (SLJIT_FOP2_BASE + 2)
+#define SLJIT_MUL_F32            (SLJIT_MUL_F64 | SLJIT_F32_OP)
 /* Flags: SP - (never set any flags) */
-#define SLJIT_DDIV            (SLJIT_FOP2_BASE + 3)
-#define SLJIT_SDIV            (SLJIT_DDIV | SLJIT_SINGLE_OP)
+#define SLJIT_DIV_F64            (SLJIT_FOP2_BASE + 3)
+#define SLJIT_DIV_F32            (SLJIT_DIV_F64 | SLJIT_F32_OP)


 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop2(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
@@ -943,58 +939,58 @@


 /* Integer comparison types. */
 #define SLJIT_EQUAL            0
-#define SLJIT_I_EQUAL            (SLJIT_EQUAL | SLJIT_I32_OP)
+#define SLJIT_EQUAL32            (SLJIT_EQUAL | SLJIT_I32_OP)
 #define SLJIT_ZERO            0
-#define SLJIT_I_ZERO            (SLJIT_ZERO | SLJIT_I32_OP)
+#define SLJIT_ZERO32            (SLJIT_ZERO | SLJIT_I32_OP)
 #define SLJIT_NOT_EQUAL            1
-#define SLJIT_I_NOT_EQUAL        (SLJIT_NOT_EQUAL | SLJIT_I32_OP)
+#define SLJIT_NOT_EQUAL32        (SLJIT_NOT_EQUAL | SLJIT_I32_OP)
 #define SLJIT_NOT_ZERO            1
-#define SLJIT_I_NOT_ZERO        (SLJIT_NOT_ZERO | SLJIT_I32_OP)
+#define SLJIT_NOT_ZERO32        (SLJIT_NOT_ZERO | SLJIT_I32_OP)


 #define SLJIT_LESS            2
-#define SLJIT_I_LESS            (SLJIT_LESS | SLJIT_I32_OP)
+#define SLJIT_LESS32            (SLJIT_LESS | SLJIT_I32_OP)
 #define SLJIT_GREATER_EQUAL        3
-#define SLJIT_I_GREATER_EQUAL        (SLJIT_GREATER_EQUAL | SLJIT_I32_OP)
+#define SLJIT_GREATER_EQUAL32        (SLJIT_GREATER_EQUAL | SLJIT_I32_OP)
 #define SLJIT_GREATER            4
-#define SLJIT_I_GREATER            (SLJIT_GREATER | SLJIT_I32_OP)
+#define SLJIT_GREATER32            (SLJIT_GREATER | SLJIT_I32_OP)
 #define SLJIT_LESS_EQUAL        5
-#define SLJIT_I_LESS_EQUAL        (SLJIT_LESS_EQUAL | SLJIT_I32_OP)
+#define SLJIT_LESS_EQUAL32        (SLJIT_LESS_EQUAL | SLJIT_I32_OP)
 #define SLJIT_SIG_LESS            6
-#define SLJIT_I_SIG_LESS        (SLJIT_SIG_LESS | SLJIT_I32_OP)
+#define SLJIT_SIG_LESS32        (SLJIT_SIG_LESS | SLJIT_I32_OP)
 #define SLJIT_SIG_GREATER_EQUAL        7
-#define SLJIT_I_SIG_GREATER_EQUAL    (SLJIT_SIG_GREATER_EQUAL | SLJIT_I32_OP)
+#define SLJIT_SIG_GREATER_EQUAL32    (SLJIT_SIG_GREATER_EQUAL | SLJIT_I32_OP)
 #define SLJIT_SIG_GREATER        8
-#define SLJIT_I_SIG_GREATER        (SLJIT_SIG_GREATER | SLJIT_I32_OP)
+#define SLJIT_SIG_GREATER32        (SLJIT_SIG_GREATER | SLJIT_I32_OP)
 #define SLJIT_SIG_LESS_EQUAL        9
-#define SLJIT_I_SIG_LESS_EQUAL        (SLJIT_SIG_LESS_EQUAL | SLJIT_I32_OP)
+#define SLJIT_SIG_LESS_EQUAL32        (SLJIT_SIG_LESS_EQUAL | SLJIT_I32_OP)


 #define SLJIT_OVERFLOW            10
-#define SLJIT_I_OVERFLOW        (SLJIT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_OVERFLOW32        (SLJIT_OVERFLOW | SLJIT_I32_OP)
 #define SLJIT_NOT_OVERFLOW        11
-#define SLJIT_I_NOT_OVERFLOW        (SLJIT_NOT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_NOT_OVERFLOW32        (SLJIT_NOT_OVERFLOW | SLJIT_I32_OP)


 #define SLJIT_MUL_OVERFLOW        12
-#define SLJIT_I_MUL_OVERFLOW        (SLJIT_MUL_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_MUL_OVERFLOW32        (SLJIT_MUL_OVERFLOW | SLJIT_I32_OP)
 #define SLJIT_MUL_NOT_OVERFLOW        13
-#define SLJIT_I_MUL_NOT_OVERFLOW    (SLJIT_MUL_NOT_OVERFLOW | SLJIT_I32_OP)
+#define SLJIT_MUL_NOT_OVERFLOW32    (SLJIT_MUL_NOT_OVERFLOW | SLJIT_I32_OP)


 /* Floating point comparison types. */
-#define SLJIT_D_EQUAL            14
-#define SLJIT_S_EQUAL            (SLJIT_D_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_NOT_EQUAL        15
-#define SLJIT_S_NOT_EQUAL        (SLJIT_D_NOT_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS            16
-#define SLJIT_S_LESS            (SLJIT_D_LESS | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER_EQUAL        17
-#define SLJIT_S_GREATER_EQUAL        (SLJIT_D_GREATER_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_GREATER            18
-#define SLJIT_S_GREATER            (SLJIT_D_GREATER | SLJIT_SINGLE_OP)
-#define SLJIT_D_LESS_EQUAL        19
-#define SLJIT_S_LESS_EQUAL        (SLJIT_D_LESS_EQUAL | SLJIT_SINGLE_OP)
-#define SLJIT_D_UNORDERED        20
-#define SLJIT_S_UNORDERED        (SLJIT_D_UNORDERED | SLJIT_SINGLE_OP)
-#define SLJIT_D_ORDERED            21
-#define SLJIT_S_ORDERED            (SLJIT_D_ORDERED | SLJIT_SINGLE_OP)
+#define SLJIT_EQUAL_F64            14
+#define SLJIT_EQUAL_F32            (SLJIT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_NOT_EQUAL_F64        15
+#define SLJIT_NOT_EQUAL_F32        (SLJIT_NOT_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_F64            16
+#define SLJIT_LESS_F32            (SLJIT_LESS_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_EQUAL_F64        17
+#define SLJIT_GREATER_EQUAL_F32        (SLJIT_GREATER_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_GREATER_F64        18
+#define SLJIT_GREATER_F32        (SLJIT_GREATER_F64 | SLJIT_F32_OP)
+#define SLJIT_LESS_EQUAL_F64        19
+#define SLJIT_LESS_EQUAL_F32        (SLJIT_LESS_EQUAL_F64 | SLJIT_F32_OP)
+#define SLJIT_UNORDERED_F64        20
+#define SLJIT_UNORDERED_F32        (SLJIT_UNORDERED_F64 | SLJIT_F32_OP)
+#define SLJIT_ORDERED_F64        21
+#define SLJIT_ORDERED_F32        (SLJIT_ORDERED_F64 | SLJIT_F32_OP)


 /* Unconditional jump types. */
 #define SLJIT_JUMP            22
@@ -1033,7 +1029,7 @@
    sljit_emit_jump. However some architectures (i.e: MIPS) may employ
    special optimizations here. It is suggested to use this comparison form
    when appropriate.
-    type must be between SLJIT_D_EQUAL and SLJIT_S_ORDERED
+    type must be between SLJIT_EQUAL_F64 and SLJIT_ORDERED_F32
     type can be combined (or'ed) with SLJIT_REWRITABLE_JUMP
    Flags: destroy flags.
    Note: if either operand is NaN, the behaviour is undefined for


Modified: code/trunk/src/sljit/sljitNativeARM_32.c
===================================================================
--- code/trunk/src/sljit/sljitNativeARM_32.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeARM_32.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -1817,10 +1817,10 @@
     case SLJIT_NOP:
         FAIL_IF(push_inst(compiler, NOP));
         break;
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
 #if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
-        return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+        return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
             | (reg_map[SLJIT_R1] << 16)
             | (reg_map[SLJIT_R0] << 12)
             | (reg_map[SLJIT_R0] << 8)
@@ -1827,39 +1827,39 @@
             | reg_map[SLJIT_R1]);
 #else
         FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, RM(SLJIT_R1))));
-        return push_inst(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+        return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
             | (reg_map[SLJIT_R1] << 16)
             | (reg_map[SLJIT_R0] << 12)
             | (reg_map[SLJIT_R0] << 8)
             | reg_map[TMP_REG1]);
 #endif
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
-        SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
+        SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
         SLJIT_COMPILE_ASSERT(reg_map[2] == 1 && reg_map[3] == 2, bad_register_mapping);


-        if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+        if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
             FAIL_IF(push_inst(compiler, 0xe52d2008 /* str r2, [sp, #-8]! */));
             FAIL_IF(push_inst(compiler, 0xe58d1004 /* str r1, [sp, #4] */));
         }
-        else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
-            FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));
+        else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+            FAIL_IF(push_inst(compiler, 0xe52d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* str r1/r2, [sp, #-8]! */));


 #if defined(__GNUC__)
         FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
-            ((op | 0x2) == SLJIT_UDIVI ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+            ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
 #else
 #error "Software divmod functions are needed"
 #endif


-        if ((op >= SLJIT_UDIVI) && (compiler->scratches >= 3)) {
+        if ((op >= SLJIT_DIV_UW) && (compiler->scratches >= 3)) {
             FAIL_IF(push_inst(compiler, 0xe59d1004 /* ldr r1, [sp, #4] */));
             FAIL_IF(push_inst(compiler, 0xe49d2008 /* ldr r2, [sp], #8 */));
         }
-        else if ((op >= SLJIT_UDIVI) || (compiler->scratches >= 3))
-            return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_UDIVI ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
+        else if ((op >= SLJIT_DIV_UW) || (compiler->scratches >= 3))
+            return push_inst(compiler, 0xe49d0008 | (op >= SLJIT_DIV_UW ? 0x1000 : 0x2000) /* ldr r1/r2, [sp], #8 */);
         return SLJIT_SUCCESS;
     }


@@ -2044,7 +2044,7 @@
 {
     sljit_sw tmp;
     sljit_uw imm;
-    sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD));
+    sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));
     SLJIT_ASSERT(arg & SLJIT_MEM);


     if (SLJIT_UNLIKELY(arg & OFFS_REG_MASK)) {
@@ -2104,16 +2104,16 @@
     return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0));
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
     if (src & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
         src = TMP_FREG1;
     }


-    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_SINGLE_OP, TMP_FREG1, src, 0)));
+    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_S32_F32, op & SLJIT_F32_OP, TMP_FREG1, src, 0)));


     if (dst == SLJIT_UNUSED)
         return SLJIT_SUCCESS;
@@ -2125,7 +2125,7 @@
     return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -2142,10 +2142,10 @@
         FAIL_IF(push_inst(compiler, VMOV | RD(TMP_REG1) | (TMP_FREG1 << 16)));
     }


-    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_SINGLE_OP, dst_r, TMP_FREG1, 0)));
+    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F32_S32, op & SLJIT_F32_OP, dst_r, TMP_FREG1, 0)));


     if (dst & SLJIT_MEM)
-        return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+        return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
     return SLJIT_SUCCESS;
 }


@@ -2154,16 +2154,16 @@
     sljit_s32 src2, sljit_sw src2w)
 {
     if (src1 & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
         src1 = TMP_FREG1;
     }


     if (src2 & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
         src2 = TMP_FREG2;
     }


-    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_SINGLE_OP, src1, src2, 0)));
+    FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_F32_OP, src1, src2, 0)));
     return push_inst(compiler, VMRS);
 }


@@ -2176,42 +2176,42 @@
     CHECK_ERROR();
     compiler->cache_arg = 0;
     compiler->cache_argw = 0;
-    if (GET_OPCODE(op) != SLJIT_CONVD_FROMS)
-        op ^= SLJIT_SINGLE_OP;
+    if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
+        op ^= SLJIT_F32_OP;


-    SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error);
+    SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


     if (src & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw));
         src = dst_r;
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1)
-                FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+                FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
             else
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+    case SLJIT_NEG_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
         break;
-    case SLJIT_DABS:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
+    case SLJIT_ABS_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
         break;
-    case SLJIT_CONVD_FROMS:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_SINGLE_OP, dst_r, src, 0)));
-        op ^= SLJIT_SINGLE_OP;
+    case SLJIT_CONV_F64_FROM_F32:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VCVT_F64_F32, op & SLJIT_F32_OP, dst_r, src, 0)));
+        op ^= SLJIT_F32_OP;
         break;
     }


     if (dst & SLJIT_MEM)
-        return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_r, dst, dstw);
+        return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
     return SLJIT_SUCCESS;
 }


@@ -2230,40 +2230,40 @@

     compiler->cache_arg = 0;
     compiler->cache_argw = 0;
-    op ^= SLJIT_SINGLE_OP;
+    op ^= SLJIT_F32_OP;


     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


     if (src2 & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w));
         src2 = TMP_FREG2;
     }


     if (src1 & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w));
         src1 = TMP_FREG1;
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+    case SLJIT_ADD_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
         break;


-    case SLJIT_DSUB:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+    case SLJIT_SUB_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
         break;


-    case SLJIT_DMUL:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+    case SLJIT_MUL_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
         break;


-    case SLJIT_DDIV:
-        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_SINGLE_OP, dst_r, src2, src1)));
+    case SLJIT_DIV_F64:
+        FAIL_IF(push_inst(compiler, EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_F32_OP, dst_r, src2, src1)));
         break;
     }


     if (dst_r == TMP_FREG1)
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw));


     return SLJIT_SUCCESS;
 }
@@ -2331,28 +2331,28 @@
     switch (type) {
     case SLJIT_EQUAL:
     case SLJIT_MUL_NOT_OVERFLOW:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return 0x00000000;


     case SLJIT_NOT_EQUAL:
     case SLJIT_MUL_OVERFLOW:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         return 0x10000000;


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return 0x30000000;


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         return 0x20000000;


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         return 0x80000000;


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return 0x90000000;


     case SLJIT_SIG_LESS:
@@ -2368,11 +2368,11 @@
         return 0xd0000000;


     case SLJIT_OVERFLOW:
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return 0x60000000;


     case SLJIT_NOT_OVERFLOW:
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return 0x70000000;


     default:


Modified: code/trunk/src/sljit/sljitNativeARM_64.c
===================================================================
--- code/trunk/src/sljit/sljitNativeARM_64.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeARM_64.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -1256,20 +1256,20 @@
         return push_inst(compiler, BRK);
     case SLJIT_NOP:
         return push_inst(compiler, NOP);
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
         FAIL_IF(push_inst(compiler, ORR | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
         FAIL_IF(push_inst(compiler, MADD | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
-        return push_inst(compiler, (op == SLJIT_LUMUL ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
+        return push_inst(compiler, (op == SLJIT_LMUL_UW ? UMULH : SMULH) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
         FAIL_IF(push_inst(compiler, (ORR ^ inv_bits) | RD(TMP_REG1) | RN(TMP_ZERO) | RM(SLJIT_R0)));
-        FAIL_IF(push_inst(compiler, ((op == SLJIT_UDIVMOD ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
+        FAIL_IF(push_inst(compiler, ((op == SLJIT_DIVMOD_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1)));
         FAIL_IF(push_inst(compiler, (MADD ^ inv_bits) | RD(SLJIT_R1) | RN(SLJIT_R0) | RM(SLJIT_R1) | RT2(TMP_ZERO)));
         return push_inst(compiler, (SUB ^ inv_bits) | RD(SLJIT_R1) | RN(TMP_REG1) | RM(SLJIT_R1));
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
-        return push_inst(compiler, ((op == SLJIT_UDIVI ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
+        return push_inst(compiler, ((op == SLJIT_DIV_UW ? UDIV : SDIV) ^ inv_bits) | RD(SLJIT_R0) | RN(SLJIT_R0) | RM(SLJIT_R1));
     }


     return SLJIT_SUCCESS;
@@ -1600,18 +1600,18 @@
     return push_inst(compiler, STR_FI | ins_bits | VT(reg) | RN(TMP_REG3));
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
     sljit_s32 dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
-    sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+    sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;


-    if (GET_OPCODE(op) == SLJIT_CONVI_FROMD)
+    if (GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64)
         inv_bits |= (1 << 31);


     if (src & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE, TMP_FREG1, src, srcw);
         src = TMP_FREG1;
     }


@@ -1618,26 +1618,26 @@
     FAIL_IF(push_inst(compiler, (FCVTZS ^ inv_bits) | RD(dst_r) | VN(src)));


     if (dst_r == TMP_REG1 && dst != SLJIT_UNUSED)
-        return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONVI_FROMD) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw);
+        return emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_S32_FROM_F64) ? INT_SIZE : WORD_SIZE) | STORE, TMP_REG1, dst, dstw);
     return SLJIT_SUCCESS;
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
     sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
-    sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+    sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;


-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
         inv_bits |= (1 << 31);


     if (src & SLJIT_MEM) {
-        emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONVD_FROMI) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw);
+        emit_op_mem(compiler, ((GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) ? INT_SIZE : WORD_SIZE), TMP_REG1, src, srcw);
         src = TMP_REG1;
     } else if (src & SLJIT_IMM) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
             srcw = (sljit_s32)srcw;
 #endif
         FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
@@ -1647,7 +1647,7 @@
     FAIL_IF(push_inst(compiler, (SCVTF ^ inv_bits) | VD(dst_r) | RN(src)));


     if (dst & SLJIT_MEM)
-        return emit_fop_mem(compiler, ((op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
+        return emit_fop_mem(compiler, ((op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE) | STORE, TMP_FREG1, dst, dstw);
     return SLJIT_SUCCESS;
 }


@@ -1655,8 +1655,8 @@
     sljit_s32 src1, sljit_sw src1w,
     sljit_s32 src2, sljit_sw src2w)
 {
-    sljit_s32 mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
-    sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+    sljit_s32 mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
+    sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;


     if (src1 & SLJIT_MEM) {
         emit_fop_mem(compiler, mem_flags, TMP_FREG1, src1, src1w);
@@ -1675,7 +1675,7 @@
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
-    sljit_s32 dst_r, mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
+    sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
     sljit_ins inv_bits;


     CHECK_ERROR();
@@ -1685,16 +1685,16 @@
     SLJIT_COMPILE_ASSERT((INT_SIZE ^ 0x100) == WORD_SIZE, must_be_one_bit_difference);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


-    inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+    inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;
     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


     if (src & SLJIT_MEM) {
-        emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONVD_FROMS) ? (mem_flags ^ 0x100) : mem_flags, dst_r, src, srcw);
+        emit_fop_mem(compiler, (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) ? (mem_flags ^ 0x100) : mem_flags, dst_r, src, srcw);
         src = dst_r;
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1)
                 FAIL_IF(push_inst(compiler, (FMOV ^ inv_bits) | VD(dst_r) | VN(src)));
@@ -1702,14 +1702,14 @@
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
+    case SLJIT_NEG_F64:
         FAIL_IF(push_inst(compiler, (FNEG ^ inv_bits) | VD(dst_r) | VN(src)));
         break;
-    case SLJIT_DABS:
+    case SLJIT_ABS_F64:
         FAIL_IF(push_inst(compiler, (FABS ^ inv_bits) | VD(dst_r) | VN(src)));
         break;
-    case SLJIT_CONVD_FROMS:
-        FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_SINGLE_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
+    case SLJIT_CONV_F64_FROM_F32:
+        FAIL_IF(push_inst(compiler, FCVT | ((op & SLJIT_F32_OP) ? (1 << 22) : (1 << 15)) | VD(dst_r) | VN(src)));
         break;
     }


@@ -1723,8 +1723,8 @@
     sljit_s32 src1, sljit_sw src1w,
     sljit_s32 src2, sljit_sw src2w)
 {
-    sljit_s32 dst_r, mem_flags = (op & SLJIT_SINGLE_OP) ? INT_SIZE : WORD_SIZE;
-    sljit_ins inv_bits = (op & SLJIT_SINGLE_OP) ? (1 << 22) : 0;
+    sljit_s32 dst_r, mem_flags = (op & SLJIT_F32_OP) ? INT_SIZE : WORD_SIZE;
+    sljit_ins inv_bits = (op & SLJIT_F32_OP) ? (1 << 22) : 0;


     CHECK_ERROR();
     CHECK(check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w));
@@ -1746,16 +1746,16 @@
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
+    case SLJIT_ADD_F64:
         FAIL_IF(push_inst(compiler, (FADD ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
         break;
-    case SLJIT_DSUB:
+    case SLJIT_SUB_F64:
         FAIL_IF(push_inst(compiler, (FSUB ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
         break;
-    case SLJIT_DMUL:
+    case SLJIT_MUL_F64:
         FAIL_IF(push_inst(compiler, (FMUL ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
         break;
-    case SLJIT_DDIV:
+    case SLJIT_DIV_F64:
         FAIL_IF(push_inst(compiler, (FDIV ^ inv_bits) | VD(dst_r) | VN(src1) | VM(src2)));
         break;
     }
@@ -1811,28 +1811,28 @@
     switch (type) {
     case SLJIT_EQUAL:
     case SLJIT_MUL_NOT_OVERFLOW:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return 0x1;


     case SLJIT_NOT_EQUAL:
     case SLJIT_MUL_OVERFLOW:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         return 0x0;


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return 0x2;


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         return 0x3;


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         return 0x9;


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return 0x8;


     case SLJIT_SIG_LESS:
@@ -1848,11 +1848,11 @@
         return 0xc;


     case SLJIT_OVERFLOW:
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return 0x7;


     case SLJIT_NOT_OVERFLOW:
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return 0x6;


     default:


Modified: code/trunk/src/sljit/sljitNativeARM_T2_32.c
===================================================================
--- code/trunk/src/sljit/sljitNativeARM_T2_32.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeARM_T2_32.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -1251,18 +1251,18 @@
         return push_inst16(compiler, BKPT);
     case SLJIT_NOP:
         return push_inst16(compiler, NOP);
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
-        return push_inst32(compiler, (op == SLJIT_LUMUL ? UMULL : SMULL)
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
+        return push_inst32(compiler, (op == SLJIT_LMUL_UW ? UMULL : SMULL)
             | (reg_map[SLJIT_R1] << 8)
             | (reg_map[SLJIT_R0] << 12)
             | (reg_map[SLJIT_R0] << 16)
             | reg_map[SLJIT_R1]);
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
-        SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
+        SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
         SLJIT_COMPILE_ASSERT(reg_map[2] == 1 && reg_map[3] == 2 && reg_map[4] == 12, bad_register_mapping);


         saved_reg_count = 0;
@@ -1270,7 +1270,7 @@
             saved_reg_list[saved_reg_count++] = 12;
         if (compiler->scratches >= 3)
             saved_reg_list[saved_reg_count++] = 2;
-        if (op >= SLJIT_UDIVI)
+        if (op >= SLJIT_DIV_UW)
             saved_reg_list[saved_reg_count++] = 1;


         if (saved_reg_count > 0) {
@@ -1288,7 +1288,7 @@


 #if defined(__GNUC__)
         FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM,
-            ((op | 0x2) == SLJIT_UDIVI ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
+            ((op | 0x2) == SLJIT_DIV_UW ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod))));
 #else
 #error "Software divmod functions are needed"
 #endif
@@ -1566,7 +1566,7 @@
 {
     sljit_sw tmp;
     sljit_uw imm;
-    sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD));
+    sljit_sw inst = VSTR_F32 | (flags & (SLJIT_F32_OP | FPU_LOAD));


     SLJIT_ASSERT(arg & SLJIT_MEM);


@@ -1626,16 +1626,16 @@
     return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg));
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
     if (src & SLJIT_MEM) {
-        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
+        FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src, srcw));
         src = TMP_FREG1;
     }


-    FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_SINGLE_OP) | DD4(TMP_FREG1) | DM4(src)));
+    FAIL_IF(push_inst32(compiler, VCVT_S32_F32 | (op & SLJIT_F32_OP) | DD4(TMP_FREG1) | DM4(src)));


     if (dst == SLJIT_UNUSED)
         return SLJIT_SUCCESS;
@@ -1647,7 +1647,7 @@
     return emit_fop_mem(compiler, 0, TMP_FREG1, dst, dstw);
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1664,10 +1664,10 @@
         FAIL_IF(push_inst32(compiler, VMOV | RT4(TMP_REG1) | DN4(TMP_FREG1)));
     }


-    FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(TMP_FREG1)));
+    FAIL_IF(push_inst32(compiler, VCVT_F32_S32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(TMP_FREG1)));


     if (dst & SLJIT_MEM)
-        return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+        return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
     return SLJIT_SUCCESS;
 }


@@ -1676,16 +1676,16 @@
     sljit_s32 src2, sljit_sw src2w)
 {
     if (src1 & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
         src1 = TMP_FREG1;
     }


     if (src2 & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
         src2 = TMP_FREG2;
     }


-    FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_SINGLE_OP) | DD4(src1) | DM4(src2)));
+    FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_F32_OP) | DD4(src1) | DM4(src2)));
     return push_inst32(compiler, VMRS);
 }


@@ -1698,42 +1698,42 @@
     CHECK_ERROR();
     compiler->cache_arg = 0;
     compiler->cache_argw = 0;
-    if (GET_OPCODE(op) != SLJIT_CONVD_FROMS)
-        op ^= SLJIT_SINGLE_OP;
+    if (GET_OPCODE(op) != SLJIT_CONV_F64_FROM_F32)
+        op ^= SLJIT_F32_OP;


-    SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error);
+    SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100), float_transfer_bit_error);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


     if (src & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, dst_r, src, srcw);
         src = dst_r;
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1)
-                FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+                FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
             else
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
-        FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+    case SLJIT_NEG_F64:
+        FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
         break;
-    case SLJIT_DABS:
-        FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
+    case SLJIT_ABS_F64:
+        FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
         break;
-    case SLJIT_CONVD_FROMS:
-        FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src)));
-        op ^= SLJIT_SINGLE_OP;
+    case SLJIT_CONV_F64_FROM_F32:
+        FAIL_IF(push_inst32(compiler, VCVT_F64_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DM4(src)));
+        op ^= SLJIT_F32_OP;
         break;
     }


     if (dst & SLJIT_MEM)
-        return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_r, dst, dstw);
+        return emit_fop_mem(compiler, (op & SLJIT_F32_OP), dst_r, dst, dstw);
     return SLJIT_SUCCESS;
 }


@@ -1752,36 +1752,36 @@

     compiler->cache_arg = 0;
     compiler->cache_argw = 0;
-    op ^= SLJIT_SINGLE_OP;
+    op ^= SLJIT_F32_OP;


     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;
     if (src1 & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG1, src1, src1w);
         src1 = TMP_FREG1;
     }
     if (src2 & SLJIT_MEM) {
-        emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
+        emit_fop_mem(compiler, (op & SLJIT_F32_OP) | FPU_LOAD, TMP_FREG2, src2, src2w);
         src2 = TMP_FREG2;
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
-        FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+    case SLJIT_ADD_F64:
+        FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
         break;
-    case SLJIT_DSUB:
-        FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+    case SLJIT_SUB_F64:
+        FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
         break;
-    case SLJIT_DMUL:
-        FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+    case SLJIT_MUL_F64:
+        FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
         break;
-    case SLJIT_DDIV:
-        FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
+    case SLJIT_DIV_F64:
+        FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_F32_OP) | DD4(dst_r) | DN4(src1) | DM4(src2)));
         break;
     }


     if (!(dst & SLJIT_MEM))
         return SLJIT_SUCCESS;
-    return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw);
+    return emit_fop_mem(compiler, (op & SLJIT_F32_OP), TMP_FREG1, dst, dstw);
 }


 #undef FPU_LOAD
@@ -1845,28 +1845,28 @@
     switch (type) {
     case SLJIT_EQUAL:
     case SLJIT_MUL_NOT_OVERFLOW:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return 0x0;


     case SLJIT_NOT_EQUAL:
     case SLJIT_MUL_OVERFLOW:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         return 0x1;


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return 0x3;


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         return 0x2;


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         return 0x8;


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return 0x9;


     case SLJIT_SIG_LESS:
@@ -1882,11 +1882,11 @@
         return 0xd;


     case SLJIT_OVERFLOW:
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return 0x6;


     case SLJIT_NOT_OVERFLOW:
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return 0x7;


     default: /* SLJIT_JUMP */


Modified: code/trunk/src/sljit/sljitNativeMIPS_common.c
===================================================================
--- code/trunk/src/sljit/sljitNativeMIPS_common.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeMIPS_common.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -1044,20 +1044,20 @@
         return push_inst(compiler, BREAK, UNMOVABLE_INS);
     case SLJIT_NOP:
         return push_inst(compiler, NOP, UNMOVABLE_INS);
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
 #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
-        FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? DMULTU : DMULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+        FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? DMULTU : DMULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
 #else
-        FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? MULTU : MULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+        FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? MULTU : MULT) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
 #endif
         FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0)));
         return push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
-        SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
+        SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
 #if !(defined SLJIT_MIPS_R1 && SLJIT_MIPS_R1)
         FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
         FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS));
@@ -1065,15 +1065,15 @@


 #if (defined SLJIT_CONFIG_MIPS_64 && SLJIT_CONFIG_MIPS_64)
         if (int_op)
-            FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+            FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
         else
-            FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+            FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DDIVU : DDIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
 #else
-        FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
+        FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? DIVU : DIV) | S(SLJIT_R0) | T(SLJIT_R1), MOVABLE_INS));
 #endif


         FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_R0), DR(SLJIT_R0)));
-        return (op >= SLJIT_UDIVI) ? SLJIT_SUCCESS : push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
+        return (op >= SLJIT_DIV_UW) ? SLJIT_SUCCESS : push_inst(compiler, MFHI | D(SLJIT_R1), DR(SLJIT_R1));
     }


     return SLJIT_SUCCESS;
@@ -1286,10 +1286,10 @@
 #endif
 }


-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7))
-#define FMT(op) (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) << (21 - 8))
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
+#define FMT(op) (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) << (21 - 8))

-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1296,7 +1296,7 @@
 #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
 #    define flags 0
 #else
-    sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONVW_FROMD) << 21;
+    sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64) << 21;
 #endif


     if (src & SLJIT_MEM) {
@@ -1322,7 +1322,7 @@
 #endif
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1329,7 +1329,7 @@
 #if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
 #    define flags 0
 #else
-    sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONVD_FROMW) << 21;
+    sljit_s32 flags = (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW) << 21;
 #endif


     sljit_s32 dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;
@@ -1342,7 +1342,7 @@
     }
     else {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
             srcw = (sljit_s32)srcw;
 #endif
         FAIL_IF(load_immediate(compiler, DR(TMP_REG1), srcw));
@@ -1349,7 +1349,7 @@
         FAIL_IF(push_inst(compiler, MTC1 | flags | T(TMP_REG1) | FS(TMP_FREG1), MOVABLE_INS));
     }


-    FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));
+    FAIL_IF(push_inst(compiler, CVT_S_S | flags | (4 << 21) | (((op & SLJIT_F32_OP) ^ SLJIT_F32_OP) >> 8) | FS(TMP_FREG1) | FD(dst_r), MOVABLE_INS));


     if (dst & SLJIT_MEM)
         return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
@@ -1409,11 +1409,11 @@
     compiler->cache_arg = 0;
     compiler->cache_argw = 0;


-    SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+    SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
-        op ^= SLJIT_SINGLE_OP;
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+        op ^= SLJIT_F32_OP;


     dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;


@@ -1425,7 +1425,7 @@
         src <<= 1;


     switch (GET_OPCODE(op)) {
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1)
                 FAIL_IF(push_inst(compiler, MOV_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
@@ -1433,15 +1433,15 @@
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
+    case SLJIT_NEG_F64:
         FAIL_IF(push_inst(compiler, NEG_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
         break;
-    case SLJIT_DABS:
+    case SLJIT_ABS_F64:
         FAIL_IF(push_inst(compiler, ABS_S | FMT(op) | FS(src) | FD(dst_r), MOVABLE_INS));
         break;
-    case SLJIT_CONVD_FROMS:
-        FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_SINGLE_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
-        op ^= SLJIT_SINGLE_OP;
+    case SLJIT_CONV_F64_FROM_F32:
+        FAIL_IF(push_inst(compiler, CVT_S_S | ((op & SLJIT_F32_OP) ? 1 : (1 << 21)) | FS(src) | FD(dst_r), MOVABLE_INS));
+        op ^= SLJIT_F32_OP;
         break;
     }


@@ -1509,19 +1509,19 @@
         src2 = TMP_FREG2;


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
+    case SLJIT_ADD_F64:
         FAIL_IF(push_inst(compiler, ADD_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
         break;


-    case SLJIT_DSUB:
+    case SLJIT_SUB_F64:
         FAIL_IF(push_inst(compiler, SUB_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
         break;


-    case SLJIT_DMUL:
+    case SLJIT_MUL_F64:
         FAIL_IF(push_inst(compiler, MUL_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
         break;


-    case SLJIT_DDIV:
+    case SLJIT_DIV_F64:
         FAIL_IF(push_inst(compiler, DIV_S | FMT(op) | FT(src2) | FS(src1) | FD(dst_r), MOVABLE_INS));
         break;
     }
@@ -1634,27 +1634,27 @@


     switch (type) {
     case SLJIT_EQUAL:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         BR_NZ(EQUAL_FLAG);
         break;
     case SLJIT_NOT_EQUAL:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         BR_Z(EQUAL_FLAG);
         break;
     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         BR_Z(ULESS_FLAG);
         break;
     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         BR_NZ(ULESS_FLAG);
         break;
     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         BR_Z(UGREATER_FLAG);
         break;
     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         BR_NZ(UGREATER_FLAG);
         break;
     case SLJIT_SIG_LESS:
@@ -1677,10 +1677,10 @@
     case SLJIT_MUL_NOT_OVERFLOW:
         BR_NZ(OVERFLOW_FLAG);
         break;
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         BR_F();
         break;
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         BR_T();
         break;
     default:
@@ -1888,37 +1888,37 @@
     jump->flags |= IS_BIT16_COND;


     switch (type & 0xff) {
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         inst = C_UEQ_S;
         if_true = 1;
         break;
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         inst = C_UEQ_S;
         if_true = 0;
         break;
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         inst = C_ULT_S;
         if_true = 1;
         break;
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         inst = C_ULT_S;
         if_true = 0;
         break;
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         inst = C_ULE_S;
         if_true = 0;
         break;
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         inst = C_ULE_S;
         if_true = 1;
         break;
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         inst = C_UN_S;
         if_true = 1;
         break;
     default: /* Make compilers happy. */
         SLJIT_ASSERT_STOP();
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         inst = C_UN_S;
         if_true = 0;
         break;
@@ -2045,14 +2045,14 @@
         break;
     case SLJIT_LESS:
     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_LESS:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_LESS_F64:
+    case SLJIT_GREATER_EQUAL_F64:
         dst_ar = ULESS_FLAG;
         break;
     case SLJIT_GREATER:
     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_GREATER:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_GREATER_F64:
+    case SLJIT_LESS_EQUAL_F64:
         dst_ar = UGREATER_FLAG;
         break;
     case SLJIT_SIG_LESS:
@@ -2073,13 +2073,13 @@
         dst_ar = sugg_dst_ar;
         type ^= 0x1; /* Flip type bit for the XORI below. */
         break;
-    case SLJIT_D_EQUAL:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_EQUAL_F64:
+    case SLJIT_NOT_EQUAL_F64:
         dst_ar = EQUAL_FLAG;
         break;


-    case SLJIT_D_UNORDERED:
-    case SLJIT_D_ORDERED:
+    case SLJIT_UNORDERED_F64:
+    case SLJIT_ORDERED_F64:
         FAIL_IF(push_inst(compiler, CFC1 | TA(sugg_dst_ar) | DA(FCSR_REG), sugg_dst_ar));
         FAIL_IF(push_inst(compiler, SRL | TA(sugg_dst_ar) | DA(sugg_dst_ar) | SH_IMM(23), sugg_dst_ar));
         FAIL_IF(push_inst(compiler, ANDI | SA(sugg_dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar));


Modified: code/trunk/src/sljit/sljitNativePPC_common.c
===================================================================
--- code/trunk/src/sljit/sljitNativePPC_common.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativePPC_common.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -1257,33 +1257,33 @@
     case SLJIT_BREAKPOINT:
     case SLJIT_NOP:
         return push_inst(compiler, NOP);
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
         FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0)));
 #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
         FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1)));
-        return push_inst(compiler, (op == SLJIT_LUMUL ? MULHDU : MULHD) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
+        return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHDU : MULHD) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
 #else
         FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R1)));
-        return push_inst(compiler, (op == SLJIT_LUMUL ? MULHWU : MULHW) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
+        return push_inst(compiler, (op == SLJIT_LMUL_UW ? MULHWU : MULHW) | D(SLJIT_R1) | A(TMP_REG1) | B(SLJIT_R1));
 #endif
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
         FAIL_IF(push_inst(compiler, OR | S(SLJIT_R0) | A(TMP_REG1) | B(SLJIT_R0)));
 #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-        FAIL_IF(push_inst(compiler, (int_op ? (op == SLJIT_UDIVMOD ? DIVWU : DIVW) : (op == SLJIT_UDIVMOD ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
+        FAIL_IF(push_inst(compiler, (int_op ? (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) : (op == SLJIT_DIVMOD_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
         FAIL_IF(push_inst(compiler, (int_op ? MULLW : MULLD) | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1)));
 #else
-        FAIL_IF(push_inst(compiler, (op == SLJIT_UDIVMOD ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
+        FAIL_IF(push_inst(compiler, (op == SLJIT_DIVMOD_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1)));
         FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_R1) | A(SLJIT_R0) | B(SLJIT_R1)));
 #endif
         return push_inst(compiler, SUBF | D(SLJIT_R1) | A(SLJIT_R1) | B(TMP_REG1));
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
 #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
-        return push_inst(compiler, (int_op ? (op == SLJIT_UDIVI ? DIVWU : DIVW) : (op == SLJIT_UDIVI ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
+        return push_inst(compiler, (int_op ? (op == SLJIT_DIV_UW ? DIVWU : DIVW) : (op == SLJIT_DIV_UW ? DIVDU : DIVD)) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
 #else
-        return push_inst(compiler, (op == SLJIT_UDIVI ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
+        return push_inst(compiler, (op == SLJIT_DIV_UW ? DIVWU : DIVW) | D(SLJIT_R0) | A(SLJIT_R0) | B(SLJIT_R1));
 #endif
     }


@@ -1691,8 +1691,8 @@
#endif
}

-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 6))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double)
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 6))
+#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)

#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
#define FLOAT_TMP_MEM_OFFSET (6 * sizeof(sljit_sw))
@@ -1709,7 +1709,7 @@

#endif /* SLJIT_CONFIG_PPC_64 */

-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1721,12 +1721,12 @@


 #if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
     op = GET_OPCODE(op);
-    FAIL_IF(push_inst(compiler, (op == SLJIT_CONVI_FROMD ? FCTIWZ : FCTIDZ) | FD(TMP_FREG1) | FB(src)));
+    FAIL_IF(push_inst(compiler, (op == SLJIT_CONV_S32_FROM_F64 ? FCTIWZ : FCTIDZ) | FD(TMP_FREG1) | FB(src)));


     if (dst == SLJIT_UNUSED)
         return SLJIT_SUCCESS;


-    if (op == SLJIT_CONVW_FROMD) {
+    if (op == SLJIT_CONV_SW_FROM_F64) {
         if (FAST_IS_REG(dst)) {
             FAIL_IF(emit_op_mem2(compiler, DOUBLE_DATA, TMP_FREG1, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0));
             return emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, dst, SLJIT_MEM1(SLJIT_SP), FLOAT_TMP_MEM_OFFSET, 0, 0);
@@ -1777,7 +1777,7 @@
     return push_inst(compiler, STFIWX | FS(TMP_FREG1) | A(dst & REG_MASK) | B(dstw));
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -1786,12 +1786,12 @@
     sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


     if (src & SLJIT_IMM) {
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
             srcw = (sljit_s32)srcw;
         FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
         src = TMP_REG1;
     }
-    else if (GET_OPCODE(op) == SLJIT_CONVD_FROMI) {
+    else if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32) {
         if (FAST_IS_REG(src))
             FAIL_IF(push_inst(compiler, EXTSW | S(src) | A(TMP_REG1)));
         else
@@ -1810,7 +1810,7 @@


     if (dst & SLJIT_MEM)
         return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
-    if (op & SLJIT_SINGLE_OP)
+    if (op & SLJIT_F32_OP)
         return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
     return SLJIT_SUCCESS;


@@ -1848,7 +1848,7 @@

     if (dst & SLJIT_MEM)
         return emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG1, dst, dstw, 0, 0);
-    if (op & SLJIT_SINGLE_OP)
+    if (op & SLJIT_F32_OP)
         return push_inst(compiler, FRSP | FD(dst_r) | FB(dst_r));
     return SLJIT_SUCCESS;


@@ -1882,11 +1882,11 @@
     compiler->cache_arg = 0;
     compiler->cache_argw = 0;


-    SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
+    SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
-        op ^= SLJIT_SINGLE_OP;
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+        op ^= SLJIT_F32_OP;


     dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG1;


@@ -1896,14 +1896,14 @@
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_CONVD_FROMS:
-        op ^= SLJIT_SINGLE_OP;
-        if (op & SLJIT_SINGLE_OP) {
+    case SLJIT_CONV_F64_FROM_F32:
+        op ^= SLJIT_F32_OP;
+        if (op & SLJIT_F32_OP) {
             FAIL_IF(push_inst(compiler, FRSP | FD(dst_r) | FB(src)));
             break;
         }
         /* Fall through. */
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1)
                 FAIL_IF(push_inst(compiler, FMR | FD(dst_r) | FB(src)));
@@ -1911,10 +1911,10 @@
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
+    case SLJIT_NEG_F64:
         FAIL_IF(push_inst(compiler, FNEG | FD(dst_r) | FB(src)));
         break;
-    case SLJIT_DABS:
+    case SLJIT_ABS_F64:
         FAIL_IF(push_inst(compiler, FABS | FD(dst_r) | FB(src)));
         break;
     }
@@ -1979,19 +1979,19 @@
         src2 = TMP_FREG2;


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
+    case SLJIT_ADD_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_r) | FA(src1) | FB(src2)));
         break;


-    case SLJIT_DSUB:
+    case SLJIT_SUB_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_r) | FA(src1) | FB(src2)));
         break;


-    case SLJIT_DMUL:
+    case SLJIT_MUL_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_r) | FA(src1) | FC(src2) /* FMUL use FC as src2 */));
         break;


-    case SLJIT_DDIV:
+    case SLJIT_DIV_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_r) | FA(src1) | FB(src2)));
         break;
     }
@@ -2075,19 +2075,19 @@
         return (4 << 21) | (2 << 16);


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return (12 << 21) | ((4 + 0) << 16);


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         return (4 << 21) | ((4 + 0) << 16);


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         return (12 << 21) | ((4 + 1) << 16);


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return (4 << 21) | ((4 + 1) << 16);


     case SLJIT_SIG_LESS:
@@ -2110,16 +2110,16 @@
     case SLJIT_MUL_NOT_OVERFLOW:
         return (4 << 21) | (3 << 16);


-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return (12 << 21) | ((4 + 2) << 16);


-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         return (4 << 21) | ((4 + 2) << 16);


-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return (12 << 21) | ((4 + 3) << 16);


-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return (4 << 21) | ((4 + 3) << 16);


     default:
@@ -2255,23 +2255,23 @@
         break;


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         GET_CR_BIT(4 + 0, reg);
         break;


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         GET_CR_BIT(4 + 0, reg);
         INVERT_BIT(reg);
         break;


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         GET_CR_BIT(4 + 1, reg);
         break;


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         GET_CR_BIT(4 + 1, reg);
         INVERT_BIT(reg);
         break;
@@ -2305,20 +2305,20 @@
         INVERT_BIT(reg);
         break;


-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         GET_CR_BIT(4 + 2, reg);
         break;


-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         GET_CR_BIT(4 + 2, reg);
         INVERT_BIT(reg);
         break;


-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         GET_CR_BIT(4 + 3, reg);
         break;


-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         GET_CR_BIT(4 + 3, reg);
         INVERT_BIT(reg);
         break;


Modified: code/trunk/src/sljit/sljitNativeSPARC_common.c
===================================================================
--- code/trunk/src/sljit/sljitNativeSPARC_common.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeSPARC_common.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -769,30 +769,30 @@
         return push_inst(compiler, TA, UNMOVABLE_INS);
     case SLJIT_NOP:
         return push_inst(compiler, NOP, UNMOVABLE_INS);
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-        FAIL_IF(push_inst(compiler, (op == SLJIT_LUMUL ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
+        FAIL_IF(push_inst(compiler, (op == SLJIT_LMUL_UW ? UMUL : SMUL) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
         return push_inst(compiler, RDY | D(SLJIT_R1), DR(SLJIT_R1));
 #else
 #error "Implementation required"
 #endif
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
-        SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
+        SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);
 #if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
-        if ((op | 0x2) == SLJIT_UDIVI)
+        if ((op | 0x2) == SLJIT_DIV_UW)
             FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS));
         else {
             FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_R0) | IMM(31), DR(TMP_REG1)));
             FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS));
         }
-        if (op <= SLJIT_SDIVMOD)
+        if (op <= SLJIT_DIVMOD_SW)
             FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_R0), DR(TMP_REG2)));
-        FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_UDIVI ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
-        if (op >= SLJIT_UDIVI)
+        FAIL_IF(push_inst(compiler, ((op | 0x2) == SLJIT_DIV_UW ? UDIV : SDIV) | D(SLJIT_R0) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R0)));
+        if (op >= SLJIT_DIV_UW)
             return SLJIT_SUCCESS;
         FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_R1) | S1(SLJIT_R0) | S2(SLJIT_R1), DR(SLJIT_R1)));
         return push_inst(compiler, SUB | D(SLJIT_R1) | S1(TMP_REG2) | S2(SLJIT_R1), DR(SLJIT_R1));
@@ -949,11 +949,11 @@
 #endif
 }


-#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7))
-#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double)
+#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_F32_OP) >> 7))
+#define SELECT_FOP(op, single, double) ((op & SLJIT_F32_OP) ? single : double)
#define FLOAT_TMP_MEM_OFFSET (22 * sizeof(sljit_sw))

-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -978,7 +978,7 @@
     return emit_op_mem2(compiler, SINGLE_DATA, TMP_FREG1, dst, dstw, 0, 0);
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -986,7 +986,7 @@


     if (src & SLJIT_IMM) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
             srcw = (sljit_s32)srcw;
 #endif
         FAIL_IF(load_immediate(compiler, TMP_REG1, srcw));
@@ -1039,11 +1039,11 @@
     compiler->cache_arg = 0;
     compiler->cache_argw = 0;


-    SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
+    SLJIT_COMPILE_ASSERT((SLJIT_F32_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error);
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMS)
-        op ^= SLJIT_SINGLE_OP;
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32)
+        op ^= SLJIT_F32_OP;


     dst_r = FAST_IS_REG(dst) ? (dst << 1) : TMP_FREG1;


@@ -1055,11 +1055,11 @@
         src <<= 1;


     switch (GET_OPCODE(op)) {
-    case SLJIT_DMOV:
+    case SLJIT_MOV_F64:
         if (src != dst_r) {
             if (dst_r != TMP_FREG1) {
                 FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r) | S2A(src), MOVABLE_INS));
-                if (!(op & SLJIT_SINGLE_OP))
+                if (!(op & SLJIT_F32_OP))
                     FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
             }
             else
@@ -1066,19 +1066,19 @@
                 dst_r = src;
         }
         break;
-    case SLJIT_DNEG:
+    case SLJIT_NEG_F64:
         FAIL_IF(push_inst(compiler, FNEGS | DA(dst_r) | S2A(src), MOVABLE_INS));
-        if (dst_r != src && !(op & SLJIT_SINGLE_OP))
+        if (dst_r != src && !(op & SLJIT_F32_OP))
             FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
         break;
-    case SLJIT_DABS:
+    case SLJIT_ABS_F64:
         FAIL_IF(push_inst(compiler, FABSS | DA(dst_r) | S2A(src), MOVABLE_INS));
-        if (dst_r != src && !(op & SLJIT_SINGLE_OP))
+        if (dst_r != src && !(op & SLJIT_F32_OP))
             FAIL_IF(push_inst(compiler, FMOVS | DA(dst_r | 1) | S2A(src | 1), MOVABLE_INS));
         break;
-    case SLJIT_CONVD_FROMS:
+    case SLJIT_CONV_F64_FROM_F32:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSTOD, FDTOS) | DA(dst_r) | S2A(src), MOVABLE_INS));
-        op ^= SLJIT_SINGLE_OP;
+        op ^= SLJIT_F32_OP;
         break;
     }


@@ -1146,19 +1146,19 @@
         src2 = TMP_FREG2;


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
+    case SLJIT_ADD_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
         break;


-    case SLJIT_DSUB:
+    case SLJIT_SUB_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
         break;


-    case SLJIT_DMUL:
+    case SLJIT_MUL_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
         break;


-    case SLJIT_DDIV:
+    case SLJIT_DIV_F64:
         FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | DA(dst_r) | S1A(src1) | S2A(src2), MOVABLE_INS));
         break;
     }
@@ -1236,28 +1236,28 @@
     switch (type) {
     case SLJIT_EQUAL:
     case SLJIT_MUL_NOT_OVERFLOW:
-    case SLJIT_D_NOT_EQUAL: /* Unordered. */
+    case SLJIT_NOT_EQUAL_F64: /* Unordered. */
         return DA(0x1);


     case SLJIT_NOT_EQUAL:
     case SLJIT_MUL_OVERFLOW:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return DA(0x9);


     case SLJIT_LESS:
-    case SLJIT_D_GREATER: /* Unordered. */
+    case SLJIT_GREATER_F64: /* Unordered. */
         return DA(0x5);


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return DA(0xd);


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER_EQUAL: /* Unordered. */
+    case SLJIT_GREATER_EQUAL_F64: /* Unordered. */
         return DA(0xc);


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return DA(0x4);


     case SLJIT_SIG_LESS:
@@ -1273,11 +1273,11 @@
         return DA(0x2);


     case SLJIT_OVERFLOW:
-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return DA(0x7);


     case SLJIT_NOT_OVERFLOW:
-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return DA(0xf);


     default:
@@ -1298,7 +1298,7 @@
     set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
     type &= 0xff;


-    if (type < SLJIT_D_EQUAL) {
+    if (type < SLJIT_EQUAL_F64) {
         jump->flags |= IS_COND;
         if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET))
             jump->flags |= IS_MOVABLE;
@@ -1395,7 +1395,7 @@
     }


     type &= 0xff;
-    if (type < SLJIT_D_EQUAL)
+    if (type < SLJIT_EQUAL_F64)
         FAIL_IF(push_inst(compiler, BICC | get_cc(type) | 3, UNMOVABLE_INS));
     else
         FAIL_IF(push_inst(compiler, FBFCC | get_cc(type) | 3, UNMOVABLE_INS));


Modified: code/trunk/src/sljit/sljitNativeTILEGX_64.c
===================================================================
--- code/trunk/src/sljit/sljitNativeTILEGX_64.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeTILEGX_64.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -2180,10 +2180,12 @@
     case SLJIT_BREAKPOINT:
         return PI(BPT);


-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
         SLJIT_ASSERT_STOP();
     }



Modified: code/trunk/src/sljit/sljitNativeX86_common.c
===================================================================
--- code/trunk/src/sljit/sljitNativeX86_common.c    2016-03-01 12:02:58 UTC (rev 504)
+++ code/trunk/src/sljit/sljitNativeX86_common.c    2016-03-04 08:51:53 UTC (rev 505)
@@ -334,27 +334,27 @@
 {
     switch (type) {
     case SLJIT_EQUAL:
-    case SLJIT_D_EQUAL:
+    case SLJIT_EQUAL_F64:
         return 0x84 /* je */;


     case SLJIT_NOT_EQUAL:
-    case SLJIT_D_NOT_EQUAL:
+    case SLJIT_NOT_EQUAL_F64:
         return 0x85 /* jne */;


     case SLJIT_LESS:
-    case SLJIT_D_LESS:
+    case SLJIT_LESS_F64:
         return 0x82 /* jc */;


     case SLJIT_GREATER_EQUAL:
-    case SLJIT_D_GREATER_EQUAL:
+    case SLJIT_GREATER_EQUAL_F64:
         return 0x83 /* jae */;


     case SLJIT_GREATER:
-    case SLJIT_D_GREATER:
+    case SLJIT_GREATER_F64:
         return 0x87 /* jnbe */;


     case SLJIT_LESS_EQUAL:
-    case SLJIT_D_LESS_EQUAL:
+    case SLJIT_LESS_EQUAL_F64:
         return 0x86 /* jbe */;


     case SLJIT_SIG_LESS:
@@ -377,10 +377,10 @@
     case SLJIT_MUL_NOT_OVERFLOW:
         return 0x81 /* jno */;


-    case SLJIT_D_UNORDERED:
+    case SLJIT_UNORDERED_F64:
         return 0x8a /* jp */;


-    case SLJIT_D_ORDERED:
+    case SLJIT_ORDERED_F64:
         return 0x8b /* jpo */;
     }
     return 0;
@@ -742,12 +742,12 @@
         INC_SIZE(1);
         *inst = NOP;
         break;
-    case SLJIT_LUMUL:
-    case SLJIT_LSMUL:
-    case SLJIT_UDIVMOD:
-    case SLJIT_SDIVMOD:
-    case SLJIT_UDIVI:
-    case SLJIT_SDIVI:
+    case SLJIT_LMUL_UW:
+    case SLJIT_LMUL_SW:
+    case SLJIT_DIVMOD_UW:
+    case SLJIT_DIVMOD_SW:
+    case SLJIT_DIV_UW:
+    case SLJIT_DIV_SW:
         compiler->flags_saved = 0;
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 #ifdef _WIN64
@@ -765,10 +765,10 @@
 #endif
         compiler->mode32 = op & SLJIT_I32_OP;
 #endif
-        SLJIT_COMPILE_ASSERT((SLJIT_UDIVMOD & 0x2) == 0 && SLJIT_UDIVI - 0x2 == SLJIT_UDIVMOD, bad_div_opcode_assignments);
+        SLJIT_COMPILE_ASSERT((SLJIT_DIVMOD_UW & 0x2) == 0 && SLJIT_DIV_UW - 0x2 == SLJIT_DIVMOD_UW, bad_div_opcode_assignments);


         op = GET_OPCODE(op);
-        if ((op | 0x2) == SLJIT_UDIVI) {
+        if ((op | 0x2) == SLJIT_DIV_UW) {
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
             EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
             inst = emit_x86_instruction(compiler, 1, SLJIT_R1, 0, SLJIT_R1, 0);
@@ -779,7 +779,7 @@
             *inst = XOR_r_rm;
         }


-        if ((op | 0x2) == SLJIT_SDIVI) {
+        if ((op | 0x2) == SLJIT_DIV_SW) {
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64)
             EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R1, 0);
 #endif
@@ -810,10 +810,10 @@
         FAIL_IF(!inst);
         INC_SIZE(2);
         *inst++ = GROUP_F7;
-        *inst = MOD_REG | ((op >= SLJIT_UDIVMOD) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
+        *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_map[TMP_REG1] : reg_map[SLJIT_R1]);
 #else
 #ifdef _WIN64
-        size = (!compiler->mode32 || op >= SLJIT_UDIVMOD) ? 3 : 2;
+        size = (!compiler->mode32 || op >= SLJIT_DIVMOD_UW) ? 3 : 2;
 #else
         size = (!compiler->mode32) ? 3 : 2;
 #endif
@@ -822,11 +822,11 @@
         INC_SIZE(size);
 #ifdef _WIN64
         if (!compiler->mode32)
-            *inst++ = REX_W | ((op >= SLJIT_UDIVMOD) ? REX_B : 0);
-        else if (op >= SLJIT_UDIVMOD)
+            *inst++ = REX_W | ((op >= SLJIT_DIVMOD_UW) ? REX_B : 0);
+        else if (op >= SLJIT_DIVMOD_UW)
             *inst++ = REX_B;
         *inst++ = GROUP_F7;
-        *inst = MOD_REG | ((op >= SLJIT_UDIVMOD) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
+        *inst = MOD_REG | ((op >= SLJIT_DIVMOD_UW) ? reg_lmap[TMP_REG1] : reg_lmap[SLJIT_R1]);
 #else
         if (!compiler->mode32)
             *inst++ = REX_W;
@@ -835,26 +835,26 @@
 #endif
 #endif
         switch (op) {
-        case SLJIT_LUMUL:
+        case SLJIT_LMUL_UW:
             *inst |= MUL;
             break;
-        case SLJIT_LSMUL:
+        case SLJIT_LMUL_SW:
             *inst |= IMUL;
             break;
-        case SLJIT_UDIVMOD:
-        case SLJIT_UDIVI:
+        case SLJIT_DIVMOD_UW:
+        case SLJIT_DIV_UW:
             *inst |= DIV;
             break;
-        case SLJIT_SDIVMOD:
-        case SLJIT_SDIVI:
+        case SLJIT_DIVMOD_SW:
+        case SLJIT_DIV_SW:
             *inst |= IDIV;
             break;
         }
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64)
-        if (op <= SLJIT_SDIVMOD)
+        if (op <= SLJIT_DIVMOD_SW)
             EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
 #else
-        if (op >= SLJIT_UDIVI)
+        if (op >= SLJIT_DIV_UW)
             EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
 #endif
         break;
@@ -2322,7 +2322,7 @@
     return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw);
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convw_fromd(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -2330,11 +2330,11 @@
     sljit_u8 *inst;


 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-    if (GET_OPCODE(op) == SLJIT_CONVW_FROMD)
+    if (GET_OPCODE(op) == SLJIT_CONV_SW_FROM_F64)
         compiler->mode32 = 0;
 #endif


-    inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_SINGLE_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
+    inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP2, dst_r, 0, src, srcw);
     FAIL_IF(!inst);
     *inst++ = GROUP_0F;
     *inst = CVTTSD2SI_r_xm;
@@ -2344,7 +2344,7 @@
     return SLJIT_SUCCESS;
 }


-static SLJIT_INLINE sljit_s32 sljit_emit_fop1_convd_fromw(struct sljit_compiler *compiler, sljit_s32 op,
+static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_f64_from_sw(struct sljit_compiler *compiler, sljit_s32 op,
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw)
 {
@@ -2352,13 +2352,13 @@
     sljit_u8 *inst;


 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMW)
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_SW)
         compiler->mode32 = 0;
 #endif


     if (src & SLJIT_IMM) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-        if (GET_OPCODE(op) == SLJIT_CONVD_FROMI)
+        if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_S32)
             srcw = (sljit_s32)srcw;
 #endif
         EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
@@ -2366,7 +2366,7 @@
         srcw = 0;
     }


-    inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_SINGLE_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
+    inst = emit_x86_instruction(compiler, 2 | ((op & SLJIT_F32_OP) ? EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2_OP1, dst_r, 0, src, srcw);
     FAIL_IF(!inst);
     *inst++ = GROUP_0F;
     *inst = CVTSI2SD_x_rm;
@@ -2375,7 +2375,7 @@
     compiler->mode32 = 1;
 #endif
     if (dst_r == TMP_FREG)
-        return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+        return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
     return SLJIT_SUCCESS;
 }


@@ -2385,10 +2385,10 @@
 {
     compiler->flags_saved = 0;
     if (!FAST_IS_REG(src1)) {
-        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
         src1 = TMP_FREG;
     }
-    return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_SINGLE_OP), src1, src2, src2w);
+    return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
 }


 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
@@ -2404,31 +2404,31 @@
     CHECK_ERROR();
     SELECT_FOP1_OPERATION_WITH_CHECKS(compiler, op, dst, dstw, src, srcw);


-    if (GET_OPCODE(op) == SLJIT_DMOV) {
+    if (GET_OPCODE(op) == SLJIT_MOV_F64) {
         if (FAST_IS_REG(dst))
-            return emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst, src, srcw);
+            return emit_sse2_load(compiler, op & SLJIT_F32_OP, dst, src, srcw);
         if (FAST_IS_REG(src))
-            return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, src);
-        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src, srcw));
-        return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+            return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, src);
+        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src, srcw));
+        return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
     }


-    if (GET_OPCODE(op) == SLJIT_CONVD_FROMS) {
+    if (GET_OPCODE(op) == SLJIT_CONV_F64_FROM_F32) {
         dst_r = FAST_IS_REG(dst) ? dst : TMP_FREG;
         if (FAST_IS_REG(src)) {
             /* We overwrite the high bits of source. From SLJIT point of view,
                this is not an issue.
                Note: In SSE3, we could also use MOVDDUP and MOVSLDUP. */
-            FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_SINGLE_OP, src, src, 0));
+            FAIL_IF(emit_sse2_logic(compiler, UNPCKLPD_x_xm, op & SLJIT_F32_OP, src, src, 0));
         }
         else {
-            FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_SINGLE_OP), TMP_FREG, src, srcw));
+            FAIL_IF(emit_sse2_load(compiler, !(op & SLJIT_F32_OP), TMP_FREG, src, srcw));
             src = TMP_FREG;
         }


-        FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_SINGLE_OP, dst_r, src, 0));
+        FAIL_IF(emit_sse2_logic(compiler, CVTPD2PS_x_xm, op & SLJIT_F32_OP, dst_r, src, 0));
         if (dst_r == TMP_FREG)
-            return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+            return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
         return SLJIT_SUCCESS;
     }


@@ -2435,25 +2435,25 @@
     if (SLOW_IS_REG(dst)) {
         dst_r = dst;
         if (dst != src)
-            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw));
+            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
     }
     else {
         dst_r = TMP_FREG;
-        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw));
+        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DNEG:
-        FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer : sse2_buffer + 8)));
+    case SLJIT_NEG_F64:
+        FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer : sse2_buffer + 8)));
         break;


-    case SLJIT_DABS:
-        FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
+    case SLJIT_ABS_F64:
+        FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_F32_OP ? sse2_buffer + 4 : sse2_buffer + 12)));
         break;
     }


     if (dst_r == TMP_FREG)
-        return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+        return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
     return SLJIT_SUCCESS;
 }


@@ -2478,43 +2478,43 @@
         dst_r = dst;
         if (dst == src1)
             ; /* Do nothing here. */
-        else if (dst == src2 && (op == SLJIT_DADD || op == SLJIT_DMUL)) {
+        else if (dst == src2 && (op == SLJIT_ADD_F64 || op == SLJIT_MUL_F64)) {
             /* Swap arguments. */
             src2 = src1;
             src2w = src1w;
         }
         else if (dst != src2)
-            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src1, src1w));
+            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src1, src1w));
         else {
             dst_r = TMP_FREG;
-            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+            FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
         }
     }
     else {
         dst_r = TMP_FREG;
-        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w));
+        FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
     }


     switch (GET_OPCODE(op)) {
-    case SLJIT_DADD:
-        FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+    case SLJIT_ADD_F64:
+        FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
         break;


-    case SLJIT_DSUB:
-        FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+    case SLJIT_SUB_F64:
+        FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
         break;


-    case SLJIT_DMUL:
-        FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+    case SLJIT_MUL_F64:
+        FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
         break;


-    case SLJIT_DDIV:
-        FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w));
+    case SLJIT_DIV_F64:
+        FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_F32_OP, dst_r, src2, src2w));
         break;
     }


     if (dst_r == TMP_FREG)
-        return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG);
+        return emit_sse2_store(compiler, op & SLJIT_F32_OP, dst, dstw, TMP_FREG);
     return SLJIT_SUCCESS;
 }


@@ -2966,7 +2966,7 @@
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     CHECK_ARGUMENT(sljit_x86_is_cmov_available());
     CHECK_ARGUMENT(!(type & ~(0xff | SLJIT_I32_OP)));
-    CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_D_ORDERED);
+    CHECK_ARGUMENT((type & 0xff) >= SLJIT_EQUAL && (type & 0xff) <= SLJIT_ORDERED_F64);
     CHECK_ARGUMENT(FUNCTION_CHECK_IS_REG(dst_reg & ~SLJIT_I32_OP));
     FUNCTION_CHECK_SRC(src, srcw);
 #endif
@@ -2974,7 +2974,7 @@
     if (SLJIT_UNLIKELY(!!compiler->verbose)) {
         fprintf(compiler->verbose, "  x86_cmov%s %s%s, ",
             !(dst_reg & SLJIT_I32_OP) ? "" : ".i",
-            JUMP_PREFIX(type), jump_names[type & 0xff]);
+            jump_names[type & 0xff], JUMP_POSTFIX(type));
         sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP);
         fprintf(compiler->verbose, ", ");
         sljit_verbose_param(compiler, src, srcw);