[Pcre-svn] [726] code/trunk/src/sljit: JIT compiler update.

Inizio della pagina
Delete this message
Autore: Subversion repository
Data:  
To: pcre-svn
Oggetto: [Pcre-svn] [726] code/trunk/src/sljit: JIT compiler update.
Revision: 726
          http://www.exim.org/viewvc/pcre2?view=rev&revision=726
Author:   zherczeg
Date:     2017-04-03 14:30:17 +0100 (Mon, 03 Apr 2017)
Log Message:
-----------
JIT compiler update.


Modified Paths:
--------------
    code/trunk/src/sljit/sljitConfigInternal.h
    code/trunk/src/sljit/sljitLir.c
    code/trunk/src/sljit/sljitLir.h
    code/trunk/src/sljit/sljitNativeX86_32.c
    code/trunk/src/sljit/sljitNativeX86_64.c
    code/trunk/src/sljit/sljitNativeX86_common.c


Modified: code/trunk/src/sljit/sljitConfigInternal.h
===================================================================
--- code/trunk/src/sljit/sljitConfigInternal.h    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitConfigInternal.h    2017-04-03 13:30:17 UTC (rev 726)
@@ -556,10 +556,10 @@
 #define SLJIT_NUMBER_OF_REGISTERS 12
 #define SLJIT_NUMBER_OF_SAVED_REGISTERS 9
 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-#define SLJIT_LOCALS_OFFSET_BASE ((2 + 6) * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
 #else
 /* Maximum 3 arguments are passed on the stack, +1 for double alignment. */
-#define SLJIT_LOCALS_OFFSET_BASE ((3 + 1 + 6) * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
 #endif /* SLJIT_X86_32_FASTCALL */


#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -567,11 +567,11 @@
#ifndef _WIN64
#define SLJIT_NUMBER_OF_REGISTERS 12
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 6
-#define SLJIT_LOCALS_OFFSET_BASE (sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE 0
#else
#define SLJIT_NUMBER_OF_REGISTERS 12
#define SLJIT_NUMBER_OF_SAVED_REGISTERS 8
-#define SLJIT_LOCALS_OFFSET_BASE ((4 + 2) * sizeof(sljit_sw))
+#define SLJIT_LOCALS_OFFSET_BASE (compiler->locals_offset)
#endif /* _WIN64 */

#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)

Modified: code/trunk/src/sljit/sljitLir.c
===================================================================
--- code/trunk/src/sljit/sljitLir.c    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitLir.c    2017-04-03 13:30:17 UTC (rev 726)
@@ -903,7 +903,7 @@
     SLJIT_UNUSED_ARG(compiler);


 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-    CHECK_ARGUMENT(!(options & ~SLJIT_DOUBLE_ALIGNMENT));
+    CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
     CHECK_ARGUMENT(args >= 0 && args <= 3);
     CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
     CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);
@@ -928,7 +928,7 @@
     sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
 {
 #if (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
-    CHECK_ARGUMENT(!(options & ~SLJIT_DOUBLE_ALIGNMENT));
+    CHECK_ARGUMENT(!(options & ~SLJIT_F64_ALIGNMENT));
     CHECK_ARGUMENT(args >= 0 && args <= 3);
     CHECK_ARGUMENT(scratches >= 0 && scratches <= SLJIT_NUMBER_OF_REGISTERS);
     CHECK_ARGUMENT(saveds >= 0 && saveds <= SLJIT_NUMBER_OF_REGISTERS);


Modified: code/trunk/src/sljit/sljitLir.h
===================================================================
--- code/trunk/src/sljit/sljitLir.h    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitLir.h    2017-04-03 13:30:17 UTC (rev 726)
@@ -332,11 +332,16 @@


 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
     sljit_s32 args;
+    sljit_s32 locals_offset;
+    sljit_s32 saveds_offset;
 #endif


 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
     sljit_s32 mode32;
+#ifdef _WIN64
+    sljit_s32 locals_offset;
 #endif
+#endif


 #if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
     /* Constant pool handling. */
@@ -528,8 +533,8 @@
 */


/* The absolute address returned by sljit_get_local_base with
-offset 0 is aligned to sljit_d. Otherwise it is aligned to sljit_uw. */
-#define SLJIT_DOUBLE_ALIGNMENT 0x00000001
+offset 0 is aligned to sljit_f64. Otherwise it is aligned to sljit_sw. */
+#define SLJIT_F64_ALIGNMENT 0x00000001

 /* The local_size must be >= 0 and <= SLJIT_MAX_LOCAL_SIZE. */
 #define SLJIT_MAX_LOCAL_SIZE    65536


Modified: code/trunk/src/sljit/sljitNativeX86_32.c
===================================================================
--- code/trunk/src/sljit/sljitNativeX86_32.c    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitNativeX86_32.c    2017-04-03 13:30:17 UTC (rev 726)
@@ -76,6 +76,28 @@


     compiler->args = args;


+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+    /* [esp+0] for saving temporaries and third argument for calls. */
+    compiler->saveds_offset = 1 * sizeof(sljit_sw);
+#else
+    /* [esp+0] for saving temporaries and space for maximum three arguments. */
+    if (scratches <= 1)
+        compiler->saveds_offset = 1 * sizeof(sljit_sw);
+    else
+        compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+#endif
+
+    if (scratches > 3)
+        compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
+
+    compiler->locals_offset = compiler->saveds_offset;
+
+    if (saveds > 3)
+        compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw);
+
+    if (options & SLJIT_F64_ALIGNMENT)
+        compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1);
+
     size = 1 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3);
 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
     size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
@@ -133,45 +155,31 @@
     }
 #endif


-    SLJIT_COMPILE_ASSERT(SLJIT_LOCALS_OFFSET >= (2 + 4) * sizeof(sljit_uw), require_at_least_two_words);
+    SLJIT_ASSERT(SLJIT_LOCALS_OFFSET > 0);
+
 #if defined(__APPLE__)
     /* Ignore pushed registers and SLJIT_LOCALS_OFFSET when computing the aligned local size. */
     saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
     local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
 #else
-    if (options & SLJIT_DOUBLE_ALIGNMENT) {
-        local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7);
-
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 17);
-        FAIL_IF(!inst);
-
-        INC_SIZE(17);
-        inst[0] = MOV_r_rm;
-        inst[1] = MOD_REG | (reg_map[TMP_REG1] << 3) | reg_map[SLJIT_SP];
-        inst[2] = GROUP_F7;
-        inst[3] = MOD_REG | (0 << 3) | reg_map[SLJIT_SP];
-        sljit_unaligned_store_sw(inst + 4, 0x4);
-        inst[8] = JNE_i8;
-        inst[9] = 6;
-        inst[10] = GROUP_BINARY_81;
-        inst[11] = MOD_REG | (5 << 3) | reg_map[SLJIT_SP];
-        sljit_unaligned_store_sw(inst + 12, 0x4);
-        inst[16] = PUSH_r + reg_map[TMP_REG1];
-    }
+    if (options & SLJIT_F64_ALIGNMENT)
+        local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1));
     else
-        local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3);
+        local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1));
 #endif


     compiler->local_size = local_size;
+
 #ifdef _WIN32
     if (local_size > 1024) {
 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
         FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
 #else
-        local_size -= SLJIT_LOCALS_OFFSET;
+        /* Space for a single argument. This amount is excluded when the stack is allocated below. */
+        local_size -= sizeof(sljit_sw);
         FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
         FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
-            SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, SLJIT_LOCALS_OFFSET));
+            SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, sizeof(sljit_sw)));
 #endif
         FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
     }
@@ -178,6 +186,33 @@
 #endif


     SLJIT_ASSERT(local_size > 0);
+
+#if !defined(__APPLE__)
+    if (options & SLJIT_F64_ALIGNMENT) {
+        EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_SP, 0);
+
+        /* Some space might be allocated during sljit_grow_stack() above on WIN32. */
+        FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+            SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size + sizeof(sljit_sw)));
+
+#if defined _WIN32 && !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+        if (compiler->local_size > 1024)
+            FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+                TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, sizeof(sljit_sw)));
+#endif
+
+        inst = (sljit_u8*)ensure_buf(compiler, 1 + 6);
+        FAIL_IF(!inst);
+
+        INC_SIZE(6);
+        inst[0] = GROUP_BINARY_81;
+        inst[1] = MOD_REG | AND | reg_map[SLJIT_SP];
+        sljit_unaligned_store_sw(inst + 2, ~(sizeof(sljit_f64) - 1));
+
+        /* The real local size must be used. */
+        return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), compiler->local_size, TMP_REG1, 0);
+    }
+#endif
     return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
         SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
 }
@@ -192,14 +227,36 @@


     compiler->args = args;


+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+    /* [esp+0] for saving temporaries and third argument for calls. */
+    compiler->saveds_offset = 1 * sizeof(sljit_sw);
+#else
+    /* [esp+0] for saving temporaries and space for maximum three arguments. */
+    if (scratches <= 1)
+        compiler->saveds_offset = 1 * sizeof(sljit_sw);
+    else
+        compiler->saveds_offset = ((scratches == 2) ? 2 : 3) * sizeof(sljit_sw);
+#endif
+
+    if (scratches > 3)
+        compiler->saveds_offset += ((scratches > (3 + 6)) ? 6 : (scratches - 3)) * sizeof(sljit_sw);
+
+    compiler->locals_offset = compiler->saveds_offset;
+
+    if (saveds > 3)
+        compiler->locals_offset += (saveds - 3) * sizeof(sljit_sw);
+
+    if (options & SLJIT_F64_ALIGNMENT)
+        compiler->locals_offset = (compiler->locals_offset + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1);
+
 #if defined(__APPLE__)
     saveds = (2 + (scratches > 9 ? (scratches - 9) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
     compiler->local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
 #else
-    if (options & SLJIT_DOUBLE_ALIGNMENT)
-        compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7);
+    if (options & SLJIT_F64_ALIGNMENT)
+        compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_f64) - 1) & ~(sizeof(sljit_f64) - 1));
     else
-        compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3);
+        compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + sizeof(sljit_sw) - 1) & ~(sizeof(sljit_sw) - 1));
 #endif
     return SLJIT_SUCCESS;
 }
@@ -216,19 +273,16 @@
     FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));


     SLJIT_ASSERT(compiler->local_size > 0);
+
+#if !defined(__APPLE__)
+    if (compiler->options & SLJIT_F64_ALIGNMENT)
+        EMIT_MOV(compiler, SLJIT_SP, 0, SLJIT_MEM1(SLJIT_SP), compiler->local_size)
+    else
+        FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+            SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
+#else
     FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
         SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));
-
-#if !defined(__APPLE__)
-    if (compiler->options & SLJIT_DOUBLE_ALIGNMENT) {
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
-        FAIL_IF(!inst);
-
-        INC_SIZE(3);
-        inst[0] = MOV_r_rm;
-        inst[1] = (reg_map[SLJIT_SP] << 3) | 0x4 /* SIB */;
-        inst[2] = (4 << 3) | reg_map[SLJIT_SP];
-    }
 #endif


     size = 2 + (compiler->scratches > 7 ? (compiler->scratches - 7) : 0) +


Modified: code/trunk/src/sljit/sljitNativeX86_64.c
===================================================================
--- code/trunk/src/sljit/sljitNativeX86_64.c    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitNativeX86_64.c    2017-04-03 13:30:17 UTC (rev 726)
@@ -76,6 +76,14 @@
     CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
     set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);


+#ifdef _WIN64
+    /* Two/four register slots for parameters plus space for xmm6 register if needed. */
+    if (fscratches >= 6 || fsaveds >= 1)
+        compiler->locals_offset = 6 * sizeof(sljit_sw);
+    else
+        compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
+#endif
+
     /* Including the return address saved by the call instruction. */
     saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);


@@ -153,7 +161,7 @@
         INC_SIZE(4 + (3 + sizeof(sljit_s32)));
         *inst++ = REX_W;
         *inst++ = GROUP_BINARY_83;
-        *inst++ = MOD_REG | SUB | 4;
+        *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
         /* Allocated size for registers must be divisible by 8. */
         SLJIT_ASSERT(!(saved_register_size & 0x7));
         /* Aligned to 16 byte. */
@@ -178,26 +186,27 @@
     }
 #endif


-    SLJIT_ASSERT(local_size > 0);
-    if (local_size <= 127) {
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
-        FAIL_IF(!inst);
-        INC_SIZE(4);
-        *inst++ = REX_W;
-        *inst++ = GROUP_BINARY_83;
-        *inst++ = MOD_REG | SUB | 4;
-        *inst++ = local_size;
+    if (local_size > 0) {
+        if (local_size <= 127) {
+            inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
+            FAIL_IF(!inst);
+            INC_SIZE(4);
+            *inst++ = REX_W;
+            *inst++ = GROUP_BINARY_83;
+            *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
+            *inst++ = local_size;
+        }
+        else {
+            inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
+            FAIL_IF(!inst);
+            INC_SIZE(7);
+            *inst++ = REX_W;
+            *inst++ = GROUP_BINARY_81;
+            *inst++ = MOD_REG | SUB | reg_map[SLJIT_SP];
+            sljit_unaligned_store_s32(inst, local_size);
+            inst += sizeof(sljit_s32);
+        }
     }
-    else {
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
-        FAIL_IF(!inst);
-        INC_SIZE(7);
-        *inst++ = REX_W;
-        *inst++ = GROUP_BINARY_81;
-        *inst++ = MOD_REG | SUB | 4;
-        sljit_unaligned_store_s32(inst, local_size);
-        inst += sizeof(sljit_s32);
-    }


 #ifdef _WIN64
     /* Save xmm6 register: movaps [rsp + 0x20], xmm6 */
@@ -223,6 +232,14 @@
     CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
     set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);


+#ifdef _WIN64
+    /* Two/four register slots for parameters plus space for xmm6 register if needed. */
+    if (fscratches >= 6 || fsaveds >= 1)
+        compiler->locals_offset = 6 * sizeof(sljit_sw);
+    else
+        compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
+#endif
+
     /* Including the return address saved by the call instruction. */
     saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
     compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
@@ -250,25 +267,26 @@
     }
 #endif


-    SLJIT_ASSERT(compiler->local_size > 0);
-    if (compiler->local_size <= 127) {
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
-        FAIL_IF(!inst);
-        INC_SIZE(4);
-        *inst++ = REX_W;
-        *inst++ = GROUP_BINARY_83;
-        *inst++ = MOD_REG | ADD | 4;
-        *inst = compiler->local_size;
+    if (compiler->local_size > 0) {
+        if (compiler->local_size <= 127) {
+            inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
+            FAIL_IF(!inst);
+            INC_SIZE(4);
+            *inst++ = REX_W;
+            *inst++ = GROUP_BINARY_83;
+            *inst++ = MOD_REG | ADD | 4;
+            *inst = compiler->local_size;
+        }
+        else {
+            inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
+            FAIL_IF(!inst);
+            INC_SIZE(7);
+            *inst++ = REX_W;
+            *inst++ = GROUP_BINARY_81;
+            *inst++ = MOD_REG | ADD | 4;
+            sljit_unaligned_store_s32(inst, compiler->local_size);
+        }
     }
-    else {
-        inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
-        FAIL_IF(!inst);
-        INC_SIZE(7);
-        *inst++ = REX_W;
-        *inst++ = GROUP_BINARY_81;
-        *inst++ = MOD_REG | ADD | 4;
-        sljit_unaligned_store_s32(inst, compiler->local_size);
-    }


     tmp = compiler->scratches;
     for (i = SLJIT_FIRST_SAVED_REG; i <= tmp; i++) {


Modified: code/trunk/src/sljit/sljitNativeX86_common.c
===================================================================
--- code/trunk/src/sljit/sljitNativeX86_common.c    2017-04-01 09:38:58 UTC (rev 725)
+++ code/trunk/src/sljit/sljitNativeX86_common.c    2017-04-03 13:30:17 UTC (rev 726)
@@ -71,8 +71,11 @@
 };


 #define CHECK_EXTRA_REGS(p, w, do) \
-    if (p >= SLJIT_R3 && p <= SLJIT_R8) { \
-        w = SLJIT_LOCALS_OFFSET + ((p) - (SLJIT_R3 + 6)) * sizeof(sljit_sw); \
+    if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
+        if (p <= compiler->scratches) \
+            w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
+        else \
+            w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
         p = SLJIT_MEM1(SLJIT_SP); \
         do; \
     }
@@ -605,6 +608,9 @@
     sljit_s32 dst, sljit_sw dstw,
     sljit_s32 src, sljit_sw srcw);


+#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
+    FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
+
 #ifdef _WIN32
 #include <malloc.h>


@@ -693,9 +699,6 @@
     return SLJIT_SUCCESS;
 }


-#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
-    FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
-
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
 {
     sljit_u8 *inst;
@@ -1880,7 +1883,7 @@
     }


 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-    if (src2 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
+    if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
 #else
     if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) {
 #endif
@@ -2039,14 +2042,13 @@
         EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
     }
     else {
-        /* This case is really difficult, since ecx itself may used for
-           addressing, and we must ensure to work even in that case. */
+        /* This case is complex since ecx itself may be used for
+           addressing, and this case must be supported as well. */
         EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
         EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
 #else
-        /* [esp+0] contains the flags. */
-        EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_sw), SLJIT_PREF_SHIFT_REG, 0);
+        EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
 #endif
         EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
         inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
@@ -2055,7 +2057,7 @@
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
         EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
 #else
-        EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_sw));
+        EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
 #endif
         EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
     }
@@ -2594,11 +2596,10 @@
     sljit_u8 cond_set = 0;
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
     sljit_s32 reg;
-#else
-    /* CHECK_EXTRA_REGS migh overwrite these values. */
+#endif
+    /* ADJUST_LOCAL_OFFSET and CHECK_EXTRA_REGS might overwrite these values. */
     sljit_s32 dst_save = dst;
     sljit_sw dstw_save = dstw;
-#endif


     CHECK_ERROR();
     CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type));
@@ -2641,6 +2642,7 @@
     *inst++ = cond_set;
     *inst++ = MOD_REG | reg_lmap[reg];
     *inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R));
+    /* The movzx instruction does not affect flags. */
     *inst++ = GROUP_0F;
     *inst++ = MOVZX_r_rm8;
     *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg];
@@ -2656,8 +2658,10 @@
         || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
     compiler->skip_checks = 1;
 #endif
-    return sljit_emit_op2(compiler, op, dst, dstw, dst, dstw, TMP_REG1, 0);
-#else /* SLJIT_CONFIG_X86_64 */
+    return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0);
+
+#else
+    /* The SLJIT_CONFIG_X86_32 code path starts here. */
     if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) {
         if (reg_map[dst] <= 4) {
             /* Low byte is accessible. */