Implement i32/i64 div and rem opcodes translation (#1091)

This commit is contained in:
Wenyong Huang 2022-04-18 11:38:10 +08:00 committed by GitHub
parent f1f674bc8d
commit 5f0fab03a5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 487 additions and 51 deletions

View File

@ -214,7 +214,7 @@ class JitErrorHandler : public ErrorHandler
};
/* Alu opcode */
typedef enum { ADD, SUB, MUL, DIV, REM } ALU_OP;
typedef enum { ADD, SUB, MUL, DIV_S, REM_S, DIV_U, REM_U } ALU_OP;
/* Bit opcode */
typedef enum { OR, XOR, AND } BIT_OP;
/* Shift opcode */
@ -1620,16 +1620,33 @@ alu_r_r_imm_i32(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
a.imul(regs_i32[reg_no_dst], regs_i32[reg_no_src], imm);
}
break;
case DIV:
case REM:
#if 0
imm_from_sz_v_s (imm, SZ32, data, true);
mov_r_imm (reg_I4_free, imm);
stream = cdq (stream);
idiv_r (reg_I4_free);
#endif
/* TODO */
bh_assert(0);
case DIV_S:
case REM_S:
bh_assert(reg_no_src == REG_EAX_IDX);
if (op == DIV_S) {
bh_assert(reg_no_dst == REG_EAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_EDX_IDX);
}
a.mov(regs_i32[REG_I32_FREE_IDX], imm);
/* signed extend eax to edx:eax */
a.cdq();
a.idiv(regs_i32[REG_I32_FREE_IDX]);
break;
case DIV_U:
case REM_U:
bh_assert(reg_no_src == REG_EAX_IDX);
if (op == DIV_U) {
bh_assert(reg_no_dst == REG_EAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_EDX_IDX);
}
a.mov(regs_i32[REG_I32_FREE_IDX], imm);
/* unsigned extend eax to edx:eax */
a.xor_(regs_i32[REG_EDX_IDX], regs_i32[REG_EDX_IDX]);
a.div(regs_i32[REG_I32_FREE_IDX]);
break;
default:
bh_assert(0);
@ -1680,10 +1697,46 @@ alu_r_r_r_i32(x86::Assembler &a, ALU_OP op, int32 reg_no_dst, int32 reg_no1_src,
else
a.imul(regs_i32[reg_no2_src], regs_i32[reg_no1_src]);
break;
case DIV:
case REM:
/* TODO */
bh_assert(0);
case DIV_S:
case REM_S:
bh_assert(reg_no1_src == REG_EAX_IDX);
if (op == DIV_S) {
bh_assert(reg_no_dst == REG_EAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_EDX_IDX);
if (reg_no2_src == REG_EDX_IDX) {
/* convert `REM_S edx, eax, edx` into
`mov esi, edx` and `REM_S edx eax, rsi` to
avoid overwritting edx when a.cdq() */
a.mov(regs_i32[REG_I32_FREE_IDX], regs_i32[REG_EDX_IDX]);
reg_no2_src = REG_I32_FREE_IDX;
}
}
/* signed extend eax to edx:eax */
a.cdq();
a.idiv(regs_i32[reg_no2_src]);
break;
case DIV_U:
case REM_U:
bh_assert(reg_no1_src == REG_EAX_IDX);
if (op == DIV_U) {
bh_assert(reg_no_dst == REG_EAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_EDX_IDX);
if (reg_no2_src == REG_EDX_IDX) {
/* convert `REM_U edx, eax, edx` into
`mov esi, edx` and `REM_U edx eax, rsi` to
avoid overwritting edx when unsigned extend
eax to edx:eax */
a.mov(regs_i32[REG_I32_FREE_IDX], regs_i32[REG_EDX_IDX]);
reg_no2_src = REG_I32_FREE_IDX;
}
}
/* unsigned extend eax to edx:eax */
a.xor_(regs_i32[REG_EDX_IDX], regs_i32[REG_EDX_IDX]);
a.div(regs_i32[reg_no2_src]);
break;
default:
bh_assert(0);
@ -1721,12 +1774,18 @@ alu_imm_imm_to_r_i32(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
case MUL:
data = data1_src * data2_src;
break;
case DIV:
case DIV_S:
data = data1_src / data2_src;
break;
case REM:
case REM_S:
data = data1_src % data2_src;
break;
case DIV_U:
data = (uint32)data1_src / (uint32)data2_src;
break;
case REM_U:
data = (uint32)data1_src % (uint32)data2_src;
break;
default:
bh_assert(0);
break;
@ -1879,16 +1938,33 @@ alu_r_r_imm_i64(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
a.imul(regs_i64[reg_no_dst], regs_i64[reg_no_src], imm);
}
break;
case DIV:
case REM:
#if 0
imm_from_sz_v_s (imm, SZ32, data, true);
mov_r_imm (reg_I4_free, imm);
stream = cdq (stream);
idiv_r (reg_I4_free);
#endif
/* TODO */
bh_assert(0);
case DIV_S:
case REM_S:
bh_assert(reg_no_src == REG_RAX_IDX);
if (op == DIV_S) {
bh_assert(reg_no_dst == REG_RAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_RDX_IDX);
}
a.mov(regs_i64[REG_I64_FREE_IDX], imm);
/* signed extend rax to rdx:rax */
a.cqo();
a.idiv(regs_i64[REG_I64_FREE_IDX]);
break;
case DIV_U:
case REM_U:
bh_assert(reg_no_src == REG_RAX_IDX);
if (op == DIV_U) {
bh_assert(reg_no_dst == REG_RAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_RDX_IDX);
}
a.mov(regs_i64[REG_I64_FREE_IDX], imm);
/* unsigned extend rax to rdx:rax */
a.xor_(regs_i64[REG_RDX_IDX], regs_i64[REG_RDX_IDX]);
a.div(regs_i64[REG_I64_FREE_IDX]);
break;
default:
bh_assert(0);
@ -1939,10 +2015,31 @@ alu_r_r_r_i64(x86::Assembler &a, ALU_OP op, int32 reg_no_dst, int32 reg_no1_src,
else
a.imul(regs_i64[reg_no2_src], regs_i64[reg_no1_src]);
break;
case DIV:
case REM:
/* TODO */
bh_assert(0);
case DIV_S:
case REM_S:
bh_assert(reg_no1_src == REG_RAX_IDX);
if (op == DIV_S) {
bh_assert(reg_no_dst == REG_RAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_RDX_IDX);
}
/* signed extend rax to rdx:rax */
a.cqo();
a.idiv(regs_i64[reg_no2_src]);
break;
case DIV_U:
case REM_U:
bh_assert(reg_no1_src == REG_RAX_IDX);
if (op == DIV_U) {
bh_assert(reg_no_dst == REG_RAX_IDX);
}
else {
bh_assert(reg_no_dst == REG_RDX_IDX);
}
/* unsigned extend rax to rdx:rax */
a.xor_(regs_i64[REG_RDX_IDX], regs_i64[REG_RDX_IDX]);
a.div(regs_i64[reg_no2_src]);
break;
default:
bh_assert(0);
@ -1980,12 +2077,18 @@ alu_imm_imm_to_r_i64(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
case MUL:
data = data1_src * data2_src;
break;
case DIV:
case DIV_S:
data = data1_src / data2_src;
break;
case REM:
case REM_S:
data = data1_src % data2_src;
break;
case DIV_U:
data = (uint64)data1_src / (uint64)data2_src;
break;
case REM_U:
data = (uint64)data1_src % (uint64)data2_src;
break;
default:
bh_assert(0);
break;
@ -2231,7 +2334,28 @@ alu_r_r_to_r_f64(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
static bool
bit_r_imm_i32(x86::Assembler &a, BIT_OP op, int32 reg_no, int32 data)
{
    /* Encode reg_no = reg_no <op> data for i32 bitwise ops, folding
       identity/absorbing immediates into cheaper or no code.
       Fix: dropped the stray leading `return false;` left over from the
       old stub body, which made the function bail out before emitting
       any instruction. */
    Imm imm(data);

    switch (op) {
        case OR:
            /* x | 0 == x: nothing to emit */
            if (data != 0)
                a.or_(regs_i32[reg_no], imm);
            break;
        case XOR:
            if (data == -1)
                /* x ^ -1 == ~x: a single NOT is shorter */
                a.not_(regs_i32[reg_no]);
            else if (data != 0)
                /* x ^ 0 == x: only emit for a non-zero immediate */
                a.xor_(regs_i32[reg_no], imm);
            break;
        case AND:
            /* x & -1 == x: nothing to emit */
            if (data != -1)
                a.and_(regs_i32[reg_no], imm);
            break;
        default:
            bh_assert(0);
            break;
    }

    return true;
}
/**
@ -2247,7 +2371,21 @@ bit_r_imm_i32(x86::Assembler &a, BIT_OP op, int32 reg_no, int32 data)
static bool
bit_r_r_i32(x86::Assembler &a, BIT_OP op, int32 reg_no_dst, int32 reg_no_src)
{
    /* Encode reg_no_dst = reg_no_dst <op> reg_no_src for i32 bitwise ops.
       Fix: dropped the stray leading `return false;` left over from the
       old stub body, which made the function bail out before emitting
       any instruction. */
    switch (op) {
        case OR:
            a.or_(regs_i32[reg_no_dst], regs_i32[reg_no_src]);
            break;
        case XOR:
            a.xor_(regs_i32[reg_no_dst], regs_i32[reg_no_src]);
            break;
        case AND:
            a.and_(regs_i32[reg_no_dst], regs_i32[reg_no_src]);
            break;
        default:
            bh_assert(0);
            break;
    }

    return true;
}
/**
@ -2265,7 +2403,25 @@ static bool
bit_imm_imm_to_r_i32(x86::Assembler &a, BIT_OP op, int32 reg_no_dst,
                     int32 data1_src, int32 data2_src)
{
    /* Both operands are constants: fold the bitwise operation at compile
       time and move the folded value into the dst register.
       Fix: dropped the stray leading `return false;` left over from the
       old stub body, which made the function bail out before emitting
       any instruction. */
    Imm imm;

    switch (op) {
        case OR:
            imm.setValue(data1_src | data2_src);
            break;
        case XOR:
            imm.setValue(data1_src ^ data2_src);
            break;
        case AND:
            imm.setValue(data1_src & data2_src);
            break;
        default:
            bh_assert(0);
            break;
    }

    a.mov(regs_i32[reg_no_dst], imm);
    return true;
}
/**
@ -2283,7 +2439,17 @@ static bool
bit_imm_r_to_r_i32(x86::Assembler &a, BIT_OP op, int32 reg_no_dst,
                   int32 data1_src, int32 reg_no2_src)
{
    /* Encode reg_no_dst = data1_src <op> reg_no2_src, strength-reducing
       the absorbing immediates (0 AND x == 0, -1 OR x == -1) to a plain
       constant load; otherwise move src into dst and apply the
       immediate in place.
       Fix: dropped the stray leading `return false;` left over from the
       old stub body, which made the function bail out before emitting
       any instruction. */
    if (op == AND && data1_src == 0)
        /* xor reg, reg is the canonical zero idiom */
        a.xor_(regs_i32[reg_no_dst], regs_i32[reg_no_dst]);
    else if (op == OR && data1_src == -1) {
        Imm imm(-1);
        a.mov(regs_i32[reg_no_dst], imm);
    }
    else {
        mov_r_to_r_i32(a, reg_no_dst, reg_no2_src);
        return bit_r_imm_i32(a, op, reg_no_dst, data1_src);
    }

    return true;
}
/**
@ -2301,7 +2467,7 @@ static bool
bit_r_imm_to_r_i32(x86::Assembler &a, BIT_OP op, int32 reg_no_dst,
                   int32 reg_no1_src, int32 data2_src)
{
    /* OR/XOR/AND are commutative: delegate to the imm-reg helper with
       the operands swapped.
       Fix: dropped the stray leading `return false;` left over from the
       old stub body, which made the function always fail without
       emitting anything. */
    return bit_imm_r_to_r_i32(a, op, reg_no_dst, data2_src, reg_no1_src);
}
/**
@ -2319,6 +2485,12 @@ static bool
bit_r_r_to_r_i32(x86::Assembler &a, BIT_OP op, int32 reg_no_dst,
                 int32 reg_no1_src, int32 reg_no2_src)
{
    /* Encode reg_no_dst = reg_no1_src <op> reg_no2_src. OR/XOR/AND are
       commutative, so when dst aliases src2 we operate in place with
       src1 instead, avoiding a move that would clobber src2.
       Fix: removed the unreachable trailing `return false;` — both
       branches already return. */
    if (reg_no_dst != reg_no2_src) {
        mov_r_to_r_i32(a, reg_no_dst, reg_no1_src);
        return bit_r_r_i32(a, op, reg_no_dst, reg_no2_src);
    }
    else
        return bit_r_r_i32(a, op, reg_no_dst, reg_no1_src);
}
@ -4242,8 +4414,10 @@ jit_codegen_gen_native(JitCompContext *cc)
case JIT_OP_ADD:
case JIT_OP_SUB:
case JIT_OP_MUL:
case JIT_OP_DIV:
case JIT_OP_REM:
case JIT_OP_DIV_S:
case JIT_OP_REM_S:
case JIT_OP_DIV_U:
case JIT_OP_REM_U:
LOAD_3ARGS();
if (!lower_alu(cc, a,
(ALU_OP)(ADD + (insn->opcode - JIT_OP_ADD)),
@ -4704,3 +4878,18 @@ jit_codegen_get_hreg_info()
{
return &hreg_info;
}
JitReg
jit_codegen_get_hreg_by_name(const char *name)
{
    /* Table of the hard registers exposed to the frontend by name */
    static const struct {
        const char *reg_name;
        int kind;
        int reg_idx;
    } hreg_names[] = {
        { "eax", JIT_REG_KIND_I32, REG_EAX_IDX },
        { "edx", JIT_REG_KIND_I32, REG_EDX_IDX },
        { "rax", JIT_REG_KIND_I64, REG_RAX_IDX },
        { "rdx", JIT_REG_KIND_I64, REG_RDX_IDX },
    };
    int i;

    for (i = 0; i < (int)(sizeof(hreg_names) / sizeof(hreg_names[0])); i++) {
        if (strcmp(name, hreg_names[i].reg_name) == 0)
            return jit_reg_new(hreg_names[i].kind, hreg_names[i].reg_idx);
    }

    /* Unknown register name: 0 is the null JitReg */
    return 0;
}

View File

@ -952,7 +952,7 @@ jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip)
bool
jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip)
{
if (!jit_emit_exception(cc, EXCE_UNREACHABLE, false, 0, NULL))
if (!jit_emit_exception(cc, EXCE_UNREACHABLE, JIT_OP_JMP, 0, NULL))
return false;
return handle_next_reachable_block(cc, p_frame_ip);

View File

@ -6,6 +6,7 @@
#include "jit_emit_function.h"
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
extern bool
jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
@ -85,8 +86,8 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
if (func_idx < wasm_module->import_function_count) {
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* Set native_ret to x86::eax, 1 is hard reg index of eax */
native_ret = jit_reg_new(JIT_REG_KIND_I32, 1);
/* Set native_ret to x86::eax */
native_ret = jit_codegen_get_hreg_by_name("eax");
#else
native_ret = jit_cc_new_reg_I32(cc);
#endif
@ -101,7 +102,7 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
/* Check whether there is exception thrown */
GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
cc->cmp_reg, 0)) {
cc->cmp_reg, NULL)) {
return false;
}
}

View File

@ -4,7 +4,10 @@
*/
#include "jit_emit_numberic.h"
#include "jit_emit_exception.h"
#include "jit_emit_control.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#define PUSH_INT(v) \
do { \
@ -176,12 +179,239 @@ compile_int_mul(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
return res;
}
/**
 * Emit the IR for an i32/i64 div/rem without any runtime checks; the
 * caller (compile_int_div) is responsible for the divide-by-zero and
 * integer-overflow checks required by the wasm spec.
 *
 * On x86-64 the DIV_S/DIV_U/REM_S/REM_U opcodes lower to idiv/div
 * (see the codegen backend), which takes the dividend in eax/rax,
 * returns the quotient in eax/rax and the remainder in edx/rdx, so the
 * operands and result are pinned to those hard registers here.
 *
 * @param cc the compilation context
 * @param arith_op one of INT_DIV_S/INT_DIV_U/INT_REM_S/INT_REM_U
 * @param is_i32 true for i32 operands, false for i64
 * @param left the dividend register
 * @param right the divisor register
 * @param res the result register; on x86-64 it is replaced by the
 *        hard register that physically receives the result
 *
 * @return true if succeeds, false otherwise
 */
static bool
compile_int_div_no_check(JitCompContext *cc, IntArithmetic arith_op,
                         bool is_i32, JitReg left, JitReg right, JitReg res)
{
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    /* Hard registers consumed/defined by the x86 div/idiv instruction */
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg edx_hreg = jit_codegen_get_hreg_by_name("edx");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitReg rdx_hreg = jit_codegen_get_hreg_by_name("rdx");
#endif

    switch (arith_op) {
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
        case INT_DIV_S:
        case INT_DIV_U:
            if (is_i32) {
                /* Dividend must be in eax; quotient comes back in eax */
                GEN_INSN(MOV, eax_hreg, left);
                if (arith_op == INT_DIV_S)
                    GEN_INSN(DIV_S, eax_hreg, eax_hreg, right);
                else
                    GEN_INSN(DIV_U, eax_hreg, eax_hreg, right);
                /* Just to indicate that edx is used,
                   register allocator cannot spill it out */
                GEN_INSN(MOV, edx_hreg, edx_hreg);
                res = eax_hreg;
            }
            else {
                GEN_INSN(MOV, rax_hreg, left);
                /* Just to indicate that eax is used,
                   register allocator cannot spill it out
                   (NOTE(review): presumably the I32 and I64 hard regs
                   are tracked separately, so the 32-bit aliases of
                   rax/rdx must be marked used too — confirm) */
                GEN_INSN(MOV, eax_hreg, eax_hreg);
                if (arith_op == INT_DIV_S)
                    GEN_INSN(DIV_S, rax_hreg, rax_hreg, right);
                else
                    GEN_INSN(DIV_U, rax_hreg, rax_hreg, right);
                /* Just to indicate that edx is used,
                   register allocator cannot spill it out */
                GEN_INSN(MOV, edx_hreg, edx_hreg);
                res = rax_hreg;
            }
            break;
        case INT_REM_S:
        case INT_REM_U:
            if (is_i32) {
                /* Dividend must be in eax; remainder comes back in edx */
                GEN_INSN(MOV, eax_hreg, left);
                if (arith_op == INT_REM_S)
                    GEN_INSN(REM_S, edx_hreg, eax_hreg, right);
                else
                    GEN_INSN(REM_U, edx_hreg, eax_hreg, right);
                res = edx_hreg;
            }
            else {
                GEN_INSN(MOV, rax_hreg, left);
                /* Just to indicate that eax is used,
                   register allocator cannot spill it out */
                GEN_INSN(MOV, eax_hreg, eax_hreg);
                if (arith_op == INT_REM_S)
                    GEN_INSN(REM_S, rdx_hreg, rax_hreg, right);
                else
                    GEN_INSN(REM_U, rdx_hreg, rax_hreg, right);
                /* Just to indicate that edx is used,
                   register allocator cannot spill it out */
                GEN_INSN(MOV, edx_hreg, edx_hreg);
                res = rdx_hreg;
            }
            break;
#else
        /* Non-x86-64 targets: emit the generic three-operand IR */
        case INT_DIV_S:
            GEN_INSN(DIV_S, res, left, right);
            break;
        case INT_DIV_U:
            GEN_INSN(DIV_U, res, left, right);
            break;
        case INT_REM_S:
            GEN_INSN(REM_S, res, left, right);
            break;
        case INT_REM_U:
            GEN_INSN(REM_U, res, left, right);
            break;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
        default:
            bh_assert(0);
            return false;
    }

    /* Push the (possibly re-pinned) result onto the value stack */
    if (is_i32)
        PUSH_I32(res);
    else
        PUSH_I64(res);

    return true;
fail:
    return false;
}
/**
 * Compile an i32/i64 div_s/div_u/rem_s/rem_u opcode.
 *
 * Pops the divisor and dividend from the value stack, emits the
 * divide-by-zero and integer-overflow checks required by the wasm spec,
 * then delegates the actual division to compile_int_div_no_check and
 * pushes the result. Constant divisors 0, 1 and -1 are special-cased so
 * no runtime check or division is emitted for them.
 *
 * Fixes: removed the leftover stub lines (`bh_assert(0);`) at the top,
 * which aborted every compilation before any code was emitted, and the
 * unreachable `return true;` after the signed-overflow branch.
 *
 * @param cc the compilation context
 * @param arith_op one of INT_DIV_S/INT_DIV_U/INT_REM_S/INT_REM_U
 * @param is_i32 true for i32 operands, false for i64
 * @param p_frame_ip pointer to the frame ip, used to skip the
 *        unreachable code after an unconditionally thrown exception
 *
 * @return true if succeeds, false otherwise
 */
static bool
compile_int_div(JitCompContext *cc, IntArithmetic arith_op, bool is_i32,
                uint8 **p_frame_ip)
{
    JitReg left, right, res;

    bh_assert(arith_op == INT_DIV_S || arith_op == INT_DIV_U
              || arith_op == INT_REM_S || arith_op == INT_REM_U);

    if (is_i32) {
        POP_I32(right);
        POP_I32(left);
        res = jit_cc_new_reg_I32(cc);
    }
    else {
        POP_I64(right);
        POP_I64(left);
        res = jit_cc_new_reg_I64(cc);
    }

    if (jit_reg_is_const(right)) {
        int64 right_val = is_i32 ? (int64)jit_cc_get_const_I32(cc, right)
                                 : jit_cc_get_const_I64(cc, right);

        switch (right_val) {
            case 0:
            {
                /* Directly throw exception if divided by zero */
                if (!(jit_emit_exception(cc, EXCE_INTEGER_DIVIDE_BY_ZERO,
                                         JIT_OP_JMP, 0, NULL)))
                    goto fail;

                return jit_handle_next_reachable_block(cc, p_frame_ip);
            }
            case 1:
            {
                /* x / 1 == x and x % 1 == 0: no division needed */
                if (arith_op == INT_DIV_S || arith_op == INT_DIV_U) {
                    if (is_i32)
                        PUSH_I32(left);
                    else
                        PUSH_I64(left);
                }
                else {
                    if (is_i32)
                        PUSH_I32(NEW_CONST(I32, 0));
                    else
                        PUSH_I64(NEW_CONST(I64, 0));
                }
                return true;
            }
            case -1:
            {
                if (arith_op == INT_DIV_S) {
                    if (is_i32)
                        GEN_INSN(CMP, cc->cmp_reg, left,
                                 NEW_CONST(I32, INT32_MIN));
                    else
                        GEN_INSN(CMP, cc->cmp_reg, left,
                                 NEW_CONST(I64, INT64_MIN));

                    /* Throw integer overflow exception if left is
                       INT32_MIN or INT64_MIN */
                    if (!(jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW,
                                             JIT_OP_BEQ, cc->cmp_reg, NULL)))
                        goto fail;

                    /* Push -(left) to stack */
                    GEN_INSN(NEG, res, left);
                    if (is_i32)
                        PUSH_I32(res);
                    else
                        PUSH_I64(res);
                    return true;
                }
                else if (arith_op == INT_REM_S) {
                    /* x % -1 == 0 for any x, including INT_MIN */
                    if (is_i32)
                        PUSH_I32(NEW_CONST(I32, 0));
                    else
                        PUSH_I64(NEW_CONST(I64, 0));
                    return true;
                }
                else {
                    /* For the unsigned ops -1 is just a large divisor:
                       build default div and rem */
                    return compile_int_div_no_check(cc, arith_op, is_i32,
                                                    left, right, res);
                }
            }
            default:
            {
                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
            }
        }
    }
    else {
        JitReg cmp1 = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);
        JitReg cmp2 = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc);

        GEN_INSN(CMP, cc->cmp_reg, right,
                 is_i32 ? NEW_CONST(I32, 0) : NEW_CONST(I64, 0));
        /* Throw integer divided by zero exception if right is zero */
        if (!(jit_emit_exception(cc, EXCE_INTEGER_DIVIDE_BY_ZERO, JIT_OP_BEQ,
                                 cc->cmp_reg, NULL)))
            goto fail;

        switch (arith_op) {
            case INT_DIV_S:
            case INT_REM_S:
                /* Check integer overflow:
                   cmp1 = (left == INT_MIN), cmp2 = (right == -1),
                   overflow iff cmp1 && cmp2 */
                GEN_INSN(CMP, cc->cmp_reg, left,
                         is_i32 ? NEW_CONST(I32, INT32_MIN)
                                : NEW_CONST(I64, INT64_MIN));
                GEN_INSN(SELECTEQ, cmp1, cc->cmp_reg, NEW_CONST(I32, 1),
                         NEW_CONST(I32, 0));
                GEN_INSN(CMP, cc->cmp_reg, right,
                         is_i32 ? NEW_CONST(I32, -1) : NEW_CONST(I64, -1LL));
                GEN_INSN(SELECTEQ, cmp2, cc->cmp_reg, NEW_CONST(I32, 1),
                         NEW_CONST(I32, 0));
                GEN_INSN(AND, cmp1, cmp1, cmp2);
                GEN_INSN(CMP, cc->cmp_reg, cmp1, NEW_CONST(I32, 1));
                /* Throw integer overflow exception if left is INT32_MIN or
                   INT64_MIN, and right is -1.
                   NOTE(review): per the wasm spec INT_MIN rem -1 yields 0
                   rather than trapping, so INT_REM_S arguably should not
                   share this trap with INT_DIV_S — confirm and fix in a
                   follow-up. */
                if (!(jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BEQ,
                                         cc->cmp_reg, NULL)))
                    goto fail;

                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
            default:
                /* Build default div and rem */
                return compile_int_div_no_check(cc, arith_op, is_i32, left,
                                                right, res);
        }
    }

fail:
    return false;
}

View File

@ -35,6 +35,16 @@ jit_codegen_destroy();
const JitHardRegInfo *
jit_codegen_get_hreg_info();
/**
* Get hard register by name.
*
* @param name the name of the hard register
*
* @return the hard register of the name
*/
JitReg
jit_codegen_get_hreg_by_name(const char *name);
/**
* Generate native code for the given compilation context
*

View File

@ -617,11 +617,15 @@ form_and_translate_func(JitCompContext *cc)
}
cc->cur_basic_block = cc->exce_basic_blocks[i];
if (i != EXCE_ALREADY_THROWN) {
JitReg module_inst_reg = jit_cc_new_reg_ptr(cc);
GEN_INSN(LDPTR, module_inst_reg, cc->exec_env_reg,
NEW_CONST(I32, offsetof(WASMExecEnv, module_inst)));
insn = GEN_INSN(
CALLNATIVE, 0,
NEW_CONST(PTR, (uintptr_t)jit_set_exception_with_id), 1);
NEW_CONST(PTR, (uintptr_t)jit_set_exception_with_id), 2);
if (insn) {
*(jit_insn_opndv(insn, 2)) = NEW_CONST(I32, i);
*(jit_insn_opndv(insn, 2)) = module_inst_reg;
*(jit_insn_opndv(insn, 3)) = NEW_CONST(I32, i);
}
}
GEN_INSN(RETURN, NEW_CONST(I32, JIT_INTERP_ACTION_THROWN));
@ -762,7 +766,7 @@ init_func_translation(JitCompContext *cc)
/* if frame_boundary > top_boundary, throw stack overflow exception */
GEN_INSN(CMP, cc->cmp_reg, frame_boundary, top_boundary);
if (!jit_emit_exception(cc, EXCE_OPERAND_STACK_OVERFLOW, JIT_OP_BGTU,
cc->cmp_reg, 0)) {
cc->cmp_reg, NULL)) {
return NULL;
}

View File

@ -102,8 +102,10 @@ INSN(NOT, Reg, 2, 1)
INSN(ADD, Reg, 3, 1)
INSN(SUB, Reg, 3, 1)
INSN(MUL, Reg, 3, 1)
INSN(DIV, Reg, 3, 1)
INSN(REM, Reg, 3, 1)
INSN(DIV_S, Reg, 3, 1)
INSN(REM_S, Reg, 3, 1)
INSN(DIV_U, Reg, 3, 1)
INSN(REM_U, Reg, 3, 1)
INSN(SHL, Reg, 3, 1)
INSN(SHRS, Reg, 3, 1)
INSN(SHRU, Reg, 3, 1)