Refactor emitting callnative/callbc IRs (#1206)

Also remove clear_values for opcode br_if, since the JIT registers
can still be used within the same basic block.
This commit is contained in:
Wenyong Huang 2022-06-06 11:11:02 +08:00 committed by GitHub
parent 95eb0e3363
commit 5e9f08fb68
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 310 additions and 272 deletions

View File

@ -573,7 +573,12 @@ extend_r32_to_r64(x86::Assembler &a, int32 reg_no_dst, int32 reg_no_src,
a.movsxd(regs_i64[reg_no_dst], regs_i32[reg_no_src]); a.movsxd(regs_i64[reg_no_dst], regs_i32[reg_no_src]);
} }
else { else {
a.xor_(regs_i64[reg_no_dst], regs_i64[reg_no_dst]); /*
* The upper 32-bit will be zero-extended, ref to Intel document,
* 3.4.1.1 General-Purpose Registers: 32-bit operands generate
* a 32-bit result, zero-extended to a 64-bit result in the
* destination general-purpose register
*/
a.mov(regs_i32[reg_no_dst], regs_i32[reg_no_src]); a.mov(regs_i32[reg_no_dst], regs_i32[reg_no_src]);
} }
return true; return true;
@ -653,10 +658,15 @@ mov_m_to_r(x86::Assembler &a, uint32 bytes_dst, uint32 kind_dst, bool is_signed,
case 4: case 4:
if (is_signed) if (is_signed)
a.movsxd(regs_i64[reg_no_dst], m_src); a.movsxd(regs_i64[reg_no_dst], m_src);
else { else
a.xor_(regs_i64[reg_no_dst], regs_i64[reg_no_dst]); /*
a.mov(regs_i64[reg_no_dst], m_src); * The upper 32-bit will be zero-extended, ref to Intel
} * document, 3.4.1.1 General-Purpose Registers: 32-bit
* operands generate a 32-bit result, zero-extended to
* a 64-bit result in the destination general-purpose
* register
*/
a.mov(regs_i32[reg_no_dst], m_src);
break; break;
case 8: case 8:
a.mov(regs_i64[reg_no_dst], m_src); a.mov(regs_i64[reg_no_dst], m_src);
@ -2961,7 +2971,7 @@ alu_imm_r_to_r_f32(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
/* xmm -> m128 */ /* xmm -> m128 */
x86::Mem cache = x86::xmmword_ptr(regs_i64[hreg_info->exec_env_hreg_index], x86::Mem cache = x86::xmmword_ptr(regs_i64[hreg_info->exec_env_hreg_index],
offsetof(WASMExecEnv, jit_cache)); offsetof(WASMExecEnv, jit_cache));
a.movaps(cache, regs_float[reg_no2_src]); a.movups(cache, regs_float[reg_no2_src]);
/* imm -> gp -> xmm */ /* imm -> gp -> xmm */
mov_imm_to_r_f32(a, reg_no_dst, data1_src); mov_imm_to_r_f32(a, reg_no_dst, data1_src);
@ -3135,7 +3145,7 @@ alu_imm_r_to_r_f64(x86::Assembler &a, ALU_OP op, int32 reg_no_dst,
/* xmm -> m128 */ /* xmm -> m128 */
x86::Mem cache = x86::qword_ptr(regs_i64[hreg_info->exec_env_hreg_index], x86::Mem cache = x86::qword_ptr(regs_i64[hreg_info->exec_env_hreg_index],
offsetof(WASMExecEnv, jit_cache)); offsetof(WASMExecEnv, jit_cache));
a.movapd(cache, regs_float[reg_no2_src]); a.movupd(cache, regs_float[reg_no2_src]);
/* imm -> gp -> xmm */ /* imm -> gp -> xmm */
mov_imm_to_r_f64(a, reg_no_dst, data1_src); mov_imm_to_r_f64(a, reg_no_dst, data1_src);
@ -4214,7 +4224,7 @@ cmp_imm_r_to_r_f32(x86::Assembler &a, int32 reg_no_dst, float data1_src,
/* xmm -> m128 */ /* xmm -> m128 */
x86::Mem cache = x86::xmmword_ptr(regs_i64[hreg_info->exec_env_hreg_index], x86::Mem cache = x86::xmmword_ptr(regs_i64[hreg_info->exec_env_hreg_index],
offsetof(WASMExecEnv, jit_cache)); offsetof(WASMExecEnv, jit_cache));
a.movaps(cache, regs_float[reg_no2_src]); a.movups(cache, regs_float[reg_no2_src]);
/* imm -> gp -> xmm */ /* imm -> gp -> xmm */
mov_imm_to_r_f32(a, reg_no2_src, data1_src); mov_imm_to_r_f32(a, reg_no2_src, data1_src);
@ -4309,7 +4319,7 @@ cmp_imm_r_to_r_f64(x86::Assembler &a, int32 reg_no_dst, double data1_src,
/* xmm -> m128 */ /* xmm -> m128 */
x86::Mem cache = x86::qword_ptr(regs_i64[hreg_info->exec_env_hreg_index], x86::Mem cache = x86::qword_ptr(regs_i64[hreg_info->exec_env_hreg_index],
offsetof(WASMExecEnv, jit_cache)); offsetof(WASMExecEnv, jit_cache));
a.movapd(cache, regs_float[reg_no2_src]); a.movupd(cache, regs_float[reg_no2_src]);
/* imm -> gp -> xmm */ /* imm -> gp -> xmm */
mov_imm_to_r_f64(a, reg_no2_src, data1_src); mov_imm_to_r_f64(a, reg_no2_src, data1_src);
@ -6151,32 +6161,62 @@ jit_codegen_gen_native(JitCompContext *cc)
case JIT_OP_LDI8: case JIT_OP_LDI8:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 1, true); LD_R_R_R(I32, 1, true);
else
LD_R_R_R(I64, 1, true);
break; break;
case JIT_OP_LDU8: case JIT_OP_LDU8:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 1, false); LD_R_R_R(I32, 1, false);
else
LD_R_R_R(I64, 1, false);
break; break;
case JIT_OP_LDI16: case JIT_OP_LDI16:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 2, true); LD_R_R_R(I32, 2, true);
else
LD_R_R_R(I64, 2, true);
break; break;
case JIT_OP_LDU16: case JIT_OP_LDU16:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 2, false); LD_R_R_R(I32, 2, false);
else
LD_R_R_R(I64, 2, false);
break; break;
case JIT_OP_LDI32: case JIT_OP_LDI32:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 4, true); LD_R_R_R(I32, 4, true);
else
LD_R_R_R(I64, 4, true);
break; break;
case JIT_OP_LDU32: case JIT_OP_LDU32:
LOAD_3ARGS(); LOAD_3ARGS();
bh_assert(jit_reg_kind(r0) == JIT_REG_KIND_I32
|| jit_reg_kind(r0) == JIT_REG_KIND_I64);
if (jit_reg_kind(r0) == JIT_REG_KIND_I32)
LD_R_R_R(I32, 4, false); LD_R_R_R(I32, 4, false);
else
LD_R_R_R(I64, 4, false);
break; break;
case JIT_OP_LDI64: case JIT_OP_LDI64:
@ -6574,29 +6614,52 @@ jit_codegen_get_hreg_info()
return &hreg_info; return &hreg_info;
} }
/* Hardware register names, indexed by hardware register number.
   jit_codegen_get_hreg_by_name() looks a name up in these tables and
   returns jit_reg_new(kind, index), so the position of each name must
   match the codegen's register numbering for that kind. */
static const char *reg_names_i32[] = {
"ebp", "eax", "ebx", "ecx", "edx", "edi", "esi", "esp",
};
static const char *reg_names_i64[] = {
"rbp", "rax", "rbx", "rcx", "rdx", "rdi", "rsi", "rsp",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
/* F32 and F64 share the same xmm registers; the "_f64" suffix only
   distinguishes the register kind at the IR level. */
static const char *reg_names_f32[] = { "xmm0", "xmm1", "xmm2", "xmm3",
"xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11",
"xmm12", "xmm13", "xmm14", "xmm15" };
static const char *reg_names_f64[] = {
"xmm0_f64", "xmm1_f64", "xmm2_f64", "xmm3_f64", "xmm4_f64", "xmm5_f64",
"xmm6_f64", "xmm7_f64", "xmm8_f64", "xmm9_f64", "xmm10_f64", "xmm11_f64",
"xmm12_f64", "xmm13_f64", "xmm14_f64", "xmm15_f64"
};
JitReg JitReg
jit_codegen_get_hreg_by_name(const char *name) jit_codegen_get_hreg_by_name(const char *name)
{ {
if (strcmp(name, "eax") == 0) size_t i;
return jit_reg_new(JIT_REG_KIND_I32, REG_EAX_IDX);
else if (strcmp(name, "ecx") == 0)
return jit_reg_new(JIT_REG_KIND_I32, REG_ECX_IDX);
else if (strcmp(name, "edx") == 0)
return jit_reg_new(JIT_REG_KIND_I32, REG_EDX_IDX);
else if (strcmp(name, "esi") == 0)
return jit_reg_new(JIT_REG_KIND_I32, REG_ESI_IDX);
else if (strcmp(name, "rax") == 0)
return jit_reg_new(JIT_REG_KIND_I64, REG_RAX_IDX);
else if (strcmp(name, "rcx") == 0)
return jit_reg_new(JIT_REG_KIND_I64, REG_RCX_IDX);
else if (strcmp(name, "rdx") == 0)
return jit_reg_new(JIT_REG_KIND_I64, REG_RDX_IDX);
else if (strcmp(name, "r9") == 0)
return jit_reg_new(JIT_REG_KIND_I64, REG_R9_IDX);
else if (strcmp(name, "xmm0") == 0)
return jit_reg_new(JIT_REG_KIND_F32, 0);
else if (strcmp(name, "xmm0_f64") == 0)
return jit_reg_new(JIT_REG_KIND_F64, 0);
if (name[0] == 'e') {
for (i = 0; i < sizeof(reg_names_i32) / sizeof(char *); i++)
if (!strcmp(reg_names_i32[i], name))
return jit_reg_new(JIT_REG_KIND_I32, i);
}
else if (name[0] == 'r') {
for (i = 0; i < sizeof(reg_names_i64) / sizeof(char *); i++)
if (!strcmp(reg_names_i64[i], name))
return jit_reg_new(JIT_REG_KIND_I64, i);
}
else if (!strncmp(name, "xmm", 3)) {
if (!strstr(name, "_f64")) {
for (i = 0; i < sizeof(reg_names_f32) / sizeof(char *); i++)
if (!strcmp(reg_names_f32[i], name))
return jit_reg_new(JIT_REG_KIND_F32, i);
}
else {
for (i = 0; i < sizeof(reg_names_f64) / sizeof(char *); i++)
if (!strcmp(reg_names_f64[i], name))
return jit_reg_new(JIT_REG_KIND_F64, i);
}
}
return 0; return 0;
} }

View File

@ -4,6 +4,7 @@
*/ */
#include "jit_emit_compare.h" #include "jit_emit_compare.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h" #include "../jit_frontend.h"
#include "../jit_codegen.h" #include "../jit_codegen.h"
@ -120,7 +121,7 @@ jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond)
static int32 static int32
float_cmp_eq(float f1, float f2) float_cmp_eq(float f1, float f2)
{ {
if (isnan(f1) || isnan(f2)) if (isnanf(f1) || isnanf(f2))
return 0; return 0;
return f1 == f2; return f1 == f2;
@ -129,7 +130,7 @@ float_cmp_eq(float f1, float f2)
static int32 static int32
float_cmp_ne(float f1, float f2) float_cmp_ne(float f1, float f2)
{ {
if (isnan(f1) || isnan(f2)) if (isnanf(f1) || isnanf(f2))
return 1; return 1;
return f1 != f2; return f1 != f2;
@ -157,53 +158,26 @@ static bool
jit_compile_op_compare_float_point(JitCompContext *cc, FloatCond cond, jit_compile_op_compare_float_point(JitCompContext *cc, FloatCond cond,
JitReg lhs, JitReg rhs) JitReg lhs, JitReg rhs)
{ {
JitReg res, const_zero, const_one; JitReg res, args[2], const_zero, const_one;
JitRegKind kind;
void *func;
if (cond == FLOAT_EQ) { if (cond == FLOAT_EQ || cond == FLOAT_NE) {
JitInsn *insn = NULL; kind = jit_reg_kind(lhs);
JitRegKind kind = jit_reg_kind(lhs); if (cond == FLOAT_EQ)
func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_eq
: (void *)double_cmp_eq;
else
func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_ne
: (void *)double_cmp_ne;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name("eax");
#else
res = jit_cc_new_reg_I32(cc); res = jit_cc_new_reg_I32(cc);
#endif args[0] = lhs;
args[1] = rhs;
if (kind == JIT_REG_KIND_F32) { if (!jit_emit_callnative(cc, func, res, args, 2)) {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)float_cmp_eq), 2);
}
else {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)double_cmp_eq), 2);
}
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = lhs;
*(jit_insn_opndv(insn, 3)) = rhs;
}
else if (cond == FLOAT_NE) {
JitInsn *insn = NULL;
JitRegKind kind = jit_reg_kind(lhs);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name("eax");
#else
res = jit_cc_new_reg_I32(cc);
#endif
if (kind == JIT_REG_KIND_F32) {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)float_cmp_ne), 2);
}
else {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)double_cmp_ne), 2);
}
if (!insn) {
goto fail;
}
*(jit_insn_opndv(insn, 2)) = lhs;
*(jit_insn_opndv(insn, 3)) = rhs;
} }
else { else {
res = jit_cc_new_reg_I32(cc); res = jit_cc_new_reg_I32(cc);

View File

@ -852,8 +852,6 @@ jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
jit_frame = cc->jit_frame; jit_frame = cc->jit_frame;
cur_basic_block = cc->cur_basic_block; cur_basic_block = cc->cur_basic_block;
gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp); gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
/* Clear frame values */
clear_values(jit_frame);
if (block_dst->label_type == LABEL_TYPE_LOOP) { if (block_dst->label_type == LABEL_TYPE_LOOP) {
frame_sp_src = frame_sp_src =

View File

@ -3,10 +3,11 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/ */
#include "../jit_codegen.h"
#include "jit_emit_conversion.h" #include "jit_emit_conversion.h"
#include "../jit_frontend.h"
#include "jit_emit_exception.h" #include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_codegen.h"
#include "../jit_frontend.h"
static double static double
uint64_to_double(uint64 u64) uint64_to_double(uint64 u64)
@ -40,7 +41,6 @@ bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating) jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating)
{ {
JitReg value, nan_ret, max_valid_float, min_valid_float, res; JitReg value, nan_ret, max_valid_float, min_valid_float, res;
JitInsn *insn = NULL;
POP_F32(value); POP_F32(value);
@ -50,23 +50,11 @@ jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating)
sign ? NEW_CONST(F32, 2147483648.0f) : NEW_CONST(F32, 4294967296.0f); sign ? NEW_CONST(F32, 2147483648.0f) : NEW_CONST(F32, 4294967296.0f);
if (!saturating) { if (!saturating) {
/*if (value is Nan) goto exception*/ /* If value is NaN, throw exception */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* Set nan_ret to x86::eax */
nan_ret = jit_codegen_get_hreg_by_name("eax");
#else
nan_ret = jit_cc_new_reg_I32(cc); nan_ret = jit_cc_new_reg_I32(cc);
#endif if (!jit_emit_callnative(cc, isnanf, nan_ret, &value, 1)) {
insn =
GEN_INSN(CALLNATIVE, nan_ret, NEW_CONST(PTR, (uintptr_t)isnanf), 1);
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = value;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, nan_ret);
#endif
GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1)); GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1));
if (!jit_emit_exception(cc, EXCE_INVALID_CONVERSION_TO_INTEGER, if (!jit_emit_exception(cc, EXCE_INVALID_CONVERSION_TO_INTEGER,
@ -74,7 +62,7 @@ jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating)
goto fail; goto fail;
} }
/*if (value is out of integer range) goto exception*/ /* If value is out of integer range, throw exception */
GEN_INSN(CMP, cc->cmp_reg, value, min_valid_float); GEN_INSN(CMP, cc->cmp_reg, value, min_valid_float);
if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BLES, if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BLES,
cc->cmp_reg, NULL)) { cc->cmp_reg, NULL)) {
@ -113,7 +101,6 @@ bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating) jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating)
{ {
JitReg value, nan_ret, max_valid_double, min_valid_double, res; JitReg value, nan_ret, max_valid_double, min_valid_double, res;
JitInsn *insn = NULL;
POP_F64(value); POP_F64(value);
@ -123,23 +110,11 @@ jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating)
sign ? NEW_CONST(F64, 2147483648.0) : NEW_CONST(F64, 4294967296.0); sign ? NEW_CONST(F64, 2147483648.0) : NEW_CONST(F64, 4294967296.0);
if (!saturating) { if (!saturating) {
/*if (value is Nan) goto exception*/ /* If value is NaN, throw exception */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* Set nan_ret to x86::eax */
nan_ret = jit_codegen_get_hreg_by_name("eax");
#else
nan_ret = jit_cc_new_reg_I32(cc); nan_ret = jit_cc_new_reg_I32(cc);
#endif if (!jit_emit_callnative(cc, isnan, nan_ret, &value, 1)) {
insn =
GEN_INSN(CALLNATIVE, nan_ret, NEW_CONST(PTR, (uintptr_t)isnan), 1);
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = value;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, nan_ret);
#endif
GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1)); GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1));
if (!jit_emit_exception(cc, EXCE_INVALID_CONVERSION_TO_INTEGER, if (!jit_emit_exception(cc, EXCE_INVALID_CONVERSION_TO_INTEGER,
@ -147,7 +122,7 @@ jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating)
goto fail; goto fail;
} }
/*if (value is out of integer range) goto exception*/ /* If value is out of integer range, throw exception */
GEN_INSN(CMP, cc->cmp_reg, value, min_valid_double); GEN_INSN(CMP, cc->cmp_reg, value, min_valid_double);
if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BLES, if (!jit_emit_exception(cc, EXCE_INTEGER_OVERFLOW, JIT_OP_BLES,
cc->cmp_reg, NULL)) { cc->cmp_reg, NULL)) {
@ -332,22 +307,10 @@ jit_compile_op_f32_convert_i64(JitCompContext *cc, bool sign)
GEN_INSN(I64TOF32, res, value); GEN_INSN(I64TOF32, res, value);
} }
else { else {
JitInsn *insn;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name("xmm0");
#else
res = jit_cc_new_reg_F32(cc); res = jit_cc_new_reg_F32(cc);
#endif if (!jit_emit_callnative(cc, uint64_to_float, res, &value, 1)) {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)uint64_to_float), 1);
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = value;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, res);
#endif
} }
PUSH_F32(res); PUSH_F32(res);
@ -406,24 +369,10 @@ jit_compile_op_f64_convert_i64(JitCompContext *cc, bool sign)
GEN_INSN(I64TOF64, res, value); GEN_INSN(I64TOF64, res, value);
} }
else { else {
JitInsn *insn;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
JitReg xmm0;
res = jit_codegen_get_hreg_by_name("xmm0_f64");
xmm0 = jit_codegen_get_hreg_by_name("xmm0");
#else
res = jit_cc_new_reg_F64(cc); res = jit_cc_new_reg_F64(cc);
#endif if (!jit_emit_callnative(cc, uint64_to_double, res, &value, 1)) {
insn = GEN_INSN(CALLNATIVE, res,
NEW_CONST(PTR, (uintptr_t)uint64_to_double), 1);
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = value;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, xmm0);
#endif
} }
PUSH_F64(res); PUSH_F64(res);

View File

@ -128,13 +128,8 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
WASMFunction *func; WASMFunction *func;
WASMType *func_type; WASMType *func_type;
JitFrame *jit_frame = cc->jit_frame; JitFrame *jit_frame = cc->jit_frame;
JitReg result = 0, native_ret; JitReg native_ret;
JitReg func_ptrs, jitted_code = 0; JitReg func_ptrs, jitted_code = 0;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
#endif
JitInsn *insn;
uint32 jitted_func_idx; uint32 jitted_func_idx;
if (func_idx >= wasm_module->import_function_count) { if (func_idx >= wasm_module->import_function_count) {
@ -161,23 +156,17 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
} }
if (func_idx < wasm_module->import_function_count) { if (func_idx < wasm_module->import_function_count) {
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) JitReg arg_regs[3];
/* Set native_ret to x86::eax */
native_ret = eax_hreg;
#else
native_ret = jit_cc_new_reg_I32(cc);
#endif
insn = GEN_INSN(CALLNATIVE, native_ret,
NEW_CONST(PTR, (uintptr_t)jit_invoke_native), 3);
if (insn) {
*(jit_insn_opndv(insn, 2)) = cc->exec_env_reg;
*(jit_insn_opndv(insn, 3)) = NEW_CONST(I32, func_idx);
*(jit_insn_opndv(insn, 4)) = cc->fp_reg;
}
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) native_ret = jit_cc_new_reg_I32(cc);
jit_lock_reg_in_insn(cc, insn, native_ret); arg_regs[0] = cc->exec_env_reg;
#endif arg_regs[1] = NEW_CONST(I32, func_idx);
arg_regs[2] = cc->fp_reg;
if (!jit_emit_callnative(cc, jit_invoke_native, native_ret, arg_regs,
3)) {
return false;
}
/* Check whether there is exception thrown */ /* Check whether there is exception thrown */
GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0)); GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
@ -187,6 +176,8 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
} }
} }
else { else {
JitReg res = 0;
if (func_type->result_count > 0) { if (func_type->result_count > 0) {
switch (func_type->types[func_type->param_count]) { switch (func_type->types[func_type->param_count]) {
case VALUE_TYPE_I32: case VALUE_TYPE_I32:
@ -195,23 +186,31 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
case VALUE_TYPE_FUNCREF: case VALUE_TYPE_FUNCREF:
#endif #endif
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
result = eax_hreg; res = jit_codegen_get_hreg_by_name("eax");
#else #else
result = jit_cc_new_reg_I32(cc); res = jit_cc_new_reg_I32(cc);
#endif #endif
break; break;
case VALUE_TYPE_I64: case VALUE_TYPE_I64:
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
result = rax_hreg; res = jit_codegen_get_hreg_by_name("rax");
#else #else
result = jit_cc_new_reg_I64(cc); res = jit_cc_new_reg_I64(cc);
#endif #endif
break; break;
case VALUE_TYPE_F32: case VALUE_TYPE_F32:
result = jit_cc_new_reg_F32(cc); #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name("xmm0");
#else
res = jit_cc_new_reg_F32(cc);
#endif
break; break;
case VALUE_TYPE_F64: case VALUE_TYPE_F64:
result = jit_cc_new_reg_F64(cc); #if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name("xmm0_f64");
#else
res = jit_cc_new_reg_F64(cc);
#endif
break; break;
default: default:
bh_assert(0); bh_assert(0);
@ -219,7 +218,7 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
} }
} }
GEN_INSN(CALLBC, result, 0, jitted_code); GEN_INSN(CALLBC, res, 0, jitted_code);
} }
if (!post_return(cc, func_type)) { if (!post_return(cc, func_type)) {
@ -317,12 +316,8 @@ bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx, jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
uint32 tbl_idx) uint32 tbl_idx)
{ {
JitReg elem_idx, native_ret, argv; JitReg elem_idx, native_ret, argv, arg_regs[6];
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
JitReg edx_hreg, r9_hreg;
#endif
WASMType *func_type; WASMType *func_type;
JitInsn *insn;
POP_I32(elem_idx); POP_I32(elem_idx);
@ -333,38 +328,18 @@ jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
argv = pack_argv(cc); argv = pack_argv(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* Set native_ret to x86::eax */
native_ret = jit_codegen_get_hreg_by_name("eax");
edx_hreg = jit_codegen_get_hreg_by_name("edx");
GEN_INSN(MOV, edx_hreg, elem_idx);
elem_idx = edx_hreg;
r9_hreg = jit_codegen_get_hreg_by_name("r9");
GEN_INSN(MOV, r9_hreg, argv);
argv = r9_hreg;
#else
native_ret = jit_cc_new_reg_I32(cc); native_ret = jit_cc_new_reg_I32(cc);
#endif arg_regs[0] = cc->exec_env_reg;
arg_regs[1] = NEW_CONST(I32, tbl_idx);
arg_regs[2] = elem_idx;
arg_regs[3] = NEW_CONST(I32, type_idx);
arg_regs[4] = NEW_CONST(I32, func_type->param_cell_num);
arg_regs[5] = argv;
insn = GEN_INSN(CALLNATIVE, native_ret, if (!jit_emit_callnative(cc, jit_call_indirect, native_ret, arg_regs, 6)) {
NEW_CONST(PTR, (uintptr_t)jit_call_indirect), 6); return false;
if (!insn) {
goto fail;
} }
*(jit_insn_opndv(insn, 2)) = cc->exec_env_reg;
*(jit_insn_opndv(insn, 3)) = NEW_CONST(I32, tbl_idx);
*(jit_insn_opndv(insn, 4)) = elem_idx;
*(jit_insn_opndv(insn, 5)) = NEW_CONST(I32, type_idx);
*(jit_insn_opndv(insn, 6)) = NEW_CONST(I32, func_type->param_cell_num);
*(jit_insn_opndv(insn, 7)) = argv;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, native_ret);
#endif
/* Check whether there is exception thrown */ /* Check whether there is exception thrown */
GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0)); GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg, if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ, cc->cmp_reg,
@ -403,3 +378,122 @@ jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
{ {
return false; return false;
} }
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/**
 * Emit IR to call a native function on x86-64: each argument is first
 * moved into the hardware register the tables below assign to its
 * position (integer args in rdi/rsi/rdx/rcx/r8/r9, float args in
 * xmm0..xmm5, with integer and float positions counted independently),
 * the CALLNATIVE instruction is emitted with those hardware registers
 * as operands, and the result is moved out of eax/rax/xmm0 into @res.
 *
 * @param cc the JIT compilation context
 * @param native_func the native function to call
 * @param res virtual register to receive the result, or 0 when the
 *        return value is unused
 * @param params the argument registers, at most 6
 * @param param_count number of arguments in @params
 *
 * @return true on success, false otherwise
 */
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
                    JitReg *params, uint32 param_count)
{
    JitInsn *insn;
    /* Argument hardware registers by position; string literals must not
       be modified, hence const */
    static const char *i64_arg_names[] = { "rdi", "rsi", "rdx",
                                           "rcx", "r8",  "r9" };
    static const char *f32_arg_names[] = { "xmm0", "xmm1", "xmm2",
                                           "xmm3", "xmm4", "xmm5" };
    static const char *f64_arg_names[] = { "xmm0_f64", "xmm1_f64",
                                           "xmm2_f64", "xmm3_f64",
                                           "xmm4_f64", "xmm5_f64" };
    JitReg i64_arg_regs[6], f32_arg_regs[6], f64_arg_regs[6], res_hreg = 0;
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitReg xmm0_hreg = jit_codegen_get_hreg_by_name("xmm0");
    JitReg xmm0_f64_hreg = jit_codegen_get_hreg_by_name("xmm0_f64");
    uint32 i, i64_reg_idx, float_reg_idx;

    /* Only register-passed arguments are supported */
    bh_assert(param_count <= 6);

    for (i = 0; i < 6; i++) {
        i64_arg_regs[i] = jit_codegen_get_hreg_by_name(i64_arg_names[i]);
        f32_arg_regs[i] = jit_codegen_get_hreg_by_name(f32_arg_names[i]);
        f64_arg_regs[i] = jit_codegen_get_hreg_by_name(f64_arg_names[i]);
    }

    /* Move each argument into its dedicated hardware register before
       emitting the call */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
                /* Widen 32-bit integers to fill the 64-bit register */
                GEN_INSN(I32TOI64, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_I64:
                GEN_INSN(MOV, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F32:
                GEN_INSN(MOV, f32_arg_regs[float_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F64:
                GEN_INSN(MOV, f64_arg_regs[float_reg_idx++], params[i]);
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Select the hardware return register matching the result kind */
    if (res) {
        switch (jit_reg_kind(res)) {
            case JIT_REG_KIND_I32:
                res_hreg = eax_hreg;
                break;
            case JIT_REG_KIND_I64:
                res_hreg = rax_hreg;
                break;
            case JIT_REG_KIND_F32:
                res_hreg = xmm0_hreg;
                break;
            case JIT_REG_KIND_F64:
                res_hreg = xmm0_f64_hreg;
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    insn = GEN_INSN(CALLNATIVE, res_hreg,
                    NEW_CONST(PTR, (uintptr_t)native_func), param_count);
    if (!insn) {
        return false;
    }

    /* Attach the hardware argument registers as the call's operands,
       walking the positions exactly as in the move loop above */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
            case JIT_REG_KIND_I64:
                *(jit_insn_opndv(insn, i + 2)) = i64_arg_regs[i64_reg_idx++];
                break;
            case JIT_REG_KIND_F32:
                *(jit_insn_opndv(insn, i + 2)) = f32_arg_regs[float_reg_idx++];
                break;
            case JIT_REG_KIND_F64:
                *(jit_insn_opndv(insn, i + 2)) = f64_arg_regs[float_reg_idx++];
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Copy the return value out of the hardware return register */
    if (res) {
        GEN_INSN(MOV, res, res_hreg);
    }

    return true;
}
#else
/**
 * Generic fallback: emit CALLNATIVE with the virtual registers passed
 * through directly; the target backend handles argument placement.
 *
 * @param cc the JIT compilation context
 * @param native_func the native function to call
 * @param res virtual register to receive the result, or 0 when unused
 * @param params the argument registers, at most 6
 * @param param_count number of arguments in @params
 *
 * @return true on success, false otherwise
 */
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
                    JitReg *params, uint32 param_count)
{
    JitInsn *insn;
    uint32 i;

    bh_assert(param_count <= 6);

    insn = GEN_INSN(CALLNATIVE, res, NEW_CONST(PTR, (uintptr_t)native_func),
                    param_count);
    if (!insn)
        return false;

    for (i = 0; i < param_count; i++) {
        *(jit_insn_opndv(insn, i + 2)) = params[i];
    }

    return true;
}
#endif

View File

@ -28,6 +28,10 @@ jit_compile_op_ref_is_null(JitCompContext *cc);
bool bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx); jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx);
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
JitReg *params, uint32 param_count);
#ifdef __cplusplus #ifdef __cplusplus
} /* end of extern "C" */ } /* end of extern "C" */
#endif #endif

View File

@ -5,6 +5,7 @@
#include "jit_emit_memory.h" #include "jit_emit_memory.h"
#include "jit_emit_exception.h" #include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h" #include "../jit_frontend.h"
#include "../jit_codegen.h" #include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h" #include "../../interpreter/wasm_runtime.h"
@ -475,56 +476,34 @@ jit_compile_op_memory_size(JitCompContext *cc)
bool bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx) jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx)
{ {
JitReg delta, module_inst, grow_result, res, memory_inst, prev_page_count; JitReg memory_inst, grow_res, res;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) JitReg prev_page_count, inc_page_count, args[2];
JitReg esi_hreg;
#endif
JitInsn *insn;
/* WASMMemoryInstance->cur_page_count before enlarging */ /* Get current page count */
memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx); memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
prev_page_count = jit_cc_new_reg_I32(cc); prev_page_count = jit_cc_new_reg_I32(cc);
GEN_INSN(LDI32, prev_page_count, memory_inst, GEN_INSN(LDI32, prev_page_count, memory_inst,
NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count))); NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
/* call wasm_enlarge_memory */ /* Call wasm_enlarge_memory */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) POP_I32(inc_page_count);
/* Set native_ret to x86::eax */
grow_result = jit_codegen_get_hreg_by_name("eax");
#else
grow_result = jit_cc_new_reg_I32(cc);
#endif
POP_I32(delta); grow_res = jit_cc_new_reg_I32(cc);
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) args[0] = get_module_inst_reg(cc->jit_frame);
esi_hreg = jit_codegen_get_hreg_by_name("esi"); args[1] = inc_page_count;
GEN_INSN(MOV, esi_hreg, delta);
delta = esi_hreg;
#endif
module_inst = get_module_inst_reg(cc->jit_frame); if (!jit_emit_callnative(cc, wasm_enlarge_memory, grow_res, args, 2)) {
insn = GEN_INSN(CALLNATIVE, grow_result,
NEW_CONST(PTR, (uintptr_t)wasm_enlarge_memory), 2);
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = module_inst; /* Check if enlarge memory success */
*(jit_insn_opndv(insn, 3)) = delta;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
jit_lock_reg_in_insn(cc, insn, grow_result);
#endif
/* check if enlarge memory success */
res = jit_cc_new_reg_I32(cc); res = jit_cc_new_reg_I32(cc);
GEN_INSN(CMP, cc->cmp_reg, grow_result, NEW_CONST(I32, 0)); GEN_INSN(CMP, cc->cmp_reg, grow_res, NEW_CONST(I32, 0));
GEN_INSN(SELECTNE, res, cc->cmp_reg, prev_page_count, GEN_INSN(SELECTNE, res, cc->cmp_reg, prev_page_count,
NEW_CONST(I32, (int32)-1)); NEW_CONST(I32, (int32)-1));
PUSH_I32(res); PUSH_I32(res);
/* ensure a refresh in next get_memory_XXX_reg */ /* Ensure a refresh in next get memory related registers */
clear_memory_regs(cc->jit_frame); clear_memory_regs(cc->jit_frame);
return true; return true;

View File

@ -6,6 +6,7 @@
#include "jit_emit_numberic.h" #include "jit_emit_numberic.h"
#include "jit_emit_exception.h" #include "jit_emit_exception.h"
#include "jit_emit_control.h" #include "jit_emit_control.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h" #include "../jit_frontend.h"
#include "../jit_codegen.h" #include "../jit_codegen.h"
@ -1338,22 +1339,12 @@ static bool
compile_op_float_math(JitCompContext *cc, FloatMath math_op, bool is_f32) compile_op_float_math(JitCompContext *cc, FloatMath math_op, bool is_f32)
{ {
JitReg value, res; JitReg value, res;
JitInsn *insn = NULL;
void *func = NULL; void *func = NULL;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
if (is_f32) {
res = jit_codegen_get_hreg_by_name("xmm0");
}
else {
res = jit_codegen_get_hreg_by_name("xmm0_f64");
}
#else
if (is_f32) if (is_f32)
res = jit_cc_new_reg_F32(cc); res = jit_cc_new_reg_F32(cc);
else else
res = jit_cc_new_reg_F64(cc); res = jit_cc_new_reg_F64(cc);
#endif
if (is_f32) if (is_f32)
POP_F32(value); POP_F32(value);
@ -1387,11 +1378,9 @@ compile_op_float_math(JitCompContext *cc, FloatMath math_op, bool is_f32)
goto fail; goto fail;
} }
insn = GEN_INSN(CALLNATIVE, res, NEW_CONST(PTR, (uintptr_t)func), 1); if (!jit_emit_callnative(cc, func, res, &value, 1)) {
if (!insn) {
goto fail; goto fail;
} }
*(jit_insn_opndv(insn, 2)) = value;
if (is_f32) if (is_f32)
PUSH_F32(res); PUSH_F32(res);
@ -1463,34 +1452,22 @@ static bool
compile_op_float_min_max(JitCompContext *cc, FloatArithmetic arith_op, compile_op_float_min_max(JitCompContext *cc, FloatArithmetic arith_op,
bool is_f32, JitReg lhs, JitReg rhs, JitReg *out) bool is_f32, JitReg lhs, JitReg rhs, JitReg *out)
{ {
JitReg res; JitReg res, args[2];
JitInsn *insn = NULL; void *func;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
res = jit_codegen_get_hreg_by_name(is_f32 ? "xmm0" : "xmm0_f64");
#else
res = is_f32 ? jit_cc_new_reg_F32(cc) : jit_cc_new_reg_F64(cc); res = is_f32 ? jit_cc_new_reg_F32(cc) : jit_cc_new_reg_F64(cc);
#endif
if (arith_op == FLOAT_MIN) if (arith_op == FLOAT_MIN)
insn = GEN_INSN(CALLNATIVE, res, func = is_f32 ? (void *)local_minf : (void *)local_min;
is_f32 ? NEW_CONST(PTR, (uintptr_t)local_minf)
: NEW_CONST(PTR, (uintptr_t)local_min),
2);
else else
insn = GEN_INSN(CALLNATIVE, res, func = is_f32 ? (void *)local_maxf : (void *)local_max;
is_f32 ? NEW_CONST(PTR, (uintptr_t)local_maxf)
: NEW_CONST(PTR, (uintptr_t)local_max),
2);
if (!insn)
goto fail;
*(jit_insn_opndv(insn, 2)) = lhs; args[0] = lhs;
*(jit_insn_opndv(insn, 3)) = rhs; args[1] = rhs;
if (!jit_emit_callnative(cc, func, res, args, 2))
return false;
*out = res; *out = res;
return true; return true;
fail:
return false;
} }
static bool static bool