diff --git a/core/config.h b/core/config.h
index a86de4d74..31c4e2bcd 100644
--- a/core/config.h
+++ b/core/config.h
@@ -102,6 +102,12 @@
 #define WASM_ENABLE_FAST_JIT_DUMP 0
 #endif
 
+#ifndef FAST_JIT_SPILL_CACHE_SIZE
+/* The size of the fast JIT spill cache in cells; one cell
+   occupies 4 bytes */
+#define FAST_JIT_SPILL_CACHE_SIZE 32
+#endif
+
 #ifndef WASM_ENABLE_WAMR_COMPILER
 #define WASM_ENABLE_WAMR_COMPILER 0
 #endif
diff --git a/core/iwasm/common/wasm_exec_env.h b/core/iwasm/common/wasm_exec_env.h
index 250fe8270..460580c4e 100644
--- a/core/iwasm/common/wasm_exec_env.h
+++ b/core/iwasm/common/wasm_exec_env.h
@@ -85,10 +85,13 @@ typedef struct WASMExecEnv {
 #endif
 
 #if WASM_ENABLE_FAST_JIT != 0
-    /* Cache for jit native operations, mainly for operations of float,
-       double and long, such as F64TOI64, F32TOI64, I64 MUL/REM, and so on. */
+#if defined(BUILD_TARGET_X86_32)
+    /* Cache for JIT native operations on 32-bit targets, which don't have
+       64-bit int/float registers; mainly for operations on double and int64,
+       such as F64TOI64, F32TOI64, I64 MUL/REM, and so on. */
     uint64 jit_cache[2];
 #endif
+#endif
 
 #if WASM_ENABLE_THREAD_MGR != 0
     /* thread return value */
diff --git a/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.cpp b/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.cpp
index 4f9f03033..c60a6e0bc 100644
--- a/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.cpp
+++ b/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.cpp
@@ -207,6 +207,8 @@ class JitErrorHandler : public ErrorHandler
 
     void handleError(Error e, const char *msg, BaseEmitter *base) override
     {
+        (void)msg;
+        (void)base;
         this->err = e;
     }
 };
@@ -224,7 +226,6 @@ typedef enum { EQ, NE, GTS, GES, LTS, LES, GTU, GEU, LTU, LEU } COND_OP;
 typedef enum JmpType {
     JMP_DST_LABEL,     /* jmp to dst label */
     JMP_END_OF_CALLBC, /* jmp to end of CALLBC */
-    JMP_TARGET_CODE    /* jmp to an function address */
 } JmpType;
 
 /**
@@ -239,7 +240,6 @@ typedef struct JmpInfo {
     uint32 offset;
     union {
        uint32 label_dst;
-        uint32 target_code_addr;
     } dst_info;
 } JmpInfo;
 
@@ -3868,17 +3868,77 @@ fail:
  *
  * @param cc the compiler context
  * @param a the assembler to emit the code
+ * @param jmp_info_list the jmp info list
  * @param label_src the index of src label
  * @param insn current insn info
  *
  * @return true if success, false if failed
  */
 static bool
-lower_callnative(JitCompContext *cc, x86::Assembler &a, int32 label_src,
-                 JitInsn *insn)
+lower_callnative(JitCompContext *cc, x86::Assembler &a, bh_list *jmp_info_list,
+                 int32 label_src, JitInsn *insn)
 {
-    /* TODO: ignore it now */
+    void (*func_ptr)(void);
+    JitReg ret_reg, func_reg, arg_reg;
+    x86::Gp regs_arg[] = { x86::rdi, x86::rsi, x86::rdx,
+                           x86::rcx, x86::r8,  x86::r9 };
+    Imm imm;
+    JmpInfo *node;
+    uint32 i, opnd_num;
+    int32 i32;
+    int64 i64;
+
+    ret_reg = *(jit_insn_opndv(insn, 0));
+    func_reg = *(jit_insn_opndv(insn, 1));
+    CHECK_KIND(func_reg, JIT_REG_KIND_I64);
+    CHECK_CONST(func_reg);
+
+    func_ptr = (void (*)(void))jit_cc_get_const_I64(cc, func_reg);
+
+    opnd_num = jit_insn_opndv_num(insn);
+    bh_assert(opnd_num <= (uint32)sizeof(regs_arg) / sizeof(JitReg));
+    for (i = 0; i < opnd_num - 2; i++) {
+        arg_reg = *(jit_insn_opndv(insn, i + 2));
+        switch (jit_reg_kind(arg_reg)) {
+            case JIT_REG_KIND_I32:
+                if (jit_reg_is_const(arg_reg)) {
+                    i32 = jit_cc_get_const_I32(cc, arg_reg);
+                    imm.setValue(i32);
+                    a.mov(regs_arg[i], imm);
+                }
+                else {
+                    a.mov(regs_arg[i], regs_i32[jit_reg_no(arg_reg)]);
+                }
+                break;
+            case JIT_REG_KIND_I64:
+                if (jit_reg_is_const(arg_reg)) {
+                    i64 = jit_cc_get_const_I64(cc, arg_reg);
+                    imm.setValue(i64);
+                    a.mov(regs_arg[i], imm);
+                }
+                else {
+                    if (regs_arg[i] != regs_i64[jit_reg_no(arg_reg)])
+                        a.mov(regs_arg[i], regs_i64[jit_reg_no(arg_reg)]);
+                }
+                break;
+            default:
+                bh_assert(0);
+                return false;
+        }
+    }
+
+    imm.setValue((uint64)func_ptr);
+    a.mov(regs_i64[REG_RAX_IDX], imm);
+    a.call(regs_i64[REG_RAX_IDX]);
+
+    if (ret_reg) {
+        bh_assert(jit_reg_kind(ret_reg) == JIT_REG_KIND_I32);
+        bh_assert(jit_reg_no(ret_reg) == REG_EAX_IDX);
+    }
+    return true;
+fail:
+    return false;
 }
 
 /**
@@ -3886,15 +3946,42 @@ lower_callnative(JitCompContext *cc, x86::Assembler &a, int32 label_src,
  *
  * @param cc the compiler context
  * @param a the assembler to emit the code
+ * @param jmp_info_list the jmp info list
  * @param label_src the index of src label
  * @param insn current insn info
  *
  * @return true if success, false if failed
  */
 static bool
-lower_callbc(JitCompContext *cc, x86::Assembler &a, int32 label_src,
-             JitInsn *insn)
+lower_callbc(JitCompContext *cc, x86::Assembler &a, bh_list *jmp_info_list,
+             int32 label_src, JitInsn *insn)
 {
+    JmpInfo *node;
+    Imm imm;
+    JitReg func_reg = *(jit_insn_opnd(insn, 2));
+    /* Load return_jitted_addr from stack */
+    x86::Mem m(x86::rbp, cc->jitted_return_address_offset);
+
+    CHECK_KIND(func_reg, JIT_REG_KIND_I64);
+
+    node = (JmpInfo *)jit_malloc(sizeof(JmpInfo));
+    if (!node)
+        GOTO_FAIL;
+
+    node->type = JMP_END_OF_CALLBC;
+    node->label_src = label_src;
+    node->offset = a.code()->sectionById(0)->buffer().size() + 2;
+    bh_list_insert(jmp_info_list, node);
+
+    /* Set the next jitted addr to glue_ret_jited_addr; the placeholder
+       will be replaced with the actual address after the code cache
+       is allocated */
+    imm.setValue(INT64_MAX);
+    a.mov(regs_i64[REG_I64_FREE_IDX], imm);
+    a.mov(m, regs_i64[REG_I64_FREE_IDX]);
+    a.jmp(regs_i64[jit_reg_no(func_reg)]);
+    return true;
+fail:
     return false;
 }
 
@@ -4003,10 +4090,8 @@ patch_jmp_info_list(JitCompContext *cc, bh_list *jmp_info_list)
                       - 4;
         }
         else if (jmp_info->type == JMP_END_OF_CALLBC) {
-            /* TODO */
-        }
-        else if (jmp_info->type == JMP_TARGET_CODE) {
-            /* TODO */
+            /* 7 is the total size of the mov and jmp instructions */
+            *(uintptr_t *)stream = (uintptr_t)stream + sizeof(uintptr_t) + 7;
         }
 
         jmp_info = jmp_info_next;
@@ -4086,6 +4171,10 @@ jit_codegen_gen_native(JitCompContext *cc)
         {
             is_last_insn = (insn->next == block) ? true : false;
 
+#if CODEGEN_DUMP != 0
+            os_printf("\n");
+            jit_dump_insn(cc, insn);
+#endif
             switch (insn->opcode) {
                 case JIT_OP_MOV:
                     LOAD_2ARGS();
@@ -4333,12 +4422,13 @@ jit_codegen_gen_native(JitCompContext *cc)
                 }
 
                 case JIT_OP_CALLNATIVE:
-                    if (!lower_callnative(cc, a, label_index, insn))
+                    if (!lower_callnative(cc, a, jmp_info_list, label_index,
+                                          insn))
                         GOTO_FAIL;
                     break;
 
                 case JIT_OP_CALLBC:
-                    if (!lower_callbc(cc, a, label_index, insn))
+                    if (!lower_callbc(cc, a, jmp_info_list, label_index, insn))
                         GOTO_FAIL;
                     break;
 
@@ -4450,10 +4540,15 @@ jit_codegen_init()
     a.push(x86::r13);
     a.push(x86::r14);
     a.push(x86::r15);
-    /* push exec_env */
-    a.push(x86::rdi);
     /* push info */
     a.push(x86::rsi);
+
+    /* Note: the number of registers pushed must be odd: %rsp must be
+       aligned to a 16-byte boundary before making a call, so when a
+       function (including this one) gets control, %rsp is not 16-byte
+       aligned. Pushing an odd number of registers here restores the
+       alignment before we call native functions. */
+
     /* exec_env_reg = exec_env */
     a.mov(regs_i64[hreg_info->exec_env_hreg_index], x86::rdi);
     /* fp_reg = info->frame */
@@ -4492,8 +4587,6 @@ jit_codegen_init()
         a.mov(m, x86::rcx);
     }
 
-    /* pop exec_env */
-    a.pop(x86::rdi);
     /* pop callee-save registers */
     a.pop(x86::r15);
     a.pop(x86::r14);
@@ -4535,7 +4628,7 @@ jit_codegen_destroy()
 static const uint8 hreg_info_I32[3][7] = {
     /* ebp, eax, ebx, ecx, edx, edi, esi */
     { 1, 0, 0, 0, 0, 0, 1 }, /* fixed, esi is freely used */
-    { 0, 1, 0, 1, 1, 0, 0 }, /* caller_saved_native */
+    { 0, 1, 0, 1, 1, 1, 0 }, /* caller_saved_native */
     { 0, 1, 0, 1, 1, 1, 0 }  /* caller_saved_jitted */
 };
diff --git a/core/iwasm/fast-jit/fe/jit_emit_exception.c b/core/iwasm/fast-jit/fe/jit_emit_exception.c
index a9cba5823..2addb5cde 100644
--- a/core/iwasm/fast-jit/fe/jit_emit_exception.c
+++ b/core/iwasm/fast-jit/fe/jit_emit_exception.c
@@ -16,7 +16,7 @@ jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
 
     bh_assert(exception_id < EXCE_NUM);
 
-    if (jit_opcode >= JIT_OP_BNE && jit_opcode <= JIT_OP_BLEU) {
+    if (jit_opcode >= JIT_OP_BEQ && jit_opcode <= JIT_OP_BLEU) {
         bh_assert(cond_br_if == cc->cmp_reg);
         else_label =
             cond_br_else_block ? jit_basic_block_label(cond_br_else_block) : 0;
diff --git a/core/iwasm/fast-jit/fe/jit_emit_function.c b/core/iwasm/fast-jit/fe/jit_emit_function.c
index d029d642f..f741b4c5a 100644
--- a/core/iwasm/fast-jit/fe/jit_emit_function.c
+++ b/core/iwasm/fast-jit/fe/jit_emit_function.c
@@ -4,11 +4,229 @@
  */
 
 #include "jit_emit_function.h"
+#include "jit_emit_exception.h"
 #include "../jit_frontend.h"
 
+extern bool
+jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
+                  WASMInterpFrame *prev_frame);
+
 bool
 jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
 {
+    WASMModule *wasm_module = cc->cur_wasm_module;
+    WASMFunctionImport *func_import;
+    WASMFunction *func;
+    WASMType *func_type;
+    JitReg value, result = 0, module_inst, native_ret;
+    JitReg module, func_ptrs, jitted_code = 0;
+    JitInsn *insn;
+    uint32 i, n, outs_off, jitted_func_idx;
+
+#if UINTPTR_MAX == UINT64_MAX
+    module_inst = jit_cc_new_reg_I64(cc);
+    /* module_inst = exec_env->module_inst */
+    GEN_INSN(LDI64, module_inst, cc->exec_env_reg,
+             NEW_CONST(I32, offsetof(WASMExecEnv, module_inst)));
+    if (func_idx >= wasm_module->import_function_count) {
+        module = jit_cc_new_reg_I64(cc);
+        func_ptrs = jit_cc_new_reg_I64(cc);
+        jitted_code = jit_cc_new_reg_I64(cc);
+        /* module = module_inst->module */
+        GEN_INSN(LDI64, module, module_inst,
+                 NEW_CONST(I32, offsetof(WASMModuleInstance, module)));
+        /* func_ptrs = module->fast_jit_func_ptrs */
+        GEN_INSN(LDI64, func_ptrs, module,
+                 NEW_CONST(I32, offsetof(WASMModule, fast_jit_func_ptrs)));
+        /* jitted_code = func_ptrs[func_idx - import_function_count] */
+        jitted_func_idx = func_idx - wasm_module->import_function_count;
+        GEN_INSN(LDI64, jitted_code, func_ptrs,
+                 NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
+    }
+#else
+    module_inst = jit_cc_new_reg_I32(cc);
+    GEN_INSN(LDI32, module_inst, cc->exec_env_reg,
+             NEW_CONST(I32, offsetof(WASMExecEnv, module_inst)));
+    if (func_idx >= wasm_module->import_function_count) {
+        module = jit_cc_new_reg_I32(cc);
+        func_ptrs = jit_cc_new_reg_I32(cc);
+        jitted_code = jit_cc_new_reg_I32(cc);
+        /* module = module_inst->module */
+        GEN_INSN(LDI32, module, module_inst,
+                 NEW_CONST(I32, offsetof(WASMModuleInstance, module)));
+        /* func_ptrs = module->fast_jit_func_ptrs */
+        GEN_INSN(LDI32, func_ptrs, module,
+                 NEW_CONST(I32, offsetof(WASMModule, fast_jit_func_ptrs)));
+        /* jitted_code = func_ptrs[func_idx - import_function_count] */
+        jitted_func_idx = func_idx - wasm_module->import_function_count;
+        GEN_INSN(LDI32, jitted_code, func_ptrs,
+                 NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
+    }
+#endif
+
+    if (func_idx < wasm_module->import_function_count) {
+        func_import = &wasm_module->import_functions[func_idx].u.function;
+        func_type = func_import->func_type;
+    }
+    else {
+        func = wasm_module
+                   ->functions[func_idx - wasm_module->import_function_count];
+        func_type = func->func_type;
+    }
+
+    /* Prepare parameters for the function to call */
+    outs_off =
+        cc->total_frame_size + offsetof(WASMInterpFrame, lp)
+        + wasm_get_cell_num(func_type->types, func_type->param_count) * 4;
+
+    for (i = 0; i < func_type->param_count; i++) {
+        switch (func_type->types[func_type->param_count - 1 - i]) {
+            case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+            case VALUE_TYPE_EXTERNREF:
+            case VALUE_TYPE_FUNCREF:
+#endif
+                POP_I32(value);
+                outs_off -= 4;
+                GEN_INSN(STI32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
+                break;
+            case VALUE_TYPE_I64:
+                POP_I64(value);
+                outs_off -= 8;
+                GEN_INSN(STI64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
+                break;
+            case VALUE_TYPE_F32:
+                POP_F32(value);
+                outs_off -= 4;
+                GEN_INSN(STF32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
+                break;
+            case VALUE_TYPE_F64:
+                POP_F64(value);
+                outs_off -= 8;
+                GEN_INSN(STF64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
+                break;
+            default:
+                bh_assert(0);
+                break;
+        }
+    }
+
+    /* Commit sp as the callee may use it to store the results */
+    gen_commit_sp_ip(cc->jit_frame);
+
+    if (func_idx < wasm_module->import_function_count) {
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+        /* Set native_ret to x86::eax, 1 is the hard reg index of eax */
+        native_ret = jit_reg_new(JIT_REG_KIND_I32, 1);
+#else
+        native_ret = jit_cc_new_reg_I32(cc);
+#endif
+#if UINTPTR_MAX == UINT64_MAX
+        insn =
+            GEN_INSN(CALLNATIVE, native_ret,
+                     NEW_CONST(I64, (uint64)(uintptr_t)jit_invoke_native), 3);
+#else
+        insn =
+            GEN_INSN(CALLNATIVE, native_ret,
+                     NEW_CONST(I32, (uint32)(uintptr_t)jit_invoke_native), 3);
+#endif
+        if (insn) {
+            *(jit_insn_opndv(insn, 2)) = cc->exec_env_reg;
+            *(jit_insn_opndv(insn, 3)) = NEW_CONST(I32, func_idx);
+            *(jit_insn_opndv(insn, 4)) = cc->fp_reg;
+        }
+
+        /* Check whether an exception was thrown */
+        GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
+        if (!jit_emit_exception(cc, EXCE_ALREADY_THROWN, JIT_OP_BEQ,
+                                cc->cmp_reg, 0)) {
+            return false;
+        }
+    }
+    else {
+        if (func_type->result_count > 0) {
+            switch (func_type->types[func_type->param_count]) {
+                case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+                case VALUE_TYPE_EXTERNREF:
+                case VALUE_TYPE_FUNCREF:
+#endif
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+                    /* Set result to x86::eax, 1 is the hard reg index of eax */
+                    result = jit_reg_new(JIT_REG_KIND_I32, 1);
+#else
+                    result = jit_cc_new_reg_I32(cc);
+#endif
+                    break;
+                case VALUE_TYPE_I64:
+#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
+                    /* Set result to x86::rax, 1 is the hard reg index of rax */
+                    result = jit_reg_new(JIT_REG_KIND_I64, 1);
+#else
+                    result = jit_cc_new_reg_I64(cc);
+#endif
+                    break;
+                case VALUE_TYPE_F32:
+                    result = jit_cc_new_reg_F32(cc);
+                    break;
+                case VALUE_TYPE_F64:
+                    result = jit_cc_new_reg_F64(cc);
+                    break;
+                default:
+                    bh_assert(0);
+                    break;
+            }
+        }
+
+        GEN_INSN(CALLBC, result, 0, jitted_code);
+    }
+
+    /* Push results */
+    n = cc->jit_frame->sp - cc->jit_frame->lp;
+    for (i = 0; i < func_type->result_count; i++) {
+        switch (func_type->types[func_type->param_count + i]) {
+            case VALUE_TYPE_I32:
+#if WASM_ENABLE_REF_TYPES != 0
+            case VALUE_TYPE_EXTERNREF:
+            case VALUE_TYPE_FUNCREF:
+#endif
+                value = jit_cc_new_reg_I32(cc);
+                GEN_INSN(LDI32, value, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                PUSH_I32(value);
+                n++;
+                break;
+            case VALUE_TYPE_I64:
+                value = jit_cc_new_reg_I64(cc);
+                GEN_INSN(LDI64, value, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                PUSH_I64(value);
+                n += 2;
+                break;
+            case VALUE_TYPE_F32:
+                value = jit_cc_new_reg_F32(cc);
+                GEN_INSN(LDF32, value, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                PUSH_F32(value);
+                n++;
+                break;
+            case VALUE_TYPE_F64:
+                value = jit_cc_new_reg_F64(cc);
+                GEN_INSN(LDF64, value, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                PUSH_F64(value);
+                n += 2;
+                break;
+            default:
+                bh_assert(0);
+                break;
+        }
+    }
+
+    /* Ignore tail call currently */
+    (void)tail_call;
+    return true;
+fail:
     return false;
 }
diff --git a/core/iwasm/fast-jit/jit_codecache.c b/core/iwasm/fast-jit/jit_codecache.c
index 3fea07b05..4c899ad9d 100644
--- a/core/iwasm/fast-jit/jit_codecache.c
+++ b/core/iwasm/fast-jit/jit_codecache.c
@@ -56,8 +56,10 @@ jit_code_cache_free(void *ptr)
 bool
 jit_pass_register_jitted_code(JitCompContext *cc)
 {
+    uint32 jit_func_idx =
+        cc->cur_wasm_func_idx - cc->cur_wasm_module->import_function_count;
     cc->cur_wasm_func->fast_jit_jitted_code = cc->jitted_addr_begin;
-    cc->cur_wasm_module->fast_jit_func_ptrs[cc->cur_wasm_func_idx] =
+    cc->cur_wasm_module->fast_jit_func_ptrs[jit_func_idx] =
         cc->jitted_addr_begin;
     return true;
 }
diff --git a/core/iwasm/fast-jit/jit_compiler.c b/core/iwasm/fast-jit/jit_compiler.c
index fa78bea4b..5477cfe6d 100644
--- a/core/iwasm/fast-jit/jit_compiler.c
+++ b/core/iwasm/fast-jit/jit_compiler.c
@@ -129,7 +129,8 @@ jit_compiler_compile(WASMModule *module, uint32 func_idx)
     }
 
     cc->cur_wasm_module = module;
-    cc->cur_wasm_func = module->functions[func_idx];
+    cc->cur_wasm_func =
+        module->functions[func_idx - module->import_function_count];
     cc->cur_wasm_func_idx = func_idx;
     cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
                                && !cc->cur_wasm_func->has_op_func_call)
@@ -155,7 +156,7 @@ jit_compiler_compile_all(WASMModule *module)
     uint32 i;
 
     for (i = 0; i < module->function_count; i++) {
-        if (!jit_compiler_compile(module, i)) {
+        if (!jit_compiler_compile(module, module->import_function_count + i)) {
             return false;
         }
     }
diff --git a/core/iwasm/fast-jit/jit_frontend.c b/core/iwasm/fast-jit/jit_frontend.c
index b85557224..18f0fb567 100644
--- a/core/iwasm/fast-jit/jit_frontend.c
+++ b/core/iwasm/fast-jit/jit_frontend.c
@@ -149,7 +149,7 @@ gen_commit_sp_ip(JitFrame *frame)
 #if UINTPTR_MAX == UINT64_MAX
         sp = jit_cc_new_reg_I64(cc);
         GEN_INSN(ADD, sp, cc->fp_reg,
-                 NEW_CONST(I32, offset_of_local(frame->sp - frame->lp)));
+                 NEW_CONST(I64, offset_of_local(frame->sp - frame->lp)));
         GEN_INSN(STI64, sp, cc->fp_reg,
                  NEW_CONST(I32, offsetof(WASMInterpFrame, sp)));
 #else
@@ -222,7 +222,7 @@ form_and_translate_func(JitCompContext *cc)
                     *(jit_insn_opnd(insn, 0)) =
                         jit_basic_block_label(cc->exce_basic_blocks[i]);
                 }
-                else if (insn->opcode >= JIT_OP_BNE
+                else if (insn->opcode >= JIT_OP_BEQ
                          && insn->opcode <= JIT_OP_BLEU) {
                     *(jit_insn_opnd(insn, 1)) =
                         jit_basic_block_label(cc->exce_basic_blocks[i]);
@@ -230,19 +230,23 @@ form_and_translate_func(JitCompContext *cc)
                 incoming_insn = incoming_insn_next;
             }
             cc->cur_basic_block = cc->exce_basic_blocks[i];
+            if (i != EXCE_ALREADY_THROWN) {
 #if UINTPTR_MAX == UINT64_MAX
-            insn = GEN_INSN(
-                CALLNATIVE, 0,
-                NEW_CONST(I64, (uint64)(uintptr_t)jit_set_exception_with_id),
-                1);
+                insn = GEN_INSN(
+                    CALLNATIVE, 0,
+                    NEW_CONST(I64,
+                              (uint64)(uintptr_t)jit_set_exception_with_id),
+                    1);
 #else
-            insn = GEN_INSN(
-                CALLNATIVE, 0,
-                NEW_CONST(I32, (uint32)(uintptr_t)jit_set_exception_with_id),
-                1);
+                insn = GEN_INSN(
+                    CALLNATIVE, 0,
+                    NEW_CONST(I32,
+                              (uint32)(uintptr_t)jit_set_exception_with_id),
+                    1);
 #endif
-            if (insn) {
-                *(jit_insn_opndv(insn, 2)) = NEW_CONST(I32, i);
+                if (insn) {
+                    *(jit_insn_opndv(insn, 2)) = NEW_CONST(I32, i);
+                }
             }
 
             GEN_INSN(RETURN, NEW_CONST(I32, JIT_INTERP_ACTION_THROWN));
@@ -329,6 +333,8 @@ init_func_translation(JitCompContext *cc)
     cc->jit_frame = jit_frame;
     cc->cur_basic_block = jit_cc_entry_basic_block(cc);
     cc->total_frame_size = wasm_interp_interp_frame_size(total_cell_num);
+    cc->spill_cache_offset = (uint32)offsetof(WASMInterpFrame, spill_cache);
+    cc->spill_cache_size = (uint32)sizeof(uint32) * FAST_JIT_SPILL_CACHE_SIZE;
     cc->jitted_return_address_offset =
         offsetof(WASMInterpFrame, jitted_return_addr);
     cc->cur_basic_block = jit_cc_entry_basic_block(cc);
@@ -588,6 +594,7 @@ jit_compile_func(JitCompContext *cc)
     float64 f64_const;
 
     while (frame_ip < frame_ip_end) {
+        cc->jit_frame->ip = frame_ip;
         opcode = *frame_ip++;
 
 #if 0 /* TODO */
@@ -707,6 +714,12 @@ jit_compile_func(JitCompContext *cc)
                     return false;
                 break;
 
+            case WASM_OP_CALL:
+                read_leb_uint32(frame_ip, frame_ip_end, func_idx);
+                if (!jit_compile_op_call(cc, func_idx, false))
+                    return false;
+                break;
+
             case WASM_OP_CALL_INDIRECT:
             {
                 uint32 tbl_idx;
@@ -1805,4 +1818,4 @@ fail:
     return false;
 }
 #endif /* End of WASM_ENABLE_THREAD_MGR */
-#endif
\ No newline at end of file
+#endif
diff --git a/core/iwasm/fast-jit/jit_frontend.h b/core/iwasm/fast-jit/jit_frontend.h
index 24f45f6e6..d4819414a 100644
--- a/core/iwasm/fast-jit/jit_frontend.h
+++ b/core/iwasm/fast-jit/jit_frontend.h
@@ -114,6 +114,7 @@ typedef enum JitExceptionID {
     EXCE_AUX_STACK_UNDERFLOW,
     EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
     EXCE_OPERAND_STACK_OVERFLOW,
+    EXCE_ALREADY_THROWN,
     EXCE_NUM,
 } JitExceptionID;
 
diff --git a/core/iwasm/fast-jit/jit_ir.h b/core/iwasm/fast-jit/jit_ir.h
index bb17d0d43..5f1533caa 100644
--- a/core/iwasm/fast-jit/jit_ir.h
+++ b/core/iwasm/fast-jit/jit_ir.h
@@ -1021,6 +1021,11 @@ typedef struct JitCompContext {
     /* The total frame size of current function */
     uint32 total_frame_size;
 
+    /* The offset of the spill cache in the interp frame */
+    uint32 spill_cache_offset;
+    /* The size of the spill cache, in bytes */
+    uint32 spill_cache_size;
+
     /* The offset of jitted_return_address in the frame, which is set by
        the pass frontend and used by the pass codegen. */
     uint32 jitted_return_address_offset;
diff --git a/core/iwasm/fast-jit/jit_regalloc.c b/core/iwasm/fast-jit/jit_regalloc.c
index 3dec1b8d2..51745e527 100644
--- a/core/iwasm/fast-jit/jit_regalloc.c
+++ b/core/iwasm/fast-jit/jit_regalloc.c
@@ -247,12 +247,9 @@ rc_alloc_spill_slot(RegallocContext *rc, JitReg vreg)
 found:
     /* Now, i is the first slot for vreg. */
 
-#if 0 /* TODO: check the spill */
-    if (rc->cc->interp_frame_size + (i + stride) * 4
-        > rc->cc->total_frame_size)
-        /* No frame space for the spill area. */
-        return 0;
-#endif
+    if ((i + stride) * 4 > rc->cc->spill_cache_size)
+        /* No frame space for the spill area. */
+        return 0;
 
     /* Allocate the slot(s) to vreg. */
     for (j = i; j < i + stride; j++)
@@ -386,9 +383,8 @@ collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
 static JitReg
 offset_of_spill_slot(JitCompContext *cc, JitReg slot)
 {
-    /* TODO: check the spill */
-    return jit_cc_new_const_I32(
-        cc, /*cc->interp_frame_size + jit_cc_get_const_I32 (cc, slot) * 4*/ 0);
+    return jit_cc_new_const_I32(cc, cc->spill_cache_offset
+                                        + jit_cc_get_const_I32(cc, slot) * 4);
 }
 
 /**
@@ -542,8 +538,7 @@ allocate_hreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
     if (hreg_num == 0)
     /* Unsupported hard register kind. */
     {
-        /* TODO: how to set error */
-        /*jit_set_error (JIT_ERROR_UNSUPPORTED_HREG);*/
+        jit_set_last_error(rc->cc, "unsupported hard register kind");
         return 0;
     }
 
diff --git a/core/iwasm/interpreter/wasm.h b/core/iwasm/interpreter/wasm.h
index a10462c65..074eec23a 100644
--- a/core/iwasm/interpreter/wasm.h
+++ b/core/iwasm/interpreter/wasm.h
@@ -594,6 +594,7 @@ wasm_get_smallest_type_idx(WASMType **types, uint32 type_count,
         if (wasm_type_equal(types[cur_type_idx], types[i]))
             return i;
     }
+    (void)type_count;
     return cur_type_idx;
 }
 
diff --git a/core/iwasm/interpreter/wasm_interp.h b/core/iwasm/interpreter/wasm_interp.h
index 4ff465562..abfe56182 100644
--- a/core/iwasm/interpreter/wasm_interp.h
+++ b/core/iwasm/interpreter/wasm_interp.h
@@ -28,7 +28,7 @@ typedef struct WASMInterpFrame {
 
 #if WASM_ENABLE_FAST_JIT != 0
     uint8 *jitted_return_addr;
-    uint32 spill_cache[16];
+    uint32 spill_cache[FAST_JIT_SPILL_CACHE_SIZE];
 #endif
 
 #if WASM_ENABLE_PERF_PROFILING != 0
diff --git a/core/iwasm/interpreter/wasm_interp_classic.c b/core/iwasm/interpreter/wasm_interp_classic.c
index ef4d9e86f..11e8fd7a4 100644
--- a/core/iwasm/interpreter/wasm_interp_classic.c
+++ b/core/iwasm/interpreter/wasm_interp_classic.c
@@ -829,6 +829,21 @@ wasm_interp_call_func_native(WASMModuleInstance *module_inst,
     wasm_exec_env_set_cur_frame(exec_env, prev_frame);
 }
 
+#if WASM_ENABLE_FAST_JIT != 0
+bool
+jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
+                  WASMInterpFrame *prev_frame)
+{
+    WASMModuleInstance *module_inst =
+        (WASMModuleInstance *)exec_env->module_inst;
+    WASMFunctionInstance *cur_func = module_inst->functions + func_idx;
+
+    wasm_interp_call_func_native(module_inst, exec_env, cur_func, prev_frame);
+
+    return wasm_get_exception(module_inst) ? false : true;
+}
+#endif
+
 #if WASM_ENABLE_MULTI_MODULE != 0
 static void
 wasm_interp_call_func_bytecode(WASMModuleInstance *module,
diff --git a/core/iwasm/interpreter/wasm_loader.c b/core/iwasm/interpreter/wasm_loader.c
index d0b82960a..ad0ce81b4 100644
--- a/core/iwasm/interpreter/wasm_loader.c
+++ b/core/iwasm/interpreter/wasm_loader.c
@@ -15,6 +15,7 @@
 #endif
 #if WASM_ENABLE_FAST_JIT != 0
 #include "../fast-jit/jit_compiler.h"
+#include "../fast-jit/jit_codecache.h"
 #endif
 
 /* Read a value of given type from the address pointed to by the given
@@ -3737,8 +3738,13 @@ wasm_loader_unload(WASMModule *module)
 #endif
 
 #if WASM_ENABLE_FAST_JIT != 0
-    if (module->fast_jit_func_ptrs)
+    if (module->fast_jit_func_ptrs) {
+        for (i = 0; i < module->function_count; i++) {
+            if (module->fast_jit_func_ptrs[i])
+                jit_code_cache_free(module->fast_jit_func_ptrs[i]);
+        }
         wasm_runtime_free(module->fast_jit_func_ptrs);
+    }
 #endif
 
     wasm_runtime_free(module);
diff --git a/core/iwasm/interpreter/wasm_mini_loader.c b/core/iwasm/interpreter/wasm_mini_loader.c
index 2dd815e22..b92e499a1 100644
--- a/core/iwasm/interpreter/wasm_mini_loader.c
+++ b/core/iwasm/interpreter/wasm_mini_loader.c
@@ -12,6 +12,7 @@
 #include "../common/wasm_native.h"
 #if WASM_ENABLE_FAST_JIT != 0
 #include "../fast-jit/jit_compiler.h"
+#include "../fast-jit/jit_codecache.h"
 #endif
 
 /* Read a value of given type from the address pointed to by the given
@@ -2141,6 +2142,11 @@ load_from_sections(WASMModule *module, WASMSection *sections,
     }
 
 #if WASM_ENABLE_FAST_JIT != 0
+    if (!(module->fast_jit_func_ptrs =
+              loader_malloc(sizeof(void *) * module->function_count, error_buf,
+                            error_buf_size))) {
+        return false;
+    }
     if (!jit_compiler_compile_all(module)) {
         set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
         return false;
@@ -2354,6 +2360,11 @@ wasm_loader_load(uint8 *buf, uint32 size, char *error_buf,
         return NULL;
     }
 
+#if WASM_ENABLE_FAST_JIT != 0
+    module->load_addr = (uint8 *)buf;
+    module->load_size = size;
+#endif
+
     if (!load(buf, size, module, error_buf, error_buf_size)) {
         goto fail;
     }
@@ -2439,6 +2450,16 @@ wasm_loader_unload(WASMModule *module)
         }
     }
 
+#if WASM_ENABLE_FAST_JIT != 0
+    if (module->fast_jit_func_ptrs) {
+        for (i = 0; i < module->function_count; i++) {
+            if (module->fast_jit_func_ptrs[i])
+                jit_code_cache_free(module->fast_jit_func_ptrs[i]);
+        }
+        wasm_runtime_free(module->fast_jit_func_ptrs);
+    }
+#endif
+
     wasm_runtime_free(module);
 }
 
@@ -5607,7 +5628,8 @@ re_scan:
                 operand_offset = local_offset;
                 PUSH_OFFSET_TYPE(local_type);
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
                     *p_org++ = EXT_OP_GET_LOCAL_FAST;
                     if (is_32bit_type(local_type))
@@ -5667,7 +5689,8 @@ re_scan:
                     POP_OFFSET_TYPE(local_type);
                 }
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
                     *p_org++ = EXT_OP_SET_LOCAL_FAST;
                     if (is_32bit_type(local_type))
@@ -5723,7 +5746,8 @@ re_scan:
                 *(loader_ctx->frame_offset
                   - wasm_value_type_cell_num(local_type)));
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
                     *p_org++ = EXT_OP_TEE_LOCAL_FAST;
                     if (is_32bit_type(local_type))